diff --git a/.circleci/config.yml b/.circleci/config.yml index 222f14d50..53611d565 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -805,6 +805,11 @@ workflows: suite: itest-deals_padding target: "./itests/deals_padding_test.go" + - test: + name: test-itest-deals_partial_retrieval_dm-level + suite: itest-deals_partial_retrieval_dm-level + target: "./itests/deals_partial_retrieval_dm-level_test.go" + - test: name: test-itest-deals_partial_retrieval suite: itest-deals_partial_retrieval @@ -825,6 +830,11 @@ workflows: suite: itest-deals_publish target: "./itests/deals_publish_test.go" + - test: + name: test-itest-deals_retry_deal_no_funds + suite: itest-deals_retry_deal_no_funds + target: "./itests/deals_retry_deal_no_funds_test.go" + - test: name: test-itest-deals suite: itest-deals @@ -885,6 +895,11 @@ workflows: suite: itest-sector_terminate target: "./itests/sector_terminate_test.go" + - test: + name: test-itest-self_sent_txn + suite: itest-self_sent_txn + target: "./itests/self_sent_txn_test.go" + - test: name: test-itest-tape suite: itest-tape @@ -935,7 +950,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: @@ -966,19 +981,10 @@ workflows: tags: only: - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/.circleci/template.yml b/.circleci/template.yml index 4b954391b..ef6818c6d 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -785,7 +785,7 @@ workflows: codecov-upload: false suite: conformance-bleeding-edge target: "./conformance" - vectors-branch: master + vectors-branch: specs-actors-v7 - trigger-testplans: filters: branches: @@ -816,19 +816,10 @@ workflows: tags: only: - 
/^v\d+\.\d+\.\d+(-rc\d+)?$/ - - build-appimage: - filters: - branches: - ignore: - - /.*/ - tags: - only: - - /^v\d+\.\d+\.\d+(-rc\d+)?$/ - publish: requires: - build-all - build-macos - - build-appimage filters: branches: ignore: diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..c806120b1 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,21 @@ +## Related Issues + + +## Proposed Changes + + + +## Additional Info + + +## Checklist + +Before you mark the PR ready for review, please make sure that: +- [ ] All commits have a clear commit message. +- [ ] The PR title is in the form of of `: : ` + - example: ` fix: mempool: Introduce a cache for valid signatures` + - `PR type`: _fix_, _feat_, _INTERFACE BREAKING CHANGE_, _CONSENSUS BREAKING_, _build_, _chore_, _ci_, _docs_,_perf_, _refactor_, _revert_, _style_, _test_ + - `area`: _api_, _chain_, _state_, _vm_, _data transfer_, _market_, _mempool_, _message_, _block production_, _multisig_, _networking_, _paychan_, _proving_, _sealing_, _wallet_, _deps_ +- [ ] This PR has tests for new functionality or change in behaviour +- [ ] If new user-facing features are introduced, clear usage guidelines and / or documentation updates should be included in https://lotus.filecoin.io or [Discussion Tutorials.](https://github.com/filecoin-project/lotus/discussions/categories/tutorials) +- [ ] CI is green diff --git a/.gitignore b/.gitignore index 766de24ec..522451ca4 100644 --- a/.gitignore +++ b/.gitignore @@ -51,6 +51,7 @@ scratchpad gen.gen *.key +data/* data/*/* !data/*/keystore/ !data/*/config.toml diff --git a/CHANGELOG.md b/CHANGELOG.md index bcedad7c2..a420421de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,349 @@ # Lotus changelog +# v1.13.2 / 2022-01-09 + +Lotus v1.13.2 is a *highly recommended* feature release with remarkable retrieval improvements, new features like +worker management, schedule enhancements and so on. 
+ +## Highlights +- πŸš€πŸš€πŸš€Improve retrieval deal experience + - Testing result with MinerX.3 shows the retrieval deal success rate has increased dramatically with faster transfer + speed, you can join or follow along furthur performance testings [here](https://github.com/filecoin-project/lotus/discussions/7874). We recommend application developers to integrate with the new + retrieval APIs to provide a better client experience. + - 🌟🌟🌟 Reduce retrieval Time-To-First-Byte over 100x ([#7693](https://github.com/filecoin-project/lotus/pull/7693)) + - This change makes most free, small retrievals sub-second + - 🌟🌟🌟 Partial retrieval ux improvements ([#7610](https://github.com/filecoin-project/lotus/pull/7610)) + - New retrieval commands for clients: + - `lotus client ls`: retrieve and list desired object links + - `lotus client cat`: retrieve and print the data from the network + - 🌟🌟 The monolith `ClientRetrieve` method was broken into: + - `ClientRetrieve` which retrieves data into the local repo (or into an IPFS node if ipfs integration is enabled) + - `ClientRetrieveWait` which will wait for the retrieval to complete + - `ClientExport` which will export data from the local node + - Note: this change only applies to v1 API. v0 API remains unchanged. 
+ - 🌟 Support for full ipld selectors was added (for example making it possible to only retrieve list of directories in a deal, without fetching any file data) + - To learn more, see [here](https://github.com/filecoin-project/lotus/blob/0523c946f984b22b3f5de8cc3003cc791389527e/api/types.go#L230-L264) +- πŸš€πŸš€ Sealing scheduler enhancements ([#7703](https://github.com/filecoin-project/lotus/pull/7703), + [#7269](https://github.com/filecoin-project/lotus/pull/7269)), [#7714](https://github.com/filecoin-project/lotus/pull/7714) + - Workers are now aware of cgroup memory limits + - Multiple tasks which use a GPU can be scheduled on a single worker + - Workers can override default resource table through env vars + - Default value list: https://gist.github.com/magik6k/c0e1c7cd73c1241a9acabc30bf469a43 +- πŸš€πŸš€ Sector storage groups ([#7453](https://github.com/filecoin-project/lotus/pull/7453)) + - Storage groups allow for better control of data flow between workers, for example, it makes it possible to define that data from PC1 on a given worker has to have it's PC2 step executed on the same worker + - To set it up, follow the instructions under the `Sector Storage Group` section [here](https://lotus.filecoin.io/docs/storage-providers/seal-workers/#lotus-worker-co-location) + +## New Features +- Add RLE dump code ([#7691](https://github.com/filecoin-project/lotus/pull/7691)) +- Shed: Add a util to list miner faults ([#7605](https://github.com/filecoin-project/lotus/pull/7605)) +- lotus-shed msg: Decode submessages/msig proposals ([#7639](https://github.com/filecoin-project/lotus/pull/7639)) +- CLI: Add a lotus multisig cancel command ([#7645](https://github.com/filecoin-project/lotus/pull/7645)) +- shed: simple wallet balancer util ([#7414](https://github.com/filecoin-project/lotus/pull/7414)) + - balancing token balance between multiple accounts + +## Improvements +- Add verbose mode to `lotus-miner pieces list-cids` 
([#7699](https://github.com/filecoin-project/lotus/pull/7699)) +- retrieval: Only output matching nodes, MatchPath dagspec ([#7706](https://github.com/filecoin-project/lotus/pull/7706)) +- Cleanup partial retrieval codepaths ( zero functional changes ) ([#7688](https://github.com/filecoin-project/lotus/pull/7688)) +- storage: Use 1M buffers for Tar transfers ([#7681](https://github.com/filecoin-project/lotus/pull/7681)) +- Chore/dm level tests plus merkle proof cars ([#7673](https://github.com/filecoin-project/lotus/pull/7673)) +- Shed: Add a util to create miners more easily ([#7595](https://github.com/filecoin-project/lotus/pull/7595)) +- add timeout flag to wait-api command ([#7592](https://github.com/filecoin-project/lotus/pull/7592)) +- add log for restart windows post scheduler ([#7613](https://github.com/filecoin-project/lotus/pull/7613)) +- remove jaeger envvars ([#7631](https://github.com/filecoin-project/lotus/pull/7631)) +- remove api and jaeger env from docker file ([#7624](https://github.com/filecoin-project/lotus/pull/7624)) +- Wdpost worker: Reduce challenge confidence to 1 epoch ([#7572](https://github.com/filecoin-project/lotus/pull/7572)) +- add additional methods to lotus gateway ([#7644](https://github.com/filecoin-project/lotus/pull/7644)) +- Add caches to lotus-stats and splitcode ([#7329](https://github.com/filecoin-project/lotus/pull/7329)) +- remote store: Remove debug printf ([#7664](https://github.com/filecoin-project/lotus/pull/7664)) +- docsgen-cli: Handle commands with no description correctly ([#7659](https://github.com/filecoin-project/lotus/pull/7659)) + +## Bug Fixes +- fix docker logic error ([#7709](https://github.com/filecoin-project/lotus/pull/7709)) +- add missing NodeType tag ([#7559](https://github.com/filecoin-project/lotus/pull/7559)) +- checkCommit should return SectorCommitFailed ([#7555](https://github.com/filecoin-project/lotus/pull/7555)) +- ffiwrapper: Validate PC2 by calling C1 with random seeds 
([#7710](https://github.com/filecoin-project/lotus/pull/7710)) + +## Dependency Updates +- Update go-graphsync v0.10.6 ([#7708](https://github.com/filecoin-project/lotus/pull/7708)) +- update go-libp2p-pubsub to v0.5.6 ([#7581](https://github.com/filecoin-project/lotus/pull/7581)) +- Update go-state-types ([#7591](https://github.com/filecoin-project/lotus/pull/7591)) +- disable mplex stream muxer ([#7689](https://github.com/filecoin-project/lotus/pull/7689)) +- Bump ws from 5.2.2 to 5.2.3 in /lotuspond/front ([#7660](https://github.com/filecoin-project/lotus/pull/7660)) +- Bump color-string from 1.5.3 to 1.6.0 in /lotuspond/front ([#7658](https://github.com/filecoin-project/lotus/pull/7658)) +- Bump postcss from 7.0.17 to 7.0.39 in /lotuspond/front ([#7657](https://github.com/filecoin-project/lotus/pull/7657)) +- Bump path-parse from 1.0.6 to 1.0.7 in /lotuspond/front ([#7656](https://github.com/filecoin-project/lotus/pull/7656)) +- Bump tmpl from 1.0.4 to 1.0.5 in /lotuspond/front ([#7655](https://github.com/filecoin-project/lotus/pull/7655)) +- Bump url-parse from 1.4.7 to 1.5.3 in /lotuspond/front ([#7654](https://github.com/filecoin-project/lotus/pull/7654)) +- github.com/filecoin-project/go-state-types (v0.1.1-0.20210915140513-d354ccf10379 -> v0.1.1): + +## Others +- Update archive script ([#7690](https://github.com/filecoin-project/lotus/pull/7690)) + +## Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k | 89 | +5200/-1818 | 232 | +| Travis Person | 5 | +1473/-953 | 38 | +| @arajasek | 6 | +550/-38 | 19 | +| @clinta | 4 | +393/-123 | 26 | +| @ribasushi | 3 | +334/-68 | 7 | +| @jennijuju| 13 | +197/-120 | 67 | +| @Kubuxu | 10 | +153/-30 | 10 | +| @coryschwartz | 6 | +18/-26 | 6 | +| Marten Seemann | 2 | +6/-34 | 5 | +| @vyzo | 1 | +3/-3 | 2 | +| @hannahhoward | 1 | +3/-3 | 2 | +| @zenground0 | 2 | +2/-2 | 2 | +| @yaohcn | 2 | +2/-2 | 2 | +| @jennijuju | 1 | +1/-1 | 1 | +| 
@hunjixin | 1 | +1/-0 | 1 | + + + +# v1.13.1 / 2021-11-26 + +This is an optional Lotus v1.13.1 release. + +## New Features +- Shed: Add a util to find miner based on peerid ([filecoin-project/lotus#7544](https://github.com/filecoin-project/lotus/pull/7544)) +- Collect and expose graphsync metrics ([filecoin-project/lotus#7542](https://github.com/filecoin-project/lotus/pull/7542)) +- Shed: Add a util to find the most recent null tipset ([filecoin-project/lotus#7456](https://github.com/filecoin-project/lotus/pull/7456)) + +## Improvements +- Show prepared tasks in sealing jobs ([filecoin-project/lotus#7527](https://github.com/filecoin-project/lotus/pull/7527)) +- To make Deep happy ([filecoin-project/lotus#7546](https://github.com/filecoin-project/lotus/pull/7546)) +- Expose per-state sector counts on the prometheus endpoint ([filecoin-project/lotus#7541](https://github.com/filecoin-project/lotus/pull/7541)) +- Add storage-id flag to proving check ([filecoin-project/lotus#7479](https://github.com/filecoin-project/lotus/pull/7479)) +- FilecoinEC: Improve a log message ([filecoin-project/lotus#7499](https://github.com/filecoin-project/lotus/pull/7499)) +- itests: retry deal when control addr is out of funds ([filecoin-project/lotus#7454](https://github.com/filecoin-project/lotus/pull/7454)) +- Normlize selector use within lotus ([filecoin-project/lotus#7467](https://github.com/filecoin-project/lotus/pull/7467)) +- sealing: Improve scheduling of ready work ([filecoin-project/lotus#7335](https://github.com/filecoin-project/lotus/pull/7335)) +- Remove dead example code + dep ([filecoin-project/lotus#7466](https://github.com/filecoin-project/lotus/pull/7466)) + +## Bug Fixes +- fix the withdrawn amount unit ([filecoin-project/lotus#7563](https://github.com/filecoin-project/lotus/pull/7563)) +- rename vm#make{=>Account}Actor(). 
([filecoin-project/lotus#7562](https://github.com/filecoin-project/lotus/pull/7562)) +- Fix used sector space accounting after AddPieceFailed ([filecoin-project/lotus#7530](https://github.com/filecoin-project/lotus/pull/7530)) +- Don't remove sector data when moving data into a shared path ([filecoin-project/lotus#7494](https://github.com/filecoin-project/lotus/pull/7494)) +- fix: support node instantiation in external packages ([filecoin-project/lotus#7511](https://github.com/filecoin-project/lotus/pull/7511)) +- Stop adding Jennifer's $HOME to lotus docs ([filecoin-project/lotus#7477](https://github.com/filecoin-project/lotus/pull/7477)) +- Bugfix: Use correct startup network versions ([filecoin-project/lotus#7486](https://github.com/filecoin-project/lotus/pull/7486)) +- Dep upgrade pass ([filecoin-project/lotus#7478](https://github.com/filecoin-project/lotus/pull/7478)) +- Remove obsolete GS testplan - it now lives in go-graphsync ([filecoin-project/lotus#7469](https://github.com/filecoin-project/lotus/pull/7469)) +- sealing: Recover sectors after failed AddPiece ([filecoin-project/lotus#7444](https://github.com/filecoin-project/lotus/pull/7444)) + +## Dependency Updates +- Update go-graphsync v0.10.1 ([filecoin-project/lotus#7457](https://github.com/filecoin-project/lotus/pull/7457)) +- update to proof v10.1.0 ([filecoin-project/lotus#7564](https://github.com/filecoin-project/lotus/pull/7564)) +- github.com/filecoin-project/specs-actors/v6 (v6.0.0 -> v6.0.1): +- github.com/filecoin-project/go-jsonrpc (v0.1.4-0.20210217175800-45ea43ac2bec -> v0.1.5): +- github.com/filecoin-project/go-fil-markets (v1.13.1 -> v1.13.3): +- github.com/filecoin-project/go-data-transfer (v1.11.1 -> v1.11.4): +- github.com/filecoin-project/go-crypto (v0.0.0-20191218222705-effae4ea9f03 -> v0.0.1): +- github.com/filecoin-project/go-commp-utils (v0.1.1-0.20210427191551-70bf140d31c7 -> v0.1.2): +- github.com/filecoin-project/go-cbor-util (v0.0.0-20191219014500-08c40a1e63a2 -> v0.0.1): +- 
github.com/filecoin-project/go-address (v0.0.5 -> v0.0.6): +- unpin the yamux dependency ([filecoin-project/lotus#7532](https://github.com/filecoin-project/lotus/pull/7532) +- peerstore@v0.2.9 was withdrawn, let's not depend on it directly ([filecoin-project/lotus#7481](https://github.com/filecoin-project/lotus/pull/7481)) +- chore(deps): use tagged github.com/ipld/go-ipld-selector-text-lite ([filecoin-project/lotus#7464](https://github.com/filecoin-project/lotus/pull/7464)) +- Stop indirectly depending on deprecated github.com/prometheus/common ([filecoin-project/lotus#7473](https://github.com/filecoin-project/lotus/pull/7473)) + +## Others +- fix the changelog ([filecoin-project/lotus#7594](https://github.com/filecoin-project/lotus/pull/7594)) +- v1.13.1-rc2 prep ([filecoin-project/lotus#7593](https://github.com/filecoin-project/lotus/pull/7593)) +- lotus v1.13.1-rc1 ([filecoin-project/lotus#7569](https://github.com/filecoin-project/lotus/pull/7569)) +- misc: back-port v1.13.0 back to master ([filecoin-project/lotus#7537](https://github.com/filecoin-project/lotus/pull/7537)) +- Inline codegen ([filecoin-project/lotus#7495](https://github.com/filecoin-project/lotus/pull/7495)) +- releases -> master ([filecoin-project/lotus#7507](https://github.com/filecoin-project/lotus/pull/7507)) +- Make chocolate back to master ([filecoin-project/lotus#7493](https://github.com/filecoin-project/lotus/pull/7493)) +- restore filters for the build-macos job ([filecoin-project/lotus#7455](https://github.com/filecoin-project/lotus/pull/7455)) +- bump master to v1.13.1-dev ([filecoin-project/lotus#7451](https://github.com/filecoin-project/lotus/pull/7451)) + +Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @magik6k | 27 | +1285/-531 | 76 | +| @ribasushi | 7 | +265/-1635 | 21 | +| @raulk | 2 | +2/-737 | 13 | +| @nonsens | 4 | +391/-21 | 19 | +| @arajasek | 6 | +216/-23 | 14 | +| @jennijuju| 8 | +102/-37 | 29 | 
+| Steven Allen | 2 | +77/-29 | 6 | +| @jennijuju | 4 | +19/-18 | 11 | +| @dirkmc | 2 | +9/-9 | 4 | +| @@coryschwartz | 1 | +16/-2 | 2 | +| @frrist | 1 | +12/-0 | 2 | +| @Kubuxu | 5 | +5/-5 | 5 | +| @hunjixin | 2 | +6/-3 | 2 | +| @vyzo | 1 | +3/-3 | 2 | +| @@rvagg | 1 | +3/-3 | 2 | +| @hannahhoward | 1 | +3/-2 | 2 | +| Marten Seemann | 1 | +3/-0 | 1 | +| @ZenGround0 | 1 | +1/-1 | 1 | + + +# v1.13.0 / 2021-10-18 + +Lotus v1.13.0 is a *highly recommended* feature release for all lotus users(i.e: storage providers, data brokers, application developers and so on) that supports the upcoming +[Network v14 Chocolate upgrade](https://github.com/filecoin-project/lotus/discussions/7431). +This feature release includes the latest functionalities and improvements, like data transfer rate-limiting for both storage and retrieval deals, proof v10 with CUDA support, etc. You can find more details in the Changelog below. + +## Highlights +- Enable separate storage and retrieval transfer limits ([filecoin-project/lotus#7405](https://github.com/filecoin-project/lotus/pull/7405)) + - `SimultaneousTransfer` is now replaced by `SimultaneousTransfersForStorage` and `SimultaneousTransfersForRetrieval`, where users may set the amount of ongoing data transfer for storage and retrieval deals in parallel separately. The default value for both is set to 20. + - If you are using the lotus client, these two configuration variables are under the `Client` section in `./lotus/config.toml`. + - If you are a service provider, these two configuration variables should be set under the `Dealmaking` section in `/.lotusminer/config.toml`. +- Update proofs to v10.0.0 ([filecoin-project/lotus#7420](https://github.com/filecoin-project/lotus/pull/7420)) + - This version supports CUDA. To enable CUDA instead of openCL, build lotus with `FFI_USE_CUDA=1 FFI_BUILD_FROM_SOURCE=1 ...`. 
+ - You can find additional Nvidia driver installation instructions written by MinerX fellows [here](https://github.com/filecoin-project/lotus/discussions/7443#discussioncomment-1425274) and perf improvements result on PC2/C2/WindowPoSt computation on different profiles [here](https://github.com/filecoin-project/lotus/discussions/7443), most people observe a 30-50% decrease in computation time. + +## New Features +- Feat/datamodel selector retrieval ([filecoin-project/lotus#6393](https://github.com/filecoin-project/lotus/pull/66393393)) + - This introduces a new RetrievalOrder-struct field and a CLI option that takes a string representation as understood by [https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath](https://pkg.go.dev/github.com/ipld/go-ipld-selector-text-lite#SelectorSpecFromPath). This allows for partial retrieval of any sub-DAG of a deal provided the user knows the exact low-level shape of the deal contents. + - For example, to retrieve the first entry of a UnixFS directory by executing, run `lotus client retrieve --miner f0XXXXX --datamodel-path-selector 'Links/0/Hash' bafyROOTCID ~/output` +- Expose storage stats on the metrics endpoint ([filecoin-project/lotus#7418](https://github.com/filecoin-project/lotus/pull/7418)) +- feat: Catch panic to generate report and reraise ([filecoin-project/lotus#7341](https://github.com/filecoin-project/lotus/pull/7341)) + - Set `LOTUS_PANIC_REPORT_PATH` and `LOTUS_PANIC_JOURNAL_LOOKBACK` to get reports generated when a panic occurs on your daemon miner or workers. +- Add envconfig docs to the config ([filecoin-project/lotus#7412](https://github.com/filecoin-project/lotus/pull/7412)) + - You can now find supported env vars in [default-lotus-miner-config.toml](https://github.com/filecoin-project/lotus/blob/master/documentation/en/default-lotus-miner-config.toml). 
+- lotus shed: fr32 utils ([filecoin-project/lotus#7355](https://github.com/filecoin-project/lotus/pull/7355)) +- Miner CLI: Allow trying to change owners of any miner actor ([filecoin-project/lotus#7328](https://github.com/filecoin-project/lotus/pull/7328)) +- Add --unproven flag to the sectors list command ([filecoin-project/lotus#7308](https://github.com/filecoin-project/lotus/pull/7308)) + +## Improvements +- check for deal start epoch on SectorAddPieceToAny ([filecoin-project/lotus#7407](https://github.com/filecoin-project/lotus/pull/7407)) +- Verify Voucher locks in VoucherValidUnlocked ([filecoin-project/lotus#5609](https://github.com/filecoin-project/lotus/pull/5609)) +- Add more info to miner allinfo command ([filecoin-project/lotus#7384](https://github.com/filecoin-project/lotus/pull/7384)) +- add `lotus-miner storage-deals list --format=json` with transfers ([filecoin-project/lotus#7312](https://github.com/filecoin-project/lotus/pull/7312)) +- Fix formatting ([filecoin-project/lotus#7383](https://github.com/filecoin-project/lotus/pull/7383)) +- GetCurrentDealInfo err: handle correctly err case ([filecoin-project/lotus#7346](https://github.com/filecoin-project/lotus/pull/7346)) +- fix: Enforce verification key integrity check regardless of TRUST_PARAMS=1 ([filecoin-project/lotus#7327](https://github.com/filecoin-project/lotus/pull/7327)) +- Show more deal states in miner info ([filecoin-project/lotus#7311](https://github.com/filecoin-project/lotus/pull/7311)) +- Prep retrieval for selectors: no functional changes ([filecoin-project/lotus#7306](https://github.com/filecoin-project/lotus/pull/7306)) +- Seed: improve helptext ([filecoin-project/lotus#7304](https://github.com/filecoin-project/lotus/pull/7304)) +- Mempool: reduce size of sigValCache ([filecoin-project/lotus#7305](https://github.com/filecoin-project/lotus/pull/7305)) + - Stop indirectly depending on deprecated github.com/prometheus/common 
([filecoin-project/lotus#7474](https://github.com/filecoin-project/lotus/pull/7474)) + +## Bug Fixes +- StateSearchMsg: Correct usage of the allowReplaced flag ([filecoin-project/lotus#7450](https://github.com/filecoin-project/lotus/pull/7450)) +- fix staging area path buildup ([filecoin-project/lotus#7363](https://github.com/filecoin-project/lotus/pull/7363)) +- storagemgr: Cleanup workerLk around worker resources ([filecoin-project/lotus#7334](https://github.com/filecoin-project/lotus/pull/7334)) +- fix: check padSector Cid ([filecoin-project/lotus#7310](https://github.com/filecoin-project/lotus/pull/7310)) +- sealing: Recover sectors after failed AddPiece ([filecoin-project/lotus#7492](https://github.com/filecoin-project/lotus/pull/7492)) +- fix: support node instantiation in external packages ([filecoin-project/lotus#7511](https://github.com/filecoin-project/lotus/pull/7511)) +- Chore/backport cleanup withdrawn dependency ([filecoin-project/lotus#7482](https://github.com/filecoin-project/lotus/pull/7482)) + +## Dependency Updates +- github.com/filecoin-project/go-data-transfer (v1.10.1 -> v1.11.1): +- github.com/filecoin-project/go-fil-markets (v1.12.0 -> v1.13.1): +- github.com/filecoin-project/go-paramfetch (v0.0.2-0.20210614165157-25a6c7769498 -> v0.0.2): +- update go-libp2p to v0.15.0 ([filecoin-project/lotus#7362](https://github.com/filecoin-project/lotus/pull/7362)) +- update to go-graphsync v0.10.1 ([filecoin-project/lotus#7359](https://github.com/filecoin-project/lotus/pull/7359)) + +## Others +- Chocolate to master ([filecoin-project/lotus#7440](https://github.com/filecoin-project/lotus/pull/7440)) +- releases -> master ([filecoin-project/lotus#7403](https://github.com/filecoin-project/lotus/pull/7403)) +- remove nerpanet related code ([filecoin-project/lotus#7373](https://github.com/filecoin-project/lotus/pull/7373)) +- sync branch main with master on updates ([filecoin-project/lotus#7366](https://github.com/filecoin-project/lotus/pull/7366)) +- 
remove job to install jq ([filecoin-project/lotus#7309](https://github.com/filecoin-project/lotus/pull/7309)) +- restore filters for the build-macos job ([filecoin-project/lotus#7455](https://github.com/filecoin-project/lotus/pull/7455)) +- v1.13.0-rc2 ([filecoin-project/lotus#7458](https://github.com/filecoin-project/lotus/pull/7458)) +- v1.13.0-rc1 ([filecoin-project/lotus#7452](https://github.com/filecoin-project/lotus/pull/7452)) + +## Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @dirkmc | 8 | +845/-375 | 55 | +| @magik6k | 10 | +1056/-60 | 26 | +| @aarshkshah1992 | 6 | +813/-259 | 16 | +| @arajasek | 10 | +552/-251 | 43 | +| @ribasushi | 6 | +505/-78 | 22 | +| @jennijuju | 7 | +212/-323 | 34 | +| @nonsense | 10 | +335/-139 | 19 | +| @dirkmc | 8 | +149/-55 | 16 | +| @hannahhoward | 4 | +56/-32 | 17 | +| @rvagg | 4 | +61/-13 | 9 | +| @jennijuju | 2 | +0/-57 | 2 | +| @hannahhoward | 1 | +33/-18 | 7 | +| @Kubuxu | 8 | +27/-16 | 9 | +| @coryschwartz | 1 | +16/-2 | 2 | +| @travisperson | 1 | +14/-0 | 1 | +| @frrist | 1 | +12/-0 | 2 | +| @ognots | 1 | +0/-10 | 2 | +| @lanzafame | 1 | +3/-3 | 1 | +| @jennijuju | 1 | +2/-2 | 1 | +| @swift-mx | 1 | +1/-1 | 1 | + +# v1.12.0 / 2021-10-12 + +This is a mandatory release of Lotus that introduces [Filecoin Network v14](https://github.com/filecoin-project/community/discussions/74#discussioncomment-1398542), codenamed the Chocolate upgrade. The Filecoin mainnet will upgrade at epoch 1231620, on 2021-10-26T13:30:00Z. 
+ +The Chocolate upgrade introduces the following FIPs, delivered in [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0) + +- [FIP-0020](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0020.md): Add return value to `WithdrawBalance` +- [FIP-0021](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0021.md): Correct quality calculation on expiration +- [FIP-0022](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0022.md): Bad deals don't fail PublishStorageDeals +- [FIP-0023](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0023.md): Break ties between tipsets of equal weight +- [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md): BatchBalancer & BatchDiscount Post-HyperDrive Adjustment +- [FIP-0026](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0026.md): Extend sector faulty period from 2 weeks to 6 weeks + +Note that this release is built on top of lotus v1.11.3. Enterprising users like storage providers, data brokers and others are recommended to use lotus v1.13.0 for latest new features, improvements and bug fixes. + +## New Features and Changes +- Implement and support [FIP-0024](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0024.md) BatchBalancer & BatchDiscount Post-HyperDrive Adjustment: + - Precommit batch balancer support/config ([filecoin-project/lotus#7410](https://github.com/filecoin-project/lotus/pull/7410)) + - Set `BatchPreCommitAboveBaseFee` to decide whether sending out a PreCommits in individual messages or in a batch. + - The default value of `BatchPreCommitAboveBaseFee` and `AggregateAboveBaseFee` are now updated to 0.32nanoFIL. +- The amount of FIL withdrawn from `WithdrawBalance` from miner or market via lotus CLI is now printed out upon message landing on the chain. 
+ +## Improvements +- Implement [FIP-0023](https://github.com/filecoin-project/FIPs/blob/master/FIPS/fip-0023.md) (Break ties between tipsets of equal weight) + - ChainStore: Add a tiebreaker rule for tipsets of equal weight ([filecoin-project/lotus#7378](https://github.com/filecoin-project/lotus/pull/7378)) +- Randomness: Move getters from ChainAPI to StateAPI ([filecoin-project/lotus#7322](https://github.com/filecoin-project/lotus/pull/7322)) + +## Bug Fixes +- Fix Drand fetching around null tipsets ([filecoin-project/lotus#7376](https://github.com/filecoin-project/lotus/pull/7376)) + +## Dependency Updates +- Add [v6 actors](https://github.com/filecoin-project/specs-actors/releases/tag/v6.0.0) + - **Protocol changes** + - Multisig Approve only hashes when hash in params + - FIP 0020 WithdrawBalance methods return withdrawn value + - FIP 0021 Fix bug in power calculation when extending verified deals sectors + - FIP 0022 PublishStorageDeals drops errors in batch + - FIP 0024 BatchBalancer update and burn added to PreCommitBatch + - FIP 0026 Add FaultMaxAge extension + - Reduce calls to power and reward actors by passing values from power cron + - Defensive programming hardening power cron against programmer error + - **Implementation changes** + - Move to xerrors + - Improved logging: burn events are not logged with reasons and burned value. 
+- github.com/filecoin-project/go-state-types (v0.1.1-0.20210810190654-139e0e79e69e -> v0.1.1-0.20210915140513-d354ccf10379): + +## Others +- v1.12.0-rc1 prep ([filecoin-project/lotus#7426](https://github.com/filecoin-project/lotus/pull/7426) +- Extend FaultMaxAge to 6 weeks for actors v6 on test networks only ([filecoin-project/lotus#7421](https://github.com/filecoin-project/lotus/pull/7421)) + +## Contributors + +| Contributor | Commits | Lines Β± | Files Changed | +|-------------|---------|---------|---------------| +| @ZenGround0 | 12 | +4202/-2752 | 187 | +| @arajasek | 25 | +4567/-854 | 190 | +| @laudiacay | 4 | +1276/-435 | 37 | +| @laudiacay | 12 | +1350/-209 | 43 | +| @magik6k | 1 | +171/-13 | 8 | +| @Stebalien | 2 | +115/-12 | 6 | +| @jennijuju | 7 | +73/-34 | 26 | +| @travisperson | 2 | +19/-19 | 7 | +| @coryschwartz | 1 | +16/-2 | 2 | +| @Kubuxu | 5 | +5/-5 | 5 | +| @ribasushi | 1 | +5/-3 | 1 | + # v1.11.3 / 2021-09-29 lotus v1.11.3 is a feature release that's **highly recommended to ALL lotus users to upgrade**, including node diff --git a/Dockerfile.lotus b/Dockerfile.lotus index 72c609305..812ad9f61 100644 --- a/Dockerfile.lotus +++ b/Dockerfile.lotus @@ -36,7 +36,7 @@ WORKDIR /opt/filecoin ARG RUSTFLAGS="" ARG GOFLAGS="" -RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway +RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats FROM ubuntu:20.04 AS base @@ -66,8 +66,6 @@ COPY scripts/docker-lotus-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters ENV LOTUS_PATH /var/lib/lotus -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car ENV DOCKER_LOTUS_IMPORT_WALLET "" @@ -92,8 +90,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ ENV WALLET_PATH 
/var/lib/lotus-wallet -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-wallet RUN chown fc: /var/lib/lotus-wallet @@ -114,10 +110,6 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http - USER fc EXPOSE 1234 @@ -135,11 +127,7 @@ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY scripts/docker-lotus-miner-entrypoint.sh / ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http ENV LOTUS_MINER_PATH /var/lib/lotus-miner -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 -ENV DOCKER_LOTUS_MINER_INIT true RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters @@ -163,10 +151,7 @@ MAINTAINER Lotus Development Team COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 RUN mkdir /var/lib/lotus-worker RUN chown fc: /var/lib/lotus-worker @@ -186,16 +171,11 @@ CMD ["-help"] from base as lotus-all-in-one ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV FULLNODE_API_INFO /ip4/127.0.0.1/tcp/1234/http -ENV LOTUS_JAEGER_AGENT_HOST 127.0.0.1 -ENV LOTUS_JAEGER_AGENT_PORT 6831 ENV LOTUS_MINER_PATH /var/lib/lotus-miner ENV LOTUS_PATH /var/lib/lotus ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV MINER_API_INFO /ip4/127.0.0.1/tcp/2345/http ENV WALLET_PATH /var/lib/lotus-wallet ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car -ENV DOCKER_LOTUS_MINER_INIT 
true COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ @@ -203,6 +183,7 @@ COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ +COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/ RUN mkdir /var/tmp/filecoin-proof-parameters RUN mkdir /var/lib/lotus diff --git a/FAQ.md b/FAQ.md new file mode 100644 index 000000000..6e7f1ea84 --- /dev/null +++ b/FAQ.md @@ -0,0 +1,75 @@ +# Eudico FAQ +_Note: The aim of these FAQs is to get you started fast with Eudico so you can spawn a test network and start contributing to the code in no time. For more in depth documentation or further information about how to perform other actions go to the [Lotus documentation](https://lotus.filecoin.io/docs/set-up/about/), or refer to the base code. Eudico is a fork of Lotus, which means that "almost" everything that works in Lotus should work in Eudico out-of-the-box._ + +## Q: How to import a wallet? + +**A:** Generate a key: +``` +./lotus-keygen -t secp256k1 +``` +Import it (as default if needed): +``` +./eudico wallet import –-as-default –-format=json-lotus $file +``` + +## Q: How to generate the cbor files when creating a new actor? + +**A:** The Cbor library is used to help with marshalling and unmarshalling. +In the same folder where your `$actorName_actor.go` and `$actorName_state.go` files are defined, create a `gen` folder with a `gen.go` file that looks like [this](https://github.com/filecoin-project/eudico/blob/eudico/chain/consensus/hierarchical/actors/sca/gen/gen.go +). Add all the correspondings states and parameters, update the import url and add the package using `go get`. 
+func (t *$YourStruct) MarshalCBOR(w io.Writer) error { return nil }
+
+func (t *$YourStruct) UnmarshalCBOR(r io.Reader) error { return nil }
+```
+Run `go run gen.go`.
+
+
+## Q: How do you send a transaction directly from the code?
+**A:** By using the `api.MpoolPushMessage` API. See how to use it [here](https://github.com/filecoin-project/eudico/blob/113829e7fc115daac08ea0217170baddcb7788ba/chain/consensus/hierarchical/subnet/manager/manager.go#L375-L391).
+
+## Q: How can a built-in actor be initialized?
+**A:** You can see an example of how it is done for the SCA in [this piece of code](https://github.com/filecoin-project/eudico/blob/113829e7fc115daac08ea0217170baddcb7788ba/chain/consensus/hierarchical/actors/subnet/genesis.go#L131).
Run the following commands: + +On client A run the command below to output the target's libp2p address, `ADDR_A` +``` +./eudico net listen +``` + +Run on client B: +``` +./eudico net connect ADDR_A +``` diff --git a/Makefile b/Makefile index a509e701e..eea0e0269 100644 --- a/Makefile +++ b/Makefile @@ -299,6 +299,7 @@ method-gen: api-gen (cd ./lotuspond/front/src/chain && $(GOCC) run ./methodgen.go) actors-gen: + $(GOCC) run ./gen/inline-gen . gen/inlinegen-data.json $(GOCC) run ./chain/actors/agen $(GOCC) fmt ./... @@ -367,4 +368,4 @@ print-%: @echo $*=$($*) circleci: - go generate -x ./.circleci \ No newline at end of file + go generate -x ./.circleci diff --git a/README.md b/README.md index 8246d483e..121f49baa 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,7 @@ See the [official Golang installation instructions](https://golang.org/doc/insta ### Build and install Eudico -Once all the dependencies are installed, you can build and install the Eudico suite (`eudico`, `eudico-miner`, and `eudico-worker`). +Once all the dependencies are installed, you can build and install Eudico. 1. Clone the repository: @@ -82,39 +82,60 @@ Once all the dependencies are installed, you can build and install the Eudico su Note: The default branch `eudico` is the dev branch where the latest new features, bug fixes and improvement are in. -2. To join mainnet -- don't! - - If you are changing networks from a previous installation or there has been a network reset, read the [Switch networks guide](https://docs.filecoin.io/get-started/lotus/switch-networks/) before proceeding. - - For networks other than mainnet, look up the current branch or tag/commit for the network you want to join in the [Filecoin networks dashboard](https://network.filecoin.io), then build Eudico for your specific network below. +2. Build Eudico: ```sh - git checkout - # For example: - git checkout # tag for a release + make eudico ``` + This will create the `eudico` executable in the current directory. 
+If you want to run more than one Eudico node on the same host, you need to tell the nodes to use different folders (see [FAQ](FAQ.md#q-how-can-i-run-two-eudico-peers-on-the-same-host)).
+	// MethodGroup: Wallet
// Available key types: bls, secp256k1, secp256k1-ledger @@ -353,10 +351,11 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. - ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *FileRef) error //perm:admin - // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel - // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieve(ctx context.Context, params RetrievalOrder) (*RestrievalRes, error) //perm:admin + // ClientRetrieveWait waits for retrieval to be complete + ClientRetrieveWait(ctx context.Context, deal retrievalmarket.DealID) error //perm:admin + // ClientExport exports a file stored in the local filestore to a system file + ClientExport(ctx context.Context, exportRef ExportRef, fileRef FileRef) error //perm:admin // ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]RetrievalInfo, error) //perm:write // ClientGetRetrievalUpdates returns status of updated retrieval deals @@ -631,10 +630,14 @@ type FullNode interface { // , , MsigApproveTxnHash(context.Context, address.Address, uint64, address.Address, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message + // It takes the following params: , + MsigCancel(context.Context, address.Address, uint64, address.Address) (*MessagePrototype, error) //perm:sign + // MsigCancel cancels a previously-proposed multisig message // It takes the following params: , , , , // , , - 
MsigCancel(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign + MsigCancelTxnHash(context.Context, address.Address, uint64, address.Address, types.BigInt, address.Address, uint64, []byte) (*MessagePrototype, error) //perm:sign // MsigAddPropose proposes adding a signer in the multisig // It takes the following params: , , @@ -803,6 +806,7 @@ type MsgGasCost struct { type BlockMessages struct { BlsMessages []*types.Message SecpkMessages []*types.SignedMessage + CrossMessages []*types.Message Cids []cid.Cid } @@ -898,6 +902,7 @@ type QueryOffer struct { Size uint64 MinPrice types.BigInt UnsealPrice types.BigInt + PricePerByte abi.TokenAmount PaymentInterval uint64 PaymentIntervalIncrease uint64 Miner address.Address @@ -931,15 +936,14 @@ type MarketDeal struct { } type RetrievalOrder struct { - // TODO: make this less unixfs specific - Root cid.Cid - Piece *cid.Cid - DatamodelPathSelector *textselector.Expression - Size uint64 - - FromLocalCAR string // if specified, get data from a local CARv2 file. 
- // TODO: support offset - Total types.BigInt + Root cid.Cid + Piece *cid.Cid + DataSelector *Selector + + // todo: Size/Total are only used for calculating price per byte; we should let users just pass that + Size uint64 + Total types.BigInt + UnsealPrice types.BigInt PaymentInterval uint64 PaymentIntervalIncrease uint64 @@ -1082,7 +1086,7 @@ type CirculatingSupply struct { type MiningBaseInfo struct { MinerPower types.BigInt NetworkPower types.BigInt - Sectors []builtin.SectorInfo + Sectors []builtin.ExtendedSectorInfo WorkerKey address.Address SectorSize abi.SectorSize PrevBeaconEntry types.BeaconEntry @@ -1100,6 +1104,7 @@ type BlockTemplate struct { Epoch abi.ChainEpoch Timestamp uint64 WinningPoStProof []builtin.PoStProof + CrossMessages []*types.Message } type DataSize struct { diff --git a/api/api_gateway.go b/api/api_gateway.go index 862c6ddb5..fbe2e0cd6 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -31,6 +31,8 @@ import ( type Gateway interface { ChainHasObj(context.Context, cid.Cid) (bool, error) ChainHead(ctx context.Context) (*types.TipSet, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*HeadChange, error) @@ -39,6 +41,7 @@ type Gateway interface { ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) ChainNotify(context.Context) (<-chan []*HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPush(ctx context.Context, sm *types.SignedMessage) (cid.Cid, 
error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) diff --git a/api/api_hierarchical.go b/api/api_hierarchical.go index 6bf4220bc..2e83732c9 100644 --- a/api/api_hierarchical.go +++ b/api/api_hierarchical.go @@ -5,15 +5,23 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" ) type HierarchicalCns interface { - AddSubnet(ctx context.Context, wallet address.Address, parent hierarchical.SubnetID, name string, consensus uint64, minerStake abi.TokenAmount, - delegminer address.Address) (address.Address, error) // perm:write - JoinSubnet(ctx context.Context, wallet address.Address, value abi.TokenAmount, id hierarchical.SubnetID) (cid.Cid, error) // perm:write - MineSubnet(ctx context.Context, wallet address.Address, id hierarchical.SubnetID, stop bool) error // perm:write - LeaveSubnet(ctx context.Context, wallet address.Address, id hierarchical.SubnetID) (cid.Cid, error) // perm:write - KillSubnet(ctx context.Context, wallet address.Address, id hierarchical.SubnetID) (cid.Cid, error) // perm:write + AddSubnet(ctx context.Context, wallet address.Address, parent address.SubnetID, name string, consensus uint64, minerStake abi.TokenAmount, + checkperiod abi.ChainEpoch, delegminer address.Address) (address.Address, error) // perm:write + JoinSubnet(ctx context.Context, wallet address.Address, value abi.TokenAmount, id address.SubnetID) (cid.Cid, error) // perm:write + SyncSubnet(ctx context.Context, id address.SubnetID, stop bool) error // perm:write + MineSubnet(ctx context.Context, wallet address.Address, id address.SubnetID, stop bool) error // perm:read + LeaveSubnet(ctx context.Context, wallet address.Address, id address.SubnetID) 
(cid.Cid, error) // perm:write + KillSubnet(ctx context.Context, wallet address.Address, id address.SubnetID) (cid.Cid, error) // perm:write + ListCheckpoints(ctx context.Context, id address.SubnetID, num int) ([]*schema.Checkpoint, error) // perm:read + ValidateCheckpoint(ctx context.Context, id address.SubnetID, epoch abi.ChainEpoch) (*schema.Checkpoint, error) // perm:read + GetCrossMsgsPool(ctx context.Context, id address.SubnetID, height abi.ChainEpoch) ([]*types.Message, error) // perm:read + FundSubnet(ctx context.Context, wallet address.Address, id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) // perm:write + ReleaseFunds(ctx context.Context, wallet address.Address, id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) // perm:write + CrossMsgResolve(ctx context.Context, id address.SubnetID, c cid.Cid, from address.SubnetID) ([]types.Message, error) // perm:read } diff --git a/api/api_net.go b/api/api_net.go index 4cf9ca336..7dddb09ac 100644 --- a/api/api_net.go +++ b/api/api_net.go @@ -51,6 +51,11 @@ type Net interface { NetBlockRemove(ctx context.Context, acl NetBlockList) error //perm:admin NetBlockList(ctx context.Context) (NetBlockList, error) //perm:read + // ResourceManager API + NetStat(ctx context.Context, scope string) (NetStat, error) //perm:read + NetLimit(ctx context.Context, scope string) (NetLimit, error) //perm:read + NetSetLimit(ctx context.Context, scope string, limit NetLimit) error //perm:admin + // ID returns peerID of libp2p node backing this API ID(context.Context) (peer.ID, error) //perm:read } diff --git a/api/api_storage.go b/api/api_storage.go index 6ebee9908..c032a8e1b 100644 --- a/api/api_storage.go +++ b/api/api_storage.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-address" datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/filecoin-project/go-state-types/abi" + abinetwork "github.com/filecoin-project/go-state-types/network" 
"github.com/filecoin-project/specs-actors/v2/actors/builtin/market" "github.com/filecoin-project/specs-storage/storage" @@ -99,8 +100,8 @@ type StorageMiner interface { // Returns null if message wasn't sent SectorTerminateFlush(ctx context.Context) (*cid.Cid, error) //perm:admin // SectorTerminatePending returns a list of pending sector terminations to be sent in the next batch message - SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin - SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error //perm:admin + SectorTerminatePending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error //perm:admin // SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. // Returns null if message wasn't sent SectorPreCommitFlush(ctx context.Context) ([]sealiface.PreCommitBatchRes, error) //perm:admin @@ -111,6 +112,7 @@ type StorageMiner interface { SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) //perm:admin // SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message SectorCommitPending(ctx context.Context) ([]abi.SectorID, error) //perm:admin + SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error //perm:admin // WorkerConnect tells the node to connect to workers RPC WorkerConnect(context.Context, string) error //perm:admin retry:true @@ -118,17 +120,21 @@ type StorageMiner interface { WorkerJobs(context.Context) (map[uuid.UUID][]storiface.WorkerJob, error) //perm:admin //storiface.WorkerReturn - ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealPreCommit2(ctx context.Context, callID 
storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true - ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true - ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true - ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true - ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit1(ctx context.Context, callID storiface.CallID, p1o storage.PreCommit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealPreCommit2(ctx context.Context, callID storiface.CallID, sealed storage.SectorCids, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit1(ctx context.Context, callID storiface.CallID, out storage.Commit1Out, err *storiface.CallError) error //perm:admin retry:true + ReturnSealCommit2(ctx context.Context, callID storiface.CallID, proof storage.Proof, err *storiface.CallError) error //perm:admin retry:true + ReturnFinalizeSector(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + 
ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, vanillaProofs storage.ReplicaVanillaProofs, err *storiface.CallError) error //perm:admin retry:true + ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error //perm:admin retry:true + ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReleaseUnsealed(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnUnsealPiece(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true + ReturnReadPiece(ctx context.Context, callID storiface.CallID, ok bool, err *storiface.CallError) error //perm:admin retry:true + ReturnFetch(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error //perm:admin retry:true // SealingSchedDiag dumps internal sealing scheduler state SealingSchedDiag(ctx context.Context, doSched bool) (interface{}, error) //perm:admin @@ -145,6 +151,7 @@ type StorageMiner interface { StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error //perm:admin StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) //perm:admin StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) //perm:admin + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) //perm:admin StorageLocal(ctx context.Context) (map[stores.ID]string, error) //perm:admin StorageStat(ctx context.Context, id 
stores.ID) (fsutil.FsStat, error) //perm:admin @@ -160,12 +167,15 @@ type StorageMiner interface { MarketGetRetrievalAsk(ctx context.Context) (*retrievalmarket.Ask, error) //perm:read MarketListDataTransfers(ctx context.Context) ([]DataTransferChannel, error) //perm:write MarketDataTransferUpdates(ctx context.Context) (<-chan DataTransferChannel, error) //perm:write + // MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync + MarketDataTransferDiagnostics(ctx context.Context, p peer.ID) (*TransferDiagnostics, error) //perm:write // MarketRestartDataTransfer attempts to restart a data transfer with the given transfer ID and other peer MarketRestartDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write // MarketCancelDataTransfer cancels a data transfer with the given transfer ID and other peer MarketCancelDataTransfer(ctx context.Context, transferID datatransfer.TransferID, otherPeer peer.ID, isInitiator bool) error //perm:write MarketPendingDeals(ctx context.Context) (PendingDealInfo, error) //perm:write MarketPublishPendingDeals(ctx context.Context) error //perm:admin + MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error //perm:admin // DagstoreListShards returns information about all shards known to the // DAG store. Only available on nodes running the markets subsystem. 
@@ -245,7 +255,7 @@ type StorageMiner interface { CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []storage.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) //perm:admin - ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) //perm:read + ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv abinetwork.Version) ([]builtin.PoStProof, error) //perm:read } var _ storiface.WorkerReturn = *new(StorageMiner) diff --git a/api/api_worker.go b/api/api_worker.go index 4553c30e0..68d8e7baf 100644 --- a/api/api_worker.go +++ b/api/api_worker.go @@ -39,6 +39,10 @@ type Worker interface { SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) //perm:admin SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) //perm:admin FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) //perm:admin + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) //perm:admin + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) //perm:admin + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) //perm:admin ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (storiface.CallID, error) //perm:admin MoveStorage(ctx context.Context, sector 
storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) //perm:admin UnsealPiece(context.Context, storage.SectorRef, storiface.UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (storiface.CallID, error) //perm:admin diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 25b9ac8c9..1190b0dc4 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -1,6 +1,7 @@ package docgen import ( + "encoding/json" "fmt" "go/ast" "go/parser" @@ -15,6 +16,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" "github.com/libp2p/go-libp2p-core/metrics" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" @@ -91,6 +93,8 @@ func init() { storeIDExample := imports.ID(50) textSelExample := textselector.Expression("Links/21/Hash/Links/42/Hash") + apiSelExample := api.Selector("Links/21/Hash/Links/42/Hash") + clientEvent := retrievalmarket.ClientEventDealAccepted addExample(bitfield.NewFromSet([]uint64{5})) addExample(abi.RegisteredSealProof_StackedDrg32GiBV1_1) @@ -118,13 +122,17 @@ func init() { addExample(api.FullAPIVersion1) addExample(api.PCHInbound) addExample(time.Minute) + addExample(graphsync.RequestID(4)) addExample(datatransfer.TransferID(3)) addExample(datatransfer.Ongoing) addExample(storeIDExample) addExample(&storeIDExample) + addExample(clientEvent) + addExample(&clientEvent) addExample(retrievalmarket.ClientEventDealAccepted) addExample(retrievalmarket.DealStatusNew) addExample(&textSelExample) + addExample(&apiSelExample) addExample(network.ReachabilityPublic) addExample(build.NewestNetworkVersion) addExample(map[string]int{"name": 42}) @@ -226,16 +234,18 @@ func init() { Hostname: "host", Resources: storiface.WorkerResources{ MemPhysical: 256 << 30, + MemUsed: 2 << 30, MemSwap: 120 << 30, - MemReserved: 2 << 30, + MemSwapUsed: 2 << 30, CPUs: 64, GPUs: []string{"aGPU 1337"}, + Resources: 
storiface.ResourceTable, }, }, Enabled: true, MemUsedMin: 0, MemUsedMax: 0, - GpuUsed: false, + GpuUsed: 0, CpuUse: 0, }, }) @@ -243,10 +253,18 @@ func init() { addExample(map[abi.SectorNumber]string{ 123: "can't acquire read lock", }) + addExample(json.RawMessage(`"json raw message"`)) addExample(map[api.SectorState]int{ api.SectorState(sealing.Proving): 120, }) addExample([]abi.SectorNumber{123, 124}) + addExample([]storiface.SectorLock{ + { + Sector: abi.SectorID{Number: 123, Miner: 1000}, + Write: [storiface.FileTypes]uint{0, 0, 1}, + Read: [storiface.FileTypes]uint{2, 3, 0}, + }, + }) // worker specific addExample(storiface.AcquireMove) @@ -281,6 +299,35 @@ func init() { State: "ShardStateAvailable", Error: "", }) + addExample(storiface.ResourceTable) + addExample(network.ScopeStat{ + Memory: 123, + NumStreamsInbound: 1, + NumStreamsOutbound: 2, + NumConnsInbound: 3, + NumConnsOutbound: 4, + NumFD: 5, + }) + addExample(map[string]network.ScopeStat{ + "abc": { + Memory: 123, + NumStreamsInbound: 1, + NumStreamsOutbound: 2, + NumConnsInbound: 3, + NumConnsOutbound: 4, + NumFD: 5, + }}) + addExample(api.NetLimit{ + Memory: 123, + StreamsInbound: 1, + StreamsOutbound: 2, + Streams: 3, + ConnsInbound: 3, + ConnsOutbound: 4, + Conns: 4, + FD: 5, + }) + } func GetAPIType(name, pkg string) (i interface{}, t reflect.Type, permStruct []reflect.Type) { @@ -331,7 +378,7 @@ func ExampleValue(method string, t, parent reflect.Type) interface{} { switch t.Kind() { case reflect.Slice: out := reflect.New(t).Elem() - reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) + out = reflect.Append(out, reflect.ValueOf(ExampleValue(method, t.Elem(), t))) return out.Interface() case reflect.Chan: return ExampleValue(method, t.Elem(), nil) diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 73c1923f6..9f5ce8237 100644 --- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -23,10 +23,9 @@ import ( api "github.com/filecoin-project/lotus/api" apitypes 
"github.com/filecoin-project/lotus/api/types" miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - hierarchical "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" dtypes "github.com/filecoin-project/lotus/node/modules/dtypes" imports "github.com/filecoin-project/lotus/node/repo/imports" miner0 "github.com/filecoin-project/specs-actors/actors/builtin/miner" @@ -64,18 +63,18 @@ func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { } // AddSubnet mocks base method. -func (m *MockFullNode) AddSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID, arg3 string, arg4 uint64, arg5 big.Int, arg6 address.Address) (address.Address, error) { +func (m *MockFullNode) AddSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 string, arg4 uint64, arg5 big.Int, arg6 abi.ChainEpoch, arg7 address.Address) (address.Address, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddSubnet", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret := m.ctrl.Call(m, "AddSubnet", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(address.Address) ret1, _ := ret[1].(error) return ret0, ret1 } // AddSubnet indicates an expected call of AddSubnet. 
-func (mr *MockFullNodeMockRecorder) AddSubnet(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) AddSubnet(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockFullNode)(nil).AddSubnet), arg0, arg1, arg2, arg3, arg4, arg5, arg6) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockFullNode)(nil).AddSubnet), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // AuthNew mocks base method. @@ -553,6 +552,20 @@ func (mr *MockFullNodeMockRecorder) ClientDealSize(arg0, arg1 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientDealSize", reflect.TypeOf((*MockFullNode)(nil).ClientDealSize), arg0, arg1) } +// ClientExport mocks base method. +func (m *MockFullNode) ClientExport(arg0 context.Context, arg1 api.ExportRef, arg2 api.FileRef) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClientExport", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClientExport indicates an expected call of ClientExport. +func (mr *MockFullNodeMockRecorder) ClientExport(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientExport", reflect.TypeOf((*MockFullNode)(nil).ClientExport), arg0, arg1, arg2) +} + // ClientFindData mocks base method. func (m *MockFullNode) ClientFindData(arg0 context.Context, arg1 cid.Cid, arg2 *cid.Cid) ([]api.QueryOffer, error) { m.ctrl.T.Helper() @@ -791,17 +804,18 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, } // ClientRetrieve mocks base method. 
-func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder) (*api.RestrievalRes, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1) + ret0, _ := ret[0].(*api.RestrievalRes) + ret1, _ := ret[1].(error) + return ret0, ret1 } // ClientRetrieve indicates an expected call of ClientRetrieve. -func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1, arg2 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) ClientRetrieve(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieve", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieve), arg0, arg1) } // ClientRetrieveTryRestartInsufficientFunds mocks base method. @@ -818,19 +832,18 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveTryRestartInsufficientFunds", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveTryRestartInsufficientFunds), arg0, arg1) } -// ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +// ClientRetrieveWait mocks base method. 
+func (m *MockFullNode) ClientRetrieveWait(arg0 context.Context, arg1 retrievalmarket.DealID) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) - ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret := m.ctrl.Call(m, "ClientRetrieveWait", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 } -// ClientRetrieveWithEvents indicates an expected call of ClientRetrieveWithEvents. -func (mr *MockFullNodeMockRecorder) ClientRetrieveWithEvents(arg0, arg1, arg2 interface{}) *gomock.Call { +// ClientRetrieveWait indicates an expected call of ClientRetrieveWait. +func (mr *MockFullNodeMockRecorder) ClientRetrieveWait(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWithEvents", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWithEvents), arg0, arg1, arg2) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClientRetrieveWait", reflect.TypeOf((*MockFullNode)(nil).ClientRetrieveWait), arg0, arg1) } // ClientStartDeal mocks base method. @@ -892,6 +905,21 @@ func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) } +// CrossMsgResolve mocks base method. +func (m *MockFullNode) CrossMsgResolve(arg0 context.Context, arg1 address.SubnetID, arg2 cid.Cid, arg3 address.SubnetID) ([]types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossMsgResolve", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CrossMsgResolve indicates an expected call of CrossMsgResolve. 
+func (mr *MockFullNodeMockRecorder) CrossMsgResolve(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossMsgResolve", reflect.TypeOf((*MockFullNode)(nil).CrossMsgResolve), arg0, arg1, arg2, arg3) +} + // Discover mocks base method. func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) { m.ctrl.T.Helper() @@ -907,6 +935,21 @@ func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0) } +// FundSubnet mocks base method. +func (m *MockFullNode) FundSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FundSubnet", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FundSubnet indicates an expected call of FundSubnet. +func (mr *MockFullNodeMockRecorder) FundSubnet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FundSubnet", reflect.TypeOf((*MockFullNode)(nil).FundSubnet), arg0, arg1, arg2, arg3) +} + // GasEstimateFeeCap mocks base method. func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() @@ -967,6 +1010,21 @@ func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } +// GetCrossMsgsPool mocks base method. 
+func (m *MockFullNode) GetCrossMsgsPool(arg0 context.Context, arg1 address.SubnetID, arg2 abi.ChainEpoch) ([]*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCrossMsgsPool", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCrossMsgsPool indicates an expected call of GetCrossMsgsPool. +func (mr *MockFullNodeMockRecorder) GetCrossMsgsPool(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCrossMsgsPool", reflect.TypeOf((*MockFullNode)(nil).GetCrossMsgsPool), arg0, arg1, arg2) +} + // ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() @@ -983,7 +1041,7 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { } // JoinSubnet mocks base method. -func (m *MockFullNode) JoinSubnet(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) JoinSubnet(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "JoinSubnet", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(cid.Cid) @@ -998,7 +1056,7 @@ func (mr *MockFullNodeMockRecorder) JoinSubnet(arg0, arg1, arg2, arg3 interface{ } // KillSubnet mocks base method. -func (m *MockFullNode) KillSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) KillSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KillSubnet", arg0, arg1, arg2) ret0, _ := ret[0].(cid.Cid) @@ -1013,7 +1071,7 @@ func (mr *MockFullNodeMockRecorder) KillSubnet(arg0, arg1, arg2 interface{}) *go } // LeaveSubnet mocks base method. 
-func (m *MockFullNode) LeaveSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) LeaveSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LeaveSubnet", arg0, arg1, arg2) ret0, _ := ret[0].(cid.Cid) @@ -1027,6 +1085,21 @@ func (mr *MockFullNodeMockRecorder) LeaveSubnet(arg0, arg1, arg2 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveSubnet", reflect.TypeOf((*MockFullNode)(nil).LeaveSubnet), arg0, arg1, arg2) } +// ListCheckpoints mocks base method. +func (m *MockFullNode) ListCheckpoints(arg0 context.Context, arg1 address.SubnetID, arg2 int) ([]*schema.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListCheckpoints", arg0, arg1, arg2) + ret0, _ := ret[0].([]*schema.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListCheckpoints indicates an expected call of ListCheckpoints. +func (mr *MockFullNodeMockRecorder) ListCheckpoints(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCheckpoints", reflect.TypeOf((*MockFullNode)(nil).ListCheckpoints), arg0, arg1, arg2) +} + // LogAlerts mocks base method. func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) { m.ctrl.T.Helper() @@ -1146,7 +1219,7 @@ func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interf } // MineSubnet mocks base method. 
-func (m *MockFullNode) MineSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID, arg3 bool) error { +func (m *MockFullNode) MineSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MineSubnet", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -1503,18 +1576,33 @@ func (mr *MockFullNodeMockRecorder) MsigApproveTxnHash(arg0, arg1, arg2, arg3, a } // MsigCancel mocks base method. -func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { +func (m *MockFullNode) MsigCancel(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address) (*api.MessagePrototype, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret := m.ctrl.Call(m, "MsigCancel", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(*api.MessagePrototype) ret1, _ := ret[1].(error) return ret0, ret1 } // MsigCancel indicates an expected call of MsigCancel. -func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) MsigCancel(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancel", reflect.TypeOf((*MockFullNode)(nil).MsigCancel), arg0, arg1, arg2, arg3) +} + +// MsigCancelTxnHash mocks base method. 
+func (m *MockFullNode) MsigCancelTxnHash(arg0 context.Context, arg1 address.Address, arg2 uint64, arg3 address.Address, arg4 big.Int, arg5 address.Address, arg6 uint64, arg7 []byte) (*api.MessagePrototype, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MsigCancelTxnHash", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + ret0, _ := ret[0].(*api.MessagePrototype) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MsigCancelTxnHash indicates an expected call of MsigCancelTxnHash. +func (mr *MockFullNodeMockRecorder) MsigCancelTxnHash(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MsigCancelTxnHash", reflect.TypeOf((*MockFullNode)(nil).MsigCancelTxnHash), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // MsigCreate mocks base method. @@ -1858,6 +1946,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) } +// NetLimit mocks base method. +func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetLimit", arg0, arg1) + ret0, _ := ret[0].(api.NetLimit) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetLimit indicates an expected call of NetLimit. +func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1) +} + // NetPeerInfo mocks base method. 
func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { m.ctrl.T.Helper() @@ -1903,6 +2006,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) } +// NetSetLimit mocks base method. +func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetSetLimit indicates an expected call of NetSetLimit. +func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2) +} + +// NetStat mocks base method. +func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetStat", arg0, arg1) + ret0, _ := ret[0].(api.NetStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetStat indicates an expected call of NetStat. +func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1) +} + // NodeStatus mocks base method. func (m *MockFullNode) NodeStatus(arg0 context.Context, arg1 bool) (api.NodeStatus, error) { m.ctrl.T.Helper() @@ -2157,6 +2289,21 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) } +// ReleaseFunds mocks base method. 
+func (m *MockFullNode) ReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReleaseFunds indicates an expected call of ReleaseFunds. +func (mr *MockFullNodeMockRecorder) ReleaseFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).ReleaseFunds), arg0, arg1, arg2, arg3) +} + // Session mocks base method. func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { m.ctrl.T.Helper() @@ -2993,6 +3140,20 @@ func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) } +// SyncSubnet mocks base method. +func (m *MockFullNode) SyncSubnet(arg0 context.Context, arg1 address.SubnetID, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubnet", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubnet indicates an expected call of SyncSubnet. +func (mr *MockFullNodeMockRecorder) SyncSubnet(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubnet", reflect.TypeOf((*MockFullNode)(nil).SyncSubnet), arg0, arg1, arg2) +} + // SyncUnmarkAllBad mocks base method. 
func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { m.ctrl.T.Helper() @@ -3036,6 +3197,21 @@ func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) } +// ValidateCheckpoint mocks base method. +func (m *MockFullNode) ValidateCheckpoint(arg0 context.Context, arg1 address.SubnetID, arg2 abi.ChainEpoch) (*schema.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateCheckpoint", arg0, arg1, arg2) + ret0, _ := ret[0].(*schema.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateCheckpoint indicates an expected call of ValidateCheckpoint. +func (mr *MockFullNodeMockRecorder) ValidateCheckpoint(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCheckpoint", reflect.TypeOf((*MockFullNode)(nil).ValidateCheckpoint), arg0, arg1, arg2) +} + // Version mocks base method. 
func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index 5164e16d5..ad6ef4eb7 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -17,11 +17,12 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" + abinetwork "github.com/filecoin-project/go-state-types/network" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -29,7 +30,6 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" "github.com/filecoin-project/lotus/journal/alerting" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/repo/imports" "github.com/filecoin-project/specs-storage/storage" @@ -165,6 +165,8 @@ type FullNodeStruct struct { ClientDealSize func(p0 context.Context, p1 cid.Cid) (DataSize, error) `perm:"read"` + ClientExport func(p0 context.Context, p1 ExportRef, p2 FileRef) error `perm:"admin"` + ClientFindData func(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) `perm:"read"` ClientGenCar func(p0 context.Context, p1 FileRef, p2 string) error `perm:"write"` @@ -197,11 +199,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, 
p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWait func(p0 context.Context, p1 retrievalmarket.DealID) error `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -273,7 +275,9 @@ type FullNodeStruct struct { MsigApproveTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 address.Address, p5 types.BigInt, p6 address.Address, p7 uint64, p8 []byte) (*MessagePrototype, error) `perm:"sign"` - MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` + MsigCancel func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) `perm:"sign"` + + MsigCancelTxnHash func(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) `perm:"sign"` MsigCreate func(p0 context.Context, p1 uint64, p2 []address.Address, p3 abi.ChainEpoch, p4 types.BigInt, p5 address.Address, p6 types.BigInt) (*MessagePrototype, error) `perm:"sign"` @@ -483,8 +487,14 @@ type GatewayStruct struct { Internal struct { ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` + ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` + ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` + ChainGetParentMessages 
func(p0 context.Context, p1 cid.Cid) ([]Message, error) `` + + ChainGetParentReceipts func(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) `` + ChainGetPath func(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) `` ChainGetTipSet func(p0 context.Context, p1 types.TipSetKey) (*types.TipSet, error) `` @@ -554,15 +564,29 @@ type GatewayStub struct { type HierarchicalCnsStruct struct { Internal struct { - AddSubnet func(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 address.Address) (address.Address, error) `perm:"write"` + AddSubnet func(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 abi.ChainEpoch, p7 address.Address) (address.Address, error) `perm:"write"` + + CrossMsgResolve func(p0 context.Context, p1 address.SubnetID, p2 cid.Cid, p3 address.SubnetID) ([]types.Message, error) `perm:"read"` + + FundSubnet func(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) `perm:"write"` - JoinSubnet func(p0 context.Context, p1 address.Address, p2 abi.TokenAmount, p3 hierarchical.SubnetID) (cid.Cid, error) `perm:"write"` + GetCrossMsgsPool func(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) ([]*types.Message, error) `perm:"read"` - KillSubnet func(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) `perm:"write"` + JoinSubnet func(p0 context.Context, p1 address.Address, p2 abi.TokenAmount, p3 address.SubnetID) (cid.Cid, error) `perm:"write"` - LeaveSubnet func(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) `perm:"write"` + KillSubnet func(p0 context.Context, p1 address.Address, p2 address.SubnetID) (cid.Cid, error) `perm:"write"` - MineSubnet func(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 bool) error `perm:"write"` + LeaveSubnet func(p0 context.Context, p1 
address.Address, p2 address.SubnetID) (cid.Cid, error) `perm:"write"` + + ListCheckpoints func(p0 context.Context, p1 address.SubnetID, p2 int) ([]*schema.Checkpoint, error) `perm:"read"` + + MineSubnet func(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 bool) error `perm:"read"` + + ReleaseFunds func(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) `perm:"write"` + + SyncSubnet func(p0 context.Context, p1 address.SubnetID, p2 bool) error `perm:"write"` + + ValidateCheckpoint func(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) (*schema.Checkpoint, error) `perm:"read"` } } @@ -599,11 +623,17 @@ type NetStruct struct { NetFindPeer func(p0 context.Context, p1 peer.ID) (peer.AddrInfo, error) `perm:"read"` + NetLimit func(p0 context.Context, p1 string) (NetLimit, error) `perm:"read"` + NetPeerInfo func(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) `perm:"read"` NetPeers func(p0 context.Context) ([]peer.AddrInfo, error) `perm:"read"` NetPubsubScores func(p0 context.Context) ([]PubsubScore, error) `perm:"read"` + + NetSetLimit func(p0 context.Context, p1 string, p2 NetLimit) error `perm:"admin"` + + NetStat func(p0 context.Context, p1 string) (NetStat, error) `perm:"read"` } } @@ -633,7 +663,7 @@ type StorageMinerStruct struct { CheckProvable func(p0 context.Context, p1 abi.RegisteredPoStProof, p2 []storage.SectorRef, p3 bool) (map[abi.SectorNumber]string, error) `perm:"admin"` - ComputeProof func(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) `perm:"read"` + ComputeProof func(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) `perm:"read"` CreateBackup func(p0 context.Context, p1 string) error `perm:"admin"` @@ -681,6 +711,8 @@ type StorageMinerStruct struct { MarketCancelDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, 
p2 peer.ID, p3 bool) error `perm:"write"` + MarketDataTransferDiagnostics func(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) `perm:"write"` + MarketDataTransferUpdates func(p0 context.Context) (<-chan DataTransferChannel, error) `perm:"write"` MarketGetAsk func(p0 context.Context) (*storagemarket.SignedStorageAsk, error) `perm:"read"` @@ -705,6 +737,8 @@ type StorageMinerStruct struct { MarketRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` + MarketRetryPublishDeal func(p0 context.Context, p1 cid.Cid) error `perm:"admin"` + MarketSetAsk func(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error `perm:"admin"` MarketSetRetrievalAsk func(p0 context.Context, p1 *retrievalmarket.Ask) error `perm:"admin"` @@ -727,12 +761,20 @@ type StorageMinerStruct struct { ReturnFinalizeSector func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnGenerateSectorKeyFromData func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnMoveStorage func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnProveReplicaUpdate1 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error `perm:"admin"` + + ReturnProveReplicaUpdate2 func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error `perm:"admin"` + ReturnReadPiece func(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error `perm:"admin"` ReturnReleaseUnsealed func(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error `perm:"admin"` + ReturnReplicaUpdate func(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error `perm:"admin"` + ReturnSealCommit1 func(p0 context.Context, 
p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error `perm:"admin"` ReturnSealCommit2 func(p0 context.Context, p1 storiface.CallID, p2 storage.Proof, p3 *storiface.CallError) error `perm:"admin"` @@ -759,7 +801,9 @@ type StorageMinerStruct struct { SectorGetSealDelay func(p0 context.Context) (time.Duration, error) `perm:"read"` - SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber) error `perm:"admin"` + SectorMarkForUpgrade func(p0 context.Context, p1 abi.SectorNumber, p2 bool) error `perm:"admin"` + + SectorMatchPendingPiecesToOpenSectors func(p0 context.Context) error `perm:"admin"` SectorPreCommitFlush func(p0 context.Context) ([]sealiface.PreCommitBatchRes, error) `perm:"admin"` @@ -805,6 +849,8 @@ type StorageMinerStruct struct { StorageFindSector func(p0 context.Context, p1 abi.SectorID, p2 storiface.SectorFileType, p3 abi.SectorSize, p4 bool) ([]stores.SectorStorageInfo, error) `perm:"admin"` + StorageGetLocks func(p0 context.Context) (storiface.SectorLocks, error) `perm:"admin"` + StorageInfo func(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) `perm:"admin"` StorageList func(p0 context.Context) (map[stores.ID][]stores.Decl, error) `perm:"admin"` @@ -864,6 +910,8 @@ type WorkerStruct struct { FinalizeSector func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` + GenerateSectorKeyFromData func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) `perm:"admin"` + Info func(p0 context.Context) (storiface.WorkerInfo, error) `perm:"admin"` MoveStorage func(p0 context.Context, p1 storage.SectorRef, p2 storiface.SectorFileType) (storiface.CallID, error) `perm:"admin"` @@ -872,10 +920,16 @@ type WorkerStruct struct { ProcessSession func(p0 context.Context) (uuid.UUID, error) `perm:"admin"` + ProveReplicaUpdate1 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) `perm:"admin"` + 
+ ProveReplicaUpdate2 func(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) `perm:"admin"` + ReleaseUnsealed func(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) `perm:"admin"` Remove func(p0 context.Context, p1 abi.SectorID) error `perm:"admin"` + ReplicaUpdate func(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) `perm:"admin"` + SealCommit1 func(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 []abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) `perm:"admin"` SealCommit2 func(p0 context.Context, p1 storage.SectorRef, p2 storage.Commit1Out) (storiface.CallID, error) `perm:"admin"` @@ -1369,6 +1423,17 @@ func (s *FullNodeStub) ClientDealSize(p0 context.Context, p1 cid.Cid) (DataSize, return *new(DataSize), ErrNotSupported } +func (s *FullNodeStruct) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { + if s.Internal.ClientExport == nil { + return ErrNotSupported + } + return s.Internal.ClientExport(p0, p1, p2) +} + +func (s *FullNodeStub) ClientExport(p0 context.Context, p1 ExportRef, p2 FileRef) error { + return ErrNotSupported +} + func (s *FullNodeStruct) ClientFindData(p0 context.Context, p1 cid.Cid, p2 *cid.Cid) ([]QueryOffer, error) { if s.Internal.ClientFindData == nil { return *new([]QueryOffer), ErrNotSupported @@ -1545,15 +1610,15 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { if s.Internal.ClientRetrieve == nil { - return ErrNotSupported + return nil, ErrNotSupported } - return s.Internal.ClientRetrieve(p0, p1, p2) + return 
s.Internal.ClientRetrieve(p0, p1) } -func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) error { - return ErrNotSupported +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder) (*RestrievalRes, error) { + return nil, ErrNotSupported } func (s *FullNodeStruct) ClientRetrieveTryRestartInsufficientFunds(p0 context.Context, p1 address.Address) error { @@ -1567,15 +1632,15 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - if s.Internal.ClientRetrieveWithEvents == nil { - return nil, ErrNotSupported +func (s *FullNodeStruct) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + if s.Internal.ClientRetrieveWait == nil { + return ErrNotSupported } - return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) + return s.Internal.ClientRetrieveWait(p0, p1) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *FileRef) (<-chan marketevents.RetrievalEvent, error) { - return nil, ErrNotSupported +func (s *FullNodeStub) ClientRetrieveWait(p0 context.Context, p1 retrievalmarket.DealID) error { + return ErrNotSupported } func (s *FullNodeStruct) ClientStartDeal(p0 context.Context, p1 *StartDealParams) (*cid.Cid, error) { @@ -1963,14 +2028,25 @@ func (s *FullNodeStub) MsigApproveTxnHash(p0 context.Context, p1 address.Address return nil, ErrNotSupported } -func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStruct) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { if s.Internal.MsigCancel == nil { return nil, ErrNotSupported } - return 
s.Internal.MsigCancel(p0, p1, p2, p3, p4, p5, p6, p7) + return s.Internal.MsigCancel(p0, p1, p2, p3) } -func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { +func (s *FullNodeStub) MsigCancel(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address) (*MessagePrototype, error) { + return nil, ErrNotSupported +} + +func (s *FullNodeStruct) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { + if s.Internal.MsigCancelTxnHash == nil { + return nil, ErrNotSupported + } + return s.Internal.MsigCancelTxnHash(p0, p1, p2, p3, p4, p5, p6, p7) +} + +func (s *FullNodeStub) MsigCancelTxnHash(p0 context.Context, p1 address.Address, p2 uint64, p3 address.Address, p4 types.BigInt, p5 address.Address, p6 uint64, p7 []byte) (*MessagePrototype, error) { return nil, ErrNotSupported } @@ -3052,6 +3128,17 @@ func (s *GatewayStub) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*Bl return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + if s.Internal.ChainGetGenesis == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainGetGenesis(p0) +} + +func (s *GatewayStub) ChainGetGenesis(p0 context.Context) (*types.TipSet, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Message, error) { if s.Internal.ChainGetMessage == nil { return nil, ErrNotSupported @@ -3063,6 +3150,28 @@ func (s *GatewayStub) ChainGetMessage(p0 context.Context, p1 cid.Cid) (*types.Me return nil, ErrNotSupported } +func (s *GatewayStruct) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + if s.Internal.ChainGetParentMessages == nil { + return *new([]Message), 
ErrNotSupported + } + return s.Internal.ChainGetParentMessages(p0, p1) +} + +func (s *GatewayStub) ChainGetParentMessages(p0 context.Context, p1 cid.Cid) ([]Message, error) { + return *new([]Message), ErrNotSupported +} + +func (s *GatewayStruct) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + if s.Internal.ChainGetParentReceipts == nil { + return *new([]*types.MessageReceipt), ErrNotSupported + } + return s.Internal.ChainGetParentReceipts(p0, p1) +} + +func (s *GatewayStub) ChainGetParentReceipts(p0 context.Context, p1 cid.Cid) ([]*types.MessageReceipt, error) { + return *new([]*types.MessageReceipt), ErrNotSupported +} + func (s *GatewayStruct) ChainGetPath(p0 context.Context, p1 types.TipSetKey, p2 types.TipSetKey) ([]*HeadChange, error) { if s.Internal.ChainGetPath == nil { return *new([]*HeadChange), ErrNotSupported @@ -3404,61 +3513,138 @@ func (s *GatewayStub) WalletBalance(p0 context.Context, p1 address.Address) (typ return *new(types.BigInt), ErrNotSupported } -func (s *HierarchicalCnsStruct) AddSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 address.Address) (address.Address, error) { +func (s *HierarchicalCnsStruct) AddSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 abi.ChainEpoch, p7 address.Address) (address.Address, error) { if s.Internal.AddSubnet == nil { return *new(address.Address), ErrNotSupported } - return s.Internal.AddSubnet(p0, p1, p2, p3, p4, p5, p6) + return s.Internal.AddSubnet(p0, p1, p2, p3, p4, p5, p6, p7) } -func (s *HierarchicalCnsStub) AddSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 address.Address) (address.Address, error) { +func (s *HierarchicalCnsStub) AddSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 string, p4 uint64, p5 abi.TokenAmount, p6 abi.ChainEpoch, 
p7 address.Address) (address.Address, error) { return *new(address.Address), ErrNotSupported } -func (s *HierarchicalCnsStruct) JoinSubnet(p0 context.Context, p1 address.Address, p2 abi.TokenAmount, p3 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStruct) CrossMsgResolve(p0 context.Context, p1 address.SubnetID, p2 cid.Cid, p3 address.SubnetID) ([]types.Message, error) { + if s.Internal.CrossMsgResolve == nil { + return *new([]types.Message), ErrNotSupported + } + return s.Internal.CrossMsgResolve(p0, p1, p2, p3) +} + +func (s *HierarchicalCnsStub) CrossMsgResolve(p0 context.Context, p1 address.SubnetID, p2 cid.Cid, p3 address.SubnetID) ([]types.Message, error) { + return *new([]types.Message), ErrNotSupported +} + +func (s *HierarchicalCnsStruct) FundSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) { + if s.Internal.FundSubnet == nil { + return *new(cid.Cid), ErrNotSupported + } + return s.Internal.FundSubnet(p0, p1, p2, p3) +} + +func (s *HierarchicalCnsStub) FundSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) { + return *new(cid.Cid), ErrNotSupported +} + +func (s *HierarchicalCnsStruct) GetCrossMsgsPool(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) ([]*types.Message, error) { + if s.Internal.GetCrossMsgsPool == nil { + return *new([]*types.Message), ErrNotSupported + } + return s.Internal.GetCrossMsgsPool(p0, p1, p2) +} + +func (s *HierarchicalCnsStub) GetCrossMsgsPool(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) ([]*types.Message, error) { + return *new([]*types.Message), ErrNotSupported +} + +func (s *HierarchicalCnsStruct) JoinSubnet(p0 context.Context, p1 address.Address, p2 abi.TokenAmount, p3 address.SubnetID) (cid.Cid, error) { if s.Internal.JoinSubnet == nil { return *new(cid.Cid), ErrNotSupported } return s.Internal.JoinSubnet(p0, p1, p2, p3) } -func (s *HierarchicalCnsStub) JoinSubnet(p0 
context.Context, p1 address.Address, p2 abi.TokenAmount, p3 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStub) JoinSubnet(p0 context.Context, p1 address.Address, p2 abi.TokenAmount, p3 address.SubnetID) (cid.Cid, error) { return *new(cid.Cid), ErrNotSupported } -func (s *HierarchicalCnsStruct) KillSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStruct) KillSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID) (cid.Cid, error) { if s.Internal.KillSubnet == nil { return *new(cid.Cid), ErrNotSupported } return s.Internal.KillSubnet(p0, p1, p2) } -func (s *HierarchicalCnsStub) KillSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStub) KillSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID) (cid.Cid, error) { return *new(cid.Cid), ErrNotSupported } -func (s *HierarchicalCnsStruct) LeaveSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStruct) LeaveSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID) (cid.Cid, error) { if s.Internal.LeaveSubnet == nil { return *new(cid.Cid), ErrNotSupported } return s.Internal.LeaveSubnet(p0, p1, p2) } -func (s *HierarchicalCnsStub) LeaveSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID) (cid.Cid, error) { +func (s *HierarchicalCnsStub) LeaveSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID) (cid.Cid, error) { return *new(cid.Cid), ErrNotSupported } -func (s *HierarchicalCnsStruct) MineSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 bool) error { +func (s *HierarchicalCnsStruct) ListCheckpoints(p0 context.Context, p1 address.SubnetID, p2 int) ([]*schema.Checkpoint, error) { + if s.Internal.ListCheckpoints == nil { + return *new([]*schema.Checkpoint), ErrNotSupported + } + return 
s.Internal.ListCheckpoints(p0, p1, p2) +} + +func (s *HierarchicalCnsStub) ListCheckpoints(p0 context.Context, p1 address.SubnetID, p2 int) ([]*schema.Checkpoint, error) { + return *new([]*schema.Checkpoint), ErrNotSupported +} + +func (s *HierarchicalCnsStruct) MineSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 bool) error { if s.Internal.MineSubnet == nil { return ErrNotSupported } return s.Internal.MineSubnet(p0, p1, p2, p3) } -func (s *HierarchicalCnsStub) MineSubnet(p0 context.Context, p1 address.Address, p2 hierarchical.SubnetID, p3 bool) error { +func (s *HierarchicalCnsStub) MineSubnet(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 bool) error { + return ErrNotSupported +} + +func (s *HierarchicalCnsStruct) ReleaseFunds(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) { + if s.Internal.ReleaseFunds == nil { + return *new(cid.Cid), ErrNotSupported + } + return s.Internal.ReleaseFunds(p0, p1, p2, p3) +} + +func (s *HierarchicalCnsStub) ReleaseFunds(p0 context.Context, p1 address.Address, p2 address.SubnetID, p3 abi.TokenAmount) (cid.Cid, error) { + return *new(cid.Cid), ErrNotSupported +} + +func (s *HierarchicalCnsStruct) SyncSubnet(p0 context.Context, p1 address.SubnetID, p2 bool) error { + if s.Internal.SyncSubnet == nil { + return ErrNotSupported + } + return s.Internal.SyncSubnet(p0, p1, p2) +} + +func (s *HierarchicalCnsStub) SyncSubnet(p0 context.Context, p1 address.SubnetID, p2 bool) error { return ErrNotSupported } +func (s *HierarchicalCnsStruct) ValidateCheckpoint(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) (*schema.Checkpoint, error) { + if s.Internal.ValidateCheckpoint == nil { + return nil, ErrNotSupported + } + return s.Internal.ValidateCheckpoint(p0, p1, p2) +} + +func (s *HierarchicalCnsStub) ValidateCheckpoint(p0 context.Context, p1 address.SubnetID, p2 abi.ChainEpoch) (*schema.Checkpoint, error) { + return nil, ErrNotSupported +} + func 
(s *NetStruct) ID(p0 context.Context) (peer.ID, error) { if s.Internal.ID == nil { return *new(peer.ID), ErrNotSupported @@ -3613,6 +3799,17 @@ func (s *NetStub) NetFindPeer(p0 context.Context, p1 peer.ID) (peer.AddrInfo, er return *new(peer.AddrInfo), ErrNotSupported } +func (s *NetStruct) NetLimit(p0 context.Context, p1 string) (NetLimit, error) { + if s.Internal.NetLimit == nil { + return *new(NetLimit), ErrNotSupported + } + return s.Internal.NetLimit(p0, p1) +} + +func (s *NetStub) NetLimit(p0 context.Context, p1 string) (NetLimit, error) { + return *new(NetLimit), ErrNotSupported +} + func (s *NetStruct) NetPeerInfo(p0 context.Context, p1 peer.ID) (*ExtendedPeerInfo, error) { if s.Internal.NetPeerInfo == nil { return nil, ErrNotSupported @@ -3646,6 +3843,28 @@ func (s *NetStub) NetPubsubScores(p0 context.Context) ([]PubsubScore, error) { return *new([]PubsubScore), ErrNotSupported } +func (s *NetStruct) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error { + if s.Internal.NetSetLimit == nil { + return ErrNotSupported + } + return s.Internal.NetSetLimit(p0, p1, p2) +} + +func (s *NetStub) NetSetLimit(p0 context.Context, p1 string, p2 NetLimit) error { + return ErrNotSupported +} + +func (s *NetStruct) NetStat(p0 context.Context, p1 string) (NetStat, error) { + if s.Internal.NetStat == nil { + return *new(NetStat), ErrNotSupported + } + return s.Internal.NetStat(p0, p1) +} + +func (s *NetStub) NetStat(p0 context.Context, p1 string) (NetStat, error) { + return *new(NetStat), ErrNotSupported +} + func (s *SignableStruct) Sign(p0 context.Context, p1 SignFunc) error { if s.Internal.Sign == nil { return ErrNotSupported @@ -3701,14 +3920,14 @@ func (s *StorageMinerStub) CheckProvable(p0 context.Context, p1 abi.RegisteredPo return *new(map[abi.SectorNumber]string), ErrNotSupported } -func (s *StorageMinerStruct) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { +func (s *StorageMinerStruct) 
ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) { if s.Internal.ComputeProof == nil { return *new([]builtin.PoStProof), ErrNotSupported } - return s.Internal.ComputeProof(p0, p1, p2) + return s.Internal.ComputeProof(p0, p1, p2, p3, p4) } -func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.SectorInfo, p2 abi.PoStRandomness) ([]builtin.PoStProof, error) { +func (s *StorageMinerStub) ComputeProof(p0 context.Context, p1 []builtin.ExtendedSectorInfo, p2 abi.PoStRandomness, p3 abi.ChainEpoch, p4 abinetwork.Version) ([]builtin.PoStProof, error) { return *new([]builtin.PoStProof), ErrNotSupported } @@ -3965,6 +4184,17 @@ func (s *StorageMinerStub) MarketCancelDataTransfer(p0 context.Context, p1 datat return ErrNotSupported } +func (s *StorageMinerStruct) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { + if s.Internal.MarketDataTransferDiagnostics == nil { + return nil, ErrNotSupported + } + return s.Internal.MarketDataTransferDiagnostics(p0, p1) +} + +func (s *StorageMinerStub) MarketDataTransferDiagnostics(p0 context.Context, p1 peer.ID) (*TransferDiagnostics, error) { + return nil, ErrNotSupported +} + func (s *StorageMinerStruct) MarketDataTransferUpdates(p0 context.Context) (<-chan DataTransferChannel, error) { if s.Internal.MarketDataTransferUpdates == nil { return nil, ErrNotSupported @@ -4097,6 +4327,17 @@ func (s *StorageMinerStub) MarketRestartDataTransfer(p0 context.Context, p1 data return ErrNotSupported } +func (s *StorageMinerStruct) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error { + if s.Internal.MarketRetryPublishDeal == nil { + return ErrNotSupported + } + return s.Internal.MarketRetryPublishDeal(p0, p1) +} + +func (s *StorageMinerStub) MarketRetryPublishDeal(p0 context.Context, p1 cid.Cid) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) 
MarketSetAsk(p0 context.Context, p1 types.BigInt, p2 types.BigInt, p3 abi.ChainEpoch, p4 abi.PaddedPieceSize, p5 abi.PaddedPieceSize) error { if s.Internal.MarketSetAsk == nil { return ErrNotSupported @@ -4218,6 +4459,17 @@ func (s *StorageMinerStub) ReturnFinalizeSector(p0 context.Context, p1 storiface return ErrNotSupported } +func (s *StorageMinerStruct) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + if s.Internal.ReturnGenerateSectorKeyFromData == nil { + return ErrNotSupported + } + return s.Internal.ReturnGenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *StorageMinerStub) ReturnGenerateSectorKeyFromData(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnMoveStorage(p0 context.Context, p1 storiface.CallID, p2 *storiface.CallError) error { if s.Internal.ReturnMoveStorage == nil { return ErrNotSupported @@ -4229,6 +4481,28 @@ func (s *StorageMinerStub) ReturnMoveStorage(p0 context.Context, p1 storiface.Ca return ErrNotSupported } +func (s *StorageMinerStruct) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate1 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate1(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate1(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaVanillaProofs, p3 *storiface.CallError) error { + return ErrNotSupported +} + +func (s *StorageMinerStruct) ReturnProveReplicaUpdate2(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + if s.Internal.ReturnProveReplicaUpdate2 == nil { + return ErrNotSupported + } + return s.Internal.ReturnProveReplicaUpdate2(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnProveReplicaUpdate2(p0 context.Context, p1 
storiface.CallID, p2 storage.ReplicaUpdateProof, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnReadPiece(p0 context.Context, p1 storiface.CallID, p2 bool, p3 *storiface.CallError) error { if s.Internal.ReturnReadPiece == nil { return ErrNotSupported @@ -4251,6 +4525,17 @@ func (s *StorageMinerStub) ReturnReleaseUnsealed(p0 context.Context, p1 storifac return ErrNotSupported } +func (s *StorageMinerStruct) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + if s.Internal.ReturnReplicaUpdate == nil { + return ErrNotSupported + } + return s.Internal.ReturnReplicaUpdate(p0, p1, p2, p3) +} + +func (s *StorageMinerStub) ReturnReplicaUpdate(p0 context.Context, p1 storiface.CallID, p2 storage.ReplicaUpdateOut, p3 *storiface.CallError) error { + return ErrNotSupported +} + func (s *StorageMinerStruct) ReturnSealCommit1(p0 context.Context, p1 storiface.CallID, p2 storage.Commit1Out, p3 *storiface.CallError) error { if s.Internal.ReturnSealCommit1 == nil { return ErrNotSupported @@ -4394,14 +4679,25 @@ func (s *StorageMinerStub) SectorGetSealDelay(p0 context.Context) (time.Duration return *new(time.Duration), ErrNotSupported } -func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { +func (s *StorageMinerStruct) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error { if s.Internal.SectorMarkForUpgrade == nil { return ErrNotSupported } - return s.Internal.SectorMarkForUpgrade(p0, p1) + return s.Internal.SectorMarkForUpgrade(p0, p1, p2) } -func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber) error { +func (s *StorageMinerStub) SectorMarkForUpgrade(p0 context.Context, p1 abi.SectorNumber, p2 bool) error { + return ErrNotSupported +} + +func (s *StorageMinerStruct) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error { + if 
s.Internal.SectorMatchPendingPiecesToOpenSectors == nil { + return ErrNotSupported + } + return s.Internal.SectorMatchPendingPiecesToOpenSectors(p0) +} + +func (s *StorageMinerStub) SectorMatchPendingPiecesToOpenSectors(p0 context.Context) error { return ErrNotSupported } @@ -4647,6 +4943,17 @@ func (s *StorageMinerStub) StorageFindSector(p0 context.Context, p1 abi.SectorID return *new([]stores.SectorStorageInfo), ErrNotSupported } +func (s *StorageMinerStruct) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + if s.Internal.StorageGetLocks == nil { + return *new(storiface.SectorLocks), ErrNotSupported + } + return s.Internal.StorageGetLocks(p0) +} + +func (s *StorageMinerStub) StorageGetLocks(p0 context.Context) (storiface.SectorLocks, error) { + return *new(storiface.SectorLocks), ErrNotSupported +} + func (s *StorageMinerStruct) StorageInfo(p0 context.Context, p1 stores.ID) (stores.StorageInfo, error) { if s.Internal.StorageInfo == nil { return *new(stores.StorageInfo), ErrNotSupported @@ -4878,6 +5185,17 @@ func (s *WorkerStub) FinalizeSector(p0 context.Context, p1 storage.SectorRef, p2 return *new(storiface.CallID), ErrNotSupported } +func (s *WorkerStruct) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + if s.Internal.GenerateSectorKeyFromData == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.GenerateSectorKeyFromData(p0, p1, p2) +} + +func (s *WorkerStub) GenerateSectorKeyFromData(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) Info(p0 context.Context) (storiface.WorkerInfo, error) { if s.Internal.Info == nil { return *new(storiface.WorkerInfo), ErrNotSupported @@ -4922,6 +5240,28 @@ func (s *WorkerStub) ProcessSession(p0 context.Context) (uuid.UUID, error) { return *new(uuid.UUID), ErrNotSupported } +func (s *WorkerStruct) 
ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate1 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ProveReplicaUpdate1(p0, p1, p2, p3, p4) +} + +func (s *WorkerStub) ProveReplicaUpdate1(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + +func (s *WorkerStruct) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + if s.Internal.ProveReplicaUpdate2 == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ProveReplicaUpdate2(p0, p1, p2, p3, p4, p5) +} + +func (s *WorkerStub) ProveReplicaUpdate2(p0 context.Context, p1 storage.SectorRef, p2 cid.Cid, p3 cid.Cid, p4 cid.Cid, p5 storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) ReleaseUnsealed(p0 context.Context, p1 storage.SectorRef, p2 []storage.Range) (storiface.CallID, error) { if s.Internal.ReleaseUnsealed == nil { return *new(storiface.CallID), ErrNotSupported @@ -4944,6 +5284,17 @@ func (s *WorkerStub) Remove(p0 context.Context, p1 abi.SectorID) error { return ErrNotSupported } +func (s *WorkerStruct) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + if s.Internal.ReplicaUpdate == nil { + return *new(storiface.CallID), ErrNotSupported + } + return s.Internal.ReplicaUpdate(p0, p1, p2) +} + +func (s *WorkerStub) ReplicaUpdate(p0 context.Context, p1 storage.SectorRef, p2 []abi.PieceInfo) (storiface.CallID, error) { + return *new(storiface.CallID), ErrNotSupported +} + func (s *WorkerStruct) SealCommit1(p0 context.Context, p1 storage.SectorRef, p2 abi.SealRandomness, p3 abi.InteractiveSealRandomness, p4 
[]abi.PieceInfo, p5 storage.SectorCids) (storiface.CallID, error) { if s.Internal.SealCommit1 == nil { return *new(storiface.CallID), ErrNotSupported diff --git a/api/types.go b/api/types.go index 7f9152d22..756bfd8dd 100644 --- a/api/types.go +++ b/api/types.go @@ -5,13 +5,14 @@ import ( "fmt" "time" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/lotus/chain/types" - datatransfer "github.com/filecoin-project/go-data-transfer" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" + "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" pubsub "github.com/libp2p/go-libp2p-pubsub" ma "github.com/multiformats/go-multiaddr" @@ -54,6 +55,30 @@ type MessageSendSpec struct { MaxFee abi.TokenAmount } +// GraphSyncDataTransfer provides diagnostics on a data transfer happening over graphsync +type GraphSyncDataTransfer struct { + // GraphSync request id for this transfer + RequestID graphsync.RequestID + // Graphsync state for this transfer + RequestState string + // If a channel ID is present, indicates whether this is the current graphsync request for this channel + // (could have changed in a restart) + IsCurrentChannelRequest bool + // Data transfer channel ID for this transfer + ChannelID *datatransfer.ChannelID + // Data transfer state for this transfer + ChannelState *DataTransferChannel + // Diagnostic information about this request -- and unexpected inconsistencies in + // request state + Diagnostics []string +} + +// TransferDiagnostics give current information about transfers going over graphsync that may be helpful for debugging +type TransferDiagnostics struct { + ReceivingTransfers []*GraphSyncDataTransfer + SendingTransfers []*GraphSyncDataTransfer +} + type DataTransferChannel struct { TransferID datatransfer.TransferID 
Status datatransfer.Status @@ -105,6 +130,28 @@ type NetBlockList struct { IPSubnets []string } +type NetStat struct { + System *network.ScopeStat `json:",omitempty"` + Transient *network.ScopeStat `json:",omitempty"` + Services map[string]network.ScopeStat `json:",omitempty"` + Protocols map[string]network.ScopeStat `json:",omitempty"` + Peers map[string]network.ScopeStat `json:",omitempty"` +} + +type NetLimit struct { + Dynamic bool `json:",omitempty"` + // set if Dynamic is false + Memory int64 `json:",omitempty"` + // set if Dynamic is true + MemoryFraction float64 `json:",omitempty"` + MinMemory int64 `json:",omitempty"` + MaxMemory int64 `json:",omitempty"` + + Streams, StreamsInbound, StreamsOutbound int + Conns, ConnsInbound, ConnsOutbound int + FD int +} + type ExtendedPeerInfo struct { ID peer.ID Agent string @@ -194,6 +241,49 @@ type RetrievalInfo struct { TransferChannelID *datatransfer.ChannelID DataTransfer *DataTransferChannel + + // optional event if part of ClientGetRetrievalUpdates + Event *retrievalmarket.ClientEvent +} + +type RestrievalRes struct { + DealID retrievalmarket.DealID +} + +// Selector specifies ipld selector string +// - if the string starts with '{', it's interpreted as json selector string +// see https://ipld.io/specs/selectors/ and https://ipld.io/specs/selectors/fixtures/selector-fixtures-1/ +// - otherwise the string is interpreted as ipld-selector-text-lite (simple ipld path) +// see https://github.com/ipld/go-ipld-selector-text-lite +type Selector string + +type DagSpec struct { + // DataSelector matches data to be retrieved + // - when using textselector, the path specifies subtree + // - the matched graph must have a single root + DataSelector *Selector + + // ExportMerkleProof is applicable only when exporting to a CAR file via a path textselector + // When true, in addition to the selection target, the resulting CAR will contain every block along the + // path back to, and including the original root + // When false 
the resulting CAR contains only the blocks of the target subdag + ExportMerkleProof bool +} + +type ExportRef struct { + Root cid.Cid + + // DAGs array specifies a list of DAGs to export + // - If exporting into unixfs files, only one DAG is supported, DataSelector is only used to find the targeted root node + // - If exporting into a car file + // - When exactly one text-path DataSelector is specified exports the subgraph and its full merkle-path from the original root + // - Otherwise ( multiple paths and/or JSON selector specs) determines each individual subroot and exports the subtrees as a multi-root car + // - When not specified defaults to a single DAG: + // - Data - the entire DAG: `{"R":{"l":{"none":{}},":>":{"a":{">":{"@":{}}}}}}` + DAGs []DagSpec + + FromLocalCAR string // if specified, get data from a local CARv2 file. + DealID retrievalmarket.DealID } type FullNodeServer func(path string, api FullNode) error diff --git a/api/v0api/full.go b/api/v0api/full.go index f6af849ed..d8fc5c6c7 100644 --- a/api/v0api/full.go +++ b/api/v0api/full.go @@ -12,6 +12,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/ipfs/go-cid" + textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/filecoin-project/lotus/api" @@ -326,10 +327,10 @@ type FullNode interface { // ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. ClientMinerQueryOffer(ctx context.Context, miner address.Address, root cid.Cid, piece *cid.Cid) (api.QueryOffer, error) //perm:read // ClientRetrieve initiates the retrieval of a file, as specified in the order. 
- ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error //perm:admin + ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error //perm:admin // ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel // of status updates. - ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin + ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) //perm:admin // ClientQueryAsk returns a signed StorageAsk from the specified miner. // ClientListRetrievals returns information about retrievals made by the local client ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) //perm:write @@ -715,3 +716,37 @@ type FullNode interface { // the path specified when calling CreateBackup is within the base path CreateBackup(ctx context.Context, fpath string) error //perm:admin } + +func OfferOrder(o api.QueryOffer, client address.Address) RetrievalOrder { + return RetrievalOrder{ + Root: o.Root, + Piece: o.Piece, + Size: o.Size, + Total: o.MinPrice, + UnsealPrice: o.UnsealPrice, + PaymentInterval: o.PaymentInterval, + PaymentIntervalIncrease: o.PaymentIntervalIncrease, + Client: client, + + Miner: o.Miner, + MinerPeer: &o.MinerPeer, + } +} + +type RetrievalOrder struct { + // TODO: make this less unixfs specific + Root cid.Cid + Piece *cid.Cid + DatamodelPathSelector *textselector.Expression + Size uint64 + + FromLocalCAR string // if specified, get data from a local CARv2 file. 
+ // TODO: support offset + Total types.BigInt + UnsealPrice types.BigInt + PaymentInterval uint64 + PaymentIntervalIncrease uint64 + Client address.Address + Miner address.Address + MinerPeer *retrievalmarket.RetrievalPeer +} diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go index 18a5ec7d6..e3ba56899 100644 --- a/api/v0api/gateway.go +++ b/api/v0api/gateway.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" + abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -57,7 +57,7 @@ type Gateway interface { StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) - StateNetworkVersion(context.Context, types.TipSetKey) (network.Version, error) + StateNetworkVersion(context.Context, types.TipSetKey) (abinetwork.Version, error) StateSearchMsg(ctx context.Context, msg cid.Cid) (*api.MsgLookup, error) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) StateVerifiedClientStatus(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*abi.StoragePower, error) diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 70fa492c4..5befe0ef9 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -13,7 +13,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" - "github.com/filecoin-project/go-state-types/network" + 
abinetwork "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -127,11 +127,11 @@ type FullNodeStruct struct { ClientRestartDataTransfer func(p0 context.Context, p1 datatransfer.TransferID, p2 peer.ID, p3 bool) error `perm:"write"` - ClientRetrieve func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` + ClientRetrieve func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error `perm:"admin"` ClientRetrieveTryRestartInsufficientFunds func(p0 context.Context, p1 address.Address) error `perm:"write"` - ClientRetrieveWithEvents func(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` + ClientRetrieveWithEvents func(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) `perm:"admin"` ClientStartDeal func(p0 context.Context, p1 *api.StartDealParams) (*cid.Cid, error) `perm:"admin"` @@ -455,7 +455,7 @@ type GatewayStruct struct { StateMinerProvingDeadline func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (*dline.Info, error) `` - StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (network.Version, error) `` + StateNetworkVersion func(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) `` StateSearchMsg func(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) `` @@ -969,14 +969,14 @@ func (s *FullNodeStub) ClientRestartDataTransfer(p0 context.Context, p1 datatran return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStruct) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { if s.Internal.ClientRetrieve == nil { return ErrNotSupported } return s.Internal.ClientRetrieve(p0, p1, p2) } -func (s 
*FullNodeStub) ClientRetrieve(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) error { +func (s *FullNodeStub) ClientRetrieve(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) error { return ErrNotSupported } @@ -991,14 +991,14 @@ func (s *FullNodeStub) ClientRetrieveTryRestartInsufficientFunds(p0 context.Cont return ErrNotSupported } -func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStruct) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { if s.Internal.ClientRetrieveWithEvents == nil { return nil, ErrNotSupported } return s.Internal.ClientRetrieveWithEvents(p0, p1, p2) } -func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 api.RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (s *FullNodeStub) ClientRetrieveWithEvents(p0 context.Context, p1 RetrievalOrder, p2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { return nil, ErrNotSupported } @@ -2707,15 +2707,15 @@ func (s *GatewayStub) StateMinerProvingDeadline(p0 context.Context, p1 address.A return nil, ErrNotSupported } -func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { +func (s *GatewayStruct) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) { if s.Internal.StateNetworkVersion == nil { - return *new(network.Version), ErrNotSupported + return *new(abinetwork.Version), ErrNotSupported } return s.Internal.StateNetworkVersion(p0, p1) } -func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (network.Version, error) { - return *new(network.Version), ErrNotSupported +func (s *GatewayStub) StateNetworkVersion(p0 context.Context, p1 types.TipSetKey) (abinetwork.Version, error) { + return *new(abinetwork.Version), 
ErrNotSupported } func (s *GatewayStruct) StateSearchMsg(p0 context.Context, p1 cid.Cid) (*api.MsgLookup, error) { diff --git a/api/v0api/v0mocks/mock_full.go b/api/v0api/v0mocks/mock_full.go index cbdff83b4..5351525ef 100644 --- a/api/v0api/v0mocks/mock_full.go +++ b/api/v0api/v0mocks/mock_full.go @@ -21,8 +21,9 @@ import ( network "github.com/filecoin-project/go-state-types/network" api "github.com/filecoin-project/lotus/api" apitypes "github.com/filecoin-project/lotus/api/types" + v0api "github.com/filecoin-project/lotus/api/v0api" miner "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - hierarchical "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" types "github.com/filecoin-project/lotus/chain/types" alerting "github.com/filecoin-project/lotus/journal/alerting" marketevents "github.com/filecoin-project/lotus/markets/loggers" @@ -63,18 +64,18 @@ func (m *MockFullNode) EXPECT() *MockFullNodeMockRecorder { } // AddSubnet mocks base method. -func (m *MockFullNode) AddSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID, arg3 string, arg4 uint64, arg5 big.Int, arg6 address.Address) (address.Address, error) { +func (m *MockFullNode) AddSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 string, arg4 uint64, arg5 big.Int, arg6 abi.ChainEpoch, arg7 address.Address) (address.Address, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddSubnet", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret := m.ctrl.Call(m, "AddSubnet", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) ret0, _ := ret[0].(address.Address) ret1, _ := ret[1].(error) return ret0, ret1 } // AddSubnet indicates an expected call of AddSubnet. 
-func (mr *MockFullNodeMockRecorder) AddSubnet(arg0, arg1, arg2, arg3, arg4, arg5, arg6 interface{}) *gomock.Call { +func (mr *MockFullNodeMockRecorder) AddSubnet(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockFullNode)(nil).AddSubnet), arg0, arg1, arg2, arg3, arg4, arg5, arg6) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddSubnet", reflect.TypeOf((*MockFullNode)(nil).AddSubnet), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) } // AuthNew mocks base method. @@ -776,7 +777,7 @@ func (mr *MockFullNodeMockRecorder) ClientRestartDataTransfer(arg0, arg1, arg2, } // ClientRetrieve mocks base method. -func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) error { +func (m *MockFullNode) ClientRetrieve(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieve", arg0, arg1, arg2) ret0, _ := ret[0].(error) @@ -804,7 +805,7 @@ func (mr *MockFullNodeMockRecorder) ClientRetrieveTryRestartInsufficientFunds(ar } // ClientRetrieveWithEvents mocks base method. -func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { +func (m *MockFullNode) ClientRetrieveWithEvents(arg0 context.Context, arg1 v0api.RetrievalOrder, arg2 *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClientRetrieveWithEvents", arg0, arg1, arg2) ret0, _ := ret[0].(<-chan marketevents.RetrievalEvent) @@ -877,6 +878,21 @@ func (mr *MockFullNodeMockRecorder) CreateBackup(arg0, arg1 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateBackup", reflect.TypeOf((*MockFullNode)(nil).CreateBackup), arg0, arg1) } +// CrossMsgResolve mocks base method. 
+func (m *MockFullNode) CrossMsgResolve(arg0 context.Context, arg1 address.SubnetID, arg2 cid.Cid, arg3 address.SubnetID) ([]types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CrossMsgResolve", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CrossMsgResolve indicates an expected call of CrossMsgResolve. +func (mr *MockFullNodeMockRecorder) CrossMsgResolve(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CrossMsgResolve", reflect.TypeOf((*MockFullNode)(nil).CrossMsgResolve), arg0, arg1, arg2, arg3) +} + // Discover mocks base method. func (m *MockFullNode) Discover(arg0 context.Context) (apitypes.OpenRPCDocument, error) { m.ctrl.T.Helper() @@ -892,6 +908,21 @@ func (mr *MockFullNodeMockRecorder) Discover(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Discover", reflect.TypeOf((*MockFullNode)(nil).Discover), arg0) } +// FundSubnet mocks base method. +func (m *MockFullNode) FundSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FundSubnet", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FundSubnet indicates an expected call of FundSubnet. +func (mr *MockFullNodeMockRecorder) FundSubnet(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FundSubnet", reflect.TypeOf((*MockFullNode)(nil).FundSubnet), arg0, arg1, arg2, arg3) +} + // GasEstimateFeeCap mocks base method. 
func (m *MockFullNode) GasEstimateFeeCap(arg0 context.Context, arg1 *types.Message, arg2 int64, arg3 types.TipSetKey) (big.Int, error) { m.ctrl.T.Helper() @@ -952,6 +983,21 @@ func (mr *MockFullNodeMockRecorder) GasEstimateMessageGas(arg0, arg1, arg2, arg3 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GasEstimateMessageGas", reflect.TypeOf((*MockFullNode)(nil).GasEstimateMessageGas), arg0, arg1, arg2, arg3) } +// GetCrossMsgsPool mocks base method. +func (m *MockFullNode) GetCrossMsgsPool(arg0 context.Context, arg1 address.SubnetID, arg2 abi.ChainEpoch) ([]*types.Message, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCrossMsgsPool", arg0, arg1, arg2) + ret0, _ := ret[0].([]*types.Message) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCrossMsgsPool indicates an expected call of GetCrossMsgsPool. +func (mr *MockFullNodeMockRecorder) GetCrossMsgsPool(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCrossMsgsPool", reflect.TypeOf((*MockFullNode)(nil).GetCrossMsgsPool), arg0, arg1, arg2) +} + // ID mocks base method. func (m *MockFullNode) ID(arg0 context.Context) (peer.ID, error) { m.ctrl.T.Helper() @@ -968,7 +1014,7 @@ func (mr *MockFullNodeMockRecorder) ID(arg0 interface{}) *gomock.Call { } // JoinSubnet mocks base method. -func (m *MockFullNode) JoinSubnet(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) JoinSubnet(arg0 context.Context, arg1 address.Address, arg2 big.Int, arg3 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "JoinSubnet", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(cid.Cid) @@ -983,7 +1029,7 @@ func (mr *MockFullNodeMockRecorder) JoinSubnet(arg0, arg1, arg2, arg3 interface{ } // KillSubnet mocks base method. 
-func (m *MockFullNode) KillSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) KillSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "KillSubnet", arg0, arg1, arg2) ret0, _ := ret[0].(cid.Cid) @@ -998,7 +1044,7 @@ func (mr *MockFullNodeMockRecorder) KillSubnet(arg0, arg1, arg2 interface{}) *go } // LeaveSubnet mocks base method. -func (m *MockFullNode) LeaveSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID) (cid.Cid, error) { +func (m *MockFullNode) LeaveSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID) (cid.Cid, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "LeaveSubnet", arg0, arg1, arg2) ret0, _ := ret[0].(cid.Cid) @@ -1012,6 +1058,21 @@ func (mr *MockFullNodeMockRecorder) LeaveSubnet(arg0, arg1, arg2 interface{}) *g return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LeaveSubnet", reflect.TypeOf((*MockFullNode)(nil).LeaveSubnet), arg0, arg1, arg2) } +// ListCheckpoints mocks base method. +func (m *MockFullNode) ListCheckpoints(arg0 context.Context, arg1 address.SubnetID, arg2 int) ([]*schema.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListCheckpoints", arg0, arg1, arg2) + ret0, _ := ret[0].([]*schema.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListCheckpoints indicates an expected call of ListCheckpoints. +func (mr *MockFullNodeMockRecorder) ListCheckpoints(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListCheckpoints", reflect.TypeOf((*MockFullNode)(nil).ListCheckpoints), arg0, arg1, arg2) +} + // LogAlerts mocks base method. 
func (m *MockFullNode) LogAlerts(arg0 context.Context) ([]alerting.Alert, error) { m.ctrl.T.Helper() @@ -1131,7 +1192,7 @@ func (mr *MockFullNodeMockRecorder) MarketWithdraw(arg0, arg1, arg2, arg3 interf } // MineSubnet mocks base method. -func (m *MockFullNode) MineSubnet(arg0 context.Context, arg1 address.Address, arg2 hierarchical.SubnetID, arg3 bool) error { +func (m *MockFullNode) MineSubnet(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 bool) error { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "MineSubnet", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) @@ -1798,6 +1859,21 @@ func (mr *MockFullNodeMockRecorder) NetFindPeer(arg0, arg1 interface{}) *gomock. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetFindPeer", reflect.TypeOf((*MockFullNode)(nil).NetFindPeer), arg0, arg1) } +// NetLimit mocks base method. +func (m *MockFullNode) NetLimit(arg0 context.Context, arg1 string) (api.NetLimit, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetLimit", arg0, arg1) + ret0, _ := ret[0].(api.NetLimit) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetLimit indicates an expected call of NetLimit. +func (mr *MockFullNodeMockRecorder) NetLimit(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetLimit", reflect.TypeOf((*MockFullNode)(nil).NetLimit), arg0, arg1) +} + // NetPeerInfo mocks base method. func (m *MockFullNode) NetPeerInfo(arg0 context.Context, arg1 peer.ID) (*api.ExtendedPeerInfo, error) { m.ctrl.T.Helper() @@ -1843,6 +1919,35 @@ func (mr *MockFullNodeMockRecorder) NetPubsubScores(arg0 interface{}) *gomock.Ca return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetPubsubScores", reflect.TypeOf((*MockFullNode)(nil).NetPubsubScores), arg0) } +// NetSetLimit mocks base method. 
+func (m *MockFullNode) NetSetLimit(arg0 context.Context, arg1 string, arg2 api.NetLimit) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetSetLimit", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// NetSetLimit indicates an expected call of NetSetLimit. +func (mr *MockFullNodeMockRecorder) NetSetLimit(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetSetLimit", reflect.TypeOf((*MockFullNode)(nil).NetSetLimit), arg0, arg1, arg2) +} + +// NetStat mocks base method. +func (m *MockFullNode) NetStat(arg0 context.Context, arg1 string) (api.NetStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NetStat", arg0, arg1) + ret0, _ := ret[0].(api.NetStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NetStat indicates an expected call of NetStat. +func (mr *MockFullNodeMockRecorder) NetStat(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NetStat", reflect.TypeOf((*MockFullNode)(nil).NetStat), arg0, arg1) +} + // PaychAllocateLane mocks base method. func (m *MockFullNode) PaychAllocateLane(arg0 context.Context, arg1 address.Address) (uint64, error) { m.ctrl.T.Helper() @@ -2082,6 +2187,21 @@ func (mr *MockFullNodeMockRecorder) PaychVoucherSubmit(arg0, arg1, arg2, arg3, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PaychVoucherSubmit", reflect.TypeOf((*MockFullNode)(nil).PaychVoucherSubmit), arg0, arg1, arg2, arg3, arg4) } +// ReleaseFunds mocks base method. +func (m *MockFullNode) ReleaseFunds(arg0 context.Context, arg1 address.Address, arg2 address.SubnetID, arg3 big.Int) (cid.Cid, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseFunds", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(cid.Cid) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReleaseFunds indicates an expected call of ReleaseFunds. 
+func (mr *MockFullNodeMockRecorder) ReleaseFunds(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseFunds", reflect.TypeOf((*MockFullNode)(nil).ReleaseFunds), arg0, arg1, arg2, arg3) +} + // Session mocks base method. func (m *MockFullNode) Session(arg0 context.Context) (uuid.UUID, error) { m.ctrl.T.Helper() @@ -2948,6 +3068,20 @@ func (mr *MockFullNodeMockRecorder) SyncSubmitBlock(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubmitBlock", reflect.TypeOf((*MockFullNode)(nil).SyncSubmitBlock), arg0, arg1) } +// SyncSubnet mocks base method. +func (m *MockFullNode) SyncSubnet(arg0 context.Context, arg1 address.SubnetID, arg2 bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SyncSubnet", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// SyncSubnet indicates an expected call of SyncSubnet. +func (mr *MockFullNodeMockRecorder) SyncSubnet(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncSubnet", reflect.TypeOf((*MockFullNode)(nil).SyncSubnet), arg0, arg1, arg2) +} + // SyncUnmarkAllBad mocks base method. func (m *MockFullNode) SyncUnmarkAllBad(arg0 context.Context) error { m.ctrl.T.Helper() @@ -2991,6 +3125,21 @@ func (mr *MockFullNodeMockRecorder) SyncValidateTipset(arg0, arg1 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SyncValidateTipset", reflect.TypeOf((*MockFullNode)(nil).SyncValidateTipset), arg0, arg1) } +// ValidateCheckpoint mocks base method. 
+func (m *MockFullNode) ValidateCheckpoint(arg0 context.Context, arg1 address.SubnetID, arg2 abi.ChainEpoch) (*schema.Checkpoint, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ValidateCheckpoint", arg0, arg1, arg2) + ret0, _ := ret[0].(*schema.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateCheckpoint indicates an expected call of ValidateCheckpoint. +func (mr *MockFullNodeMockRecorder) ValidateCheckpoint(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateCheckpoint", reflect.TypeOf((*MockFullNode)(nil).ValidateCheckpoint), arg0, arg1, arg2) +} + // Version mocks base method. func (m *MockFullNode) Version(arg0 context.Context) (api.APIVersion, error) { m.ctrl.T.Helper() diff --git a/api/v0api/v1_wrapper.go b/api/v0api/v1_wrapper.go index 7f7291600..7e0d7a94a 100644 --- a/api/v0api/v1_wrapper.go +++ b/api/v0api/v1_wrapper.go @@ -3,7 +3,10 @@ package v0api import ( "context" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" + marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/types" @@ -108,7 +111,7 @@ func (w *WrapperV1Full) MsigApproveTxnHash(ctx context.Context, msig address.Add } func (w *WrapperV1Full) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (cid.Cid, error) { - p, err := w.FullNode.MsigCancel(ctx, msig, txID, to, amt, src, method, params) + p, err := w.FullNode.MsigCancelTxnHash(ctx, msig, txID, to, amt, src, method, params) if err != nil { return cid.Undef, xerrors.Errorf("creating prototype: %w", err) } @@ -194,4 +197,144 @@ func (w *WrapperV1Full) ChainGetRandomnessFromBeacon(ctx context.Context, tsk ty return 
w.StateGetRandomnessFromBeacon(ctx, personalization, randEpoch, entropy, tsk) } +func (w *WrapperV1Full) ClientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef) error { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + + for { + select { + case evt, ok := <-events: + if !ok { // done successfully + return nil + } + + if evt.Err != "" { + return xerrors.Errorf("retrieval failed: %s", evt.Err) + } + case <-ctx.Done(): + return xerrors.Errorf("retrieval timed out") + } + } +} + +func (w *WrapperV1Full) ClientRetrieveWithEvents(ctx context.Context, order RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { + events := make(chan marketevents.RetrievalEvent) + go w.clientRetrieve(ctx, order, ref, events) + return events, nil +} + +func readSubscribeEvents(ctx context.Context, dealID retrievalmarket.DealID, subscribeEvents <-chan api.RetrievalInfo, events chan marketevents.RetrievalEvent) error { + for { + var subscribeEvent api.RetrievalInfo + var evt retrievalmarket.ClientEvent + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case subscribeEvent = <-subscribeEvents: + if subscribeEvent.ID != dealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. We won't know the deal ID until after retrieving. 
+ continue + } + if subscribeEvent.Event != nil { + evt = *subscribeEvent.Event + } + } + + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case events <- marketevents.RetrievalEvent{ + Event: evt, + Status: subscribeEvent.Status, + BytesReceived: subscribeEvent.BytesReceived, + FundsSpent: subscribeEvent.TotalPaid, + }: + } + + switch subscribeEvent.Status { + case retrievalmarket.DealStatusCompleted: + return nil + case retrievalmarket.DealStatusRejected: + return xerrors.Errorf("Retrieval Proposal Rejected: %s", subscribeEvent.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", subscribeEvent.Message) + } + } +} + +func (w *WrapperV1Full) clientRetrieve(ctx context.Context, order RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { + defer close(events) + + finish := func(e error) { + if e != nil { + events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} + } + } + + var dealID retrievalmarket.DealID + if order.FromLocalCAR == "" { + // Subscribe to events before retrieving to avoid losing events. 
+ subscribeCtx, cancel := context.WithCancel(ctx) + defer cancel() + retrievalEvents, err := w.ClientGetRetrievalUpdates(subscribeCtx) + + if err != nil { + finish(xerrors.Errorf("GetRetrievalUpdates failed: %w", err)) + return + } + + retrievalRes, err := w.FullNode.ClientRetrieve(ctx, api.RetrievalOrder{ + Root: order.Root, + Piece: order.Piece, + Size: order.Size, + Total: order.Total, + UnsealPrice: order.UnsealPrice, + PaymentInterval: order.PaymentInterval, + PaymentIntervalIncrease: order.PaymentIntervalIncrease, + Client: order.Client, + Miner: order.Miner, + MinerPeer: order.MinerPeer, + }) + + if err != nil { + finish(xerrors.Errorf("Retrieve failed: %w", err)) + return + } + + dealID = retrievalRes.DealID + + err = readSubscribeEvents(ctx, retrievalRes.DealID, retrievalEvents, events) + if err != nil { + finish(xerrors.Errorf("Retrieve: %w", err)) + return + } + } + + // If ref is nil, it only fetches the data into the configured blockstore. + if ref == nil { + finish(nil) + return + } + + eref := api.ExportRef{ + Root: order.Root, + FromLocalCAR: order.FromLocalCAR, + DealID: dealID, + } + + if order.DatamodelPathSelector != nil { + s := api.Selector(*order.DatamodelPathSelector) + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &s, + ExportMerkleProof: true, + }) + } + + finish(w.ClientExport(ctx, eref, *ref)) +} + var _ FullNode = &WrapperV1Full{} diff --git a/api/version.go b/api/version.go index 2c87fe0a4..228dcbd10 100644 --- a/api/version.go +++ b/api/version.go @@ -54,11 +54,11 @@ func VersionForType(nodeType NodeType) (Version, error) { // semver versions of the rpc api exposed var ( - FullAPIVersion0 = newVer(1, 4, 0) - FullAPIVersion1 = newVer(2, 1, 0) + FullAPIVersion0 = newVer(1, 5, 0) + FullAPIVersion1 = newVer(2, 2, 0) - MinerAPIVersion0 = newVer(1, 2, 0) - WorkerAPIVersion0 = newVer(1, 1, 0) + MinerAPIVersion0 = newVer(1, 3, 0) + WorkerAPIVersion0 = newVer(1, 5, 0) ) //nolint:varcheck,deadcode diff --git a/blockstore/api.go 
b/blockstore/api.go index 6715b4766..dc4c03452 100644 --- a/blockstore/api.go +++ b/blockstore/api.go @@ -25,35 +25,35 @@ func NewAPIBlockstore(cio ChainIO) Blockstore { return Adapt(bs) // return an adapted blockstore. } -func (a *apiBlockstore) DeleteBlock(cid.Cid) error { +func (a *apiBlockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.New("not supported") } -func (a *apiBlockstore) Has(c cid.Cid) (bool, error) { - return a.api.ChainHasObj(context.TODO(), c) +func (a *apiBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + return a.api.ChainHasObj(ctx, c) } -func (a *apiBlockstore) Get(c cid.Cid) (blocks.Block, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return nil, err } return blocks.NewBlockWithCid(bb, c) } -func (a *apiBlockstore) GetSize(c cid.Cid) (int, error) { - bb, err := a.api.ChainReadObj(context.TODO(), c) +func (a *apiBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + bb, err := a.api.ChainReadObj(ctx, c) if err != nil { return 0, err } return len(bb), nil } -func (a *apiBlockstore) Put(blocks.Block) error { +func (a *apiBlockstore) Put(context.Context, blocks.Block) error { return xerrors.New("not supported") } -func (a *apiBlockstore) PutMany([]blocks.Block) error { +func (a *apiBlockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.New("not supported") } diff --git a/blockstore/autobatch.go b/blockstore/autobatch.go new file mode 100644 index 000000000..c2c281446 --- /dev/null +++ b/blockstore/autobatch.go @@ -0,0 +1,262 @@ +package blockstore + +import ( + "context" + "sync" + "time" + + "golang.org/x/xerrors" + + block "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" +) + +// autolog is a logger for the autobatching blockstore. It is subscoped from the +// blockstore logger. 
+var autolog = log.Named("auto") + +// contains the same set of blocks twice, once as an ordered list for flushing, and as a map for fast access +type blockBatch struct { + blockList []block.Block + blockMap map[cid.Cid]block.Block +} + +type AutobatchBlockstore struct { + // TODO: drop if memory consumption is too high + addedCids map[cid.Cid]struct{} + + stateLock sync.Mutex + bufferedBatch blockBatch + + flushingBatch blockBatch + flushErr error + + flushCh chan struct{} + + doFlushLock sync.Mutex + flushRetryDelay time.Duration + doneCh chan struct{} + shutdown context.CancelFunc + + backingBs Blockstore + + bufferCapacity int + bufferSize int +} + +func NewAutobatch(ctx context.Context, backingBs Blockstore, bufferCapacity int) *AutobatchBlockstore { + ctx, cancel := context.WithCancel(ctx) + bs := &AutobatchBlockstore{ + addedCids: make(map[cid.Cid]struct{}), + backingBs: backingBs, + bufferCapacity: bufferCapacity, + flushCh: make(chan struct{}, 1), + doneCh: make(chan struct{}), + // could be made configable + flushRetryDelay: time.Millisecond * 100, + shutdown: cancel, + } + + bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block) + + go bs.flushWorker(ctx) + + return bs +} + +func (bs *AutobatchBlockstore) Put(ctx context.Context, blk block.Block) error { + bs.stateLock.Lock() + defer bs.stateLock.Unlock() + + _, ok := bs.addedCids[blk.Cid()] + if !ok { + bs.addedCids[blk.Cid()] = struct{}{} + bs.bufferedBatch.blockList = append(bs.bufferedBatch.blockList, blk) + bs.bufferedBatch.blockMap[blk.Cid()] = blk + bs.bufferSize += len(blk.RawData()) + if bs.bufferSize >= bs.bufferCapacity { + // signal that a flush is appropriate, may be ignored + select { + case bs.flushCh <- struct{}{}: + default: + // do nothing + } + } + } + + return nil +} + +func (bs *AutobatchBlockstore) flushWorker(ctx context.Context) { + defer close(bs.doneCh) + for { + select { + case <-bs.flushCh: + // TODO: check if we _should_ actually flush. 
We could get a spurious wakeup + // here. + putErr := bs.doFlush(ctx, false) + for putErr != nil { + select { + case <-ctx.Done(): + return + case <-time.After(bs.flushRetryDelay): + autolog.Errorf("FLUSH ERRORED: %w, retrying after %v", putErr, bs.flushRetryDelay) + putErr = bs.doFlush(ctx, true) + } + } + case <-ctx.Done(): + // Do one last flush. + _ = bs.doFlush(ctx, false) + return + } + } +} + +// caller must NOT hold stateLock +// set retryOnly to true to only retry a failed flush and not flush anything new. +func (bs *AutobatchBlockstore) doFlush(ctx context.Context, retryOnly bool) error { + bs.doFlushLock.Lock() + defer bs.doFlushLock.Unlock() + + // If we failed to flush last time, try flushing again. + if bs.flushErr != nil { + bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList) + } + + // If we failed, or we're _only_ retrying, bail. + if retryOnly || bs.flushErr != nil { + return bs.flushErr + } + + // Then take the current batch... + bs.stateLock.Lock() + // We do NOT clear addedCids here, because its purpose is to expedite Puts + bs.flushingBatch = bs.bufferedBatch + bs.bufferedBatch.blockList = make([]block.Block, 0, len(bs.flushingBatch.blockList)) + bs.bufferedBatch.blockMap = make(map[cid.Cid]block.Block, len(bs.flushingBatch.blockMap)) + bs.stateLock.Unlock() + + // And try to flush it. + bs.flushErr = bs.backingBs.PutMany(ctx, bs.flushingBatch.blockList) + + // If we succeeded, reset the batch. Otherwise, we'll try again next time. + if bs.flushErr == nil { + bs.stateLock.Lock() + bs.flushingBatch = blockBatch{} + bs.stateLock.Unlock() + } + + return bs.flushErr +} + +// caller must NOT hold stateLock +func (bs *AutobatchBlockstore) Flush(ctx context.Context) error { + return bs.doFlush(ctx, false) +} + +func (bs *AutobatchBlockstore) Shutdown(ctx context.Context) error { + // TODO: Prevent puts after we call this to avoid losing data. 
+ bs.shutdown() + select { + case <-bs.doneCh: + case <-ctx.Done(): + return ctx.Err() + } + + bs.doFlushLock.Lock() + defer bs.doFlushLock.Unlock() + + return bs.flushErr +} + +func (bs *AutobatchBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + // may seem backward to check the backingBs first, but that is the likeliest case + blk, err := bs.backingBs.Get(ctx, c) + if err == nil { + return blk, nil + } + + if err != ErrNotFound { + return blk, err + } + + bs.stateLock.Lock() + defer bs.stateLock.Unlock() + v, ok := bs.flushingBatch.blockMap[c] + if ok { + return v, nil + } + + v, ok = bs.bufferedBatch.blockMap[c] + if ok { + return v, nil + } + + return bs.Get(ctx, c) +} + +func (bs *AutobatchBlockstore) DeleteBlock(context.Context, cid.Cid) error { + // if we wanted to support this, we would have to: + // - flush + // - delete from the backingBs (if present) + // - remove from addedCids (if present) + // - if present in addedCids, also walk the ordered lists and remove if present + return xerrors.New("deletion is unsupported") +} + +func (bs *AutobatchBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + // see note in DeleteBlock() + return xerrors.New("deletion is unsupported") +} + +func (bs *AutobatchBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + _, err := bs.Get(ctx, c) + if err == nil { + return true, nil + } + if err == ErrNotFound { + return false, nil + } + + return false, err +} + +func (bs *AutobatchBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + blk, err := bs.Get(ctx, c) + if err != nil { + return 0, err + } + + return len(blk.RawData()), nil +} + +func (bs *AutobatchBlockstore) PutMany(ctx context.Context, blks []block.Block) error { + for _, blk := range blks { + if err := bs.Put(ctx, blk); err != nil { + return err + } + } + + return nil +} + +func (bs *AutobatchBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { + if err := bs.Flush(ctx); err != nil 
{ + return nil, err + } + + return bs.backingBs.AllKeysChan(ctx) +} + +func (bs *AutobatchBlockstore) HashOnRead(enabled bool) { + bs.backingBs.HashOnRead(enabled) +} + +func (bs *AutobatchBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := bs.Get(ctx, cid) + if err != nil { + return err + } + + return callback(blk.RawData()) +} diff --git a/blockstore/autobatch_test.go b/blockstore/autobatch_test.go new file mode 100644 index 000000000..57a3b7d6c --- /dev/null +++ b/blockstore/autobatch_test.go @@ -0,0 +1,34 @@ +package blockstore + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAutobatchBlockstore(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ab := NewAutobatch(ctx, NewMemory(), len(b0.RawData())+len(b1.RawData())-1) + + require.NoError(t, ab.Put(ctx, b0)) + require.NoError(t, ab.Put(ctx, b1)) + require.NoError(t, ab.Put(ctx, b2)) + + v0, err := ab.Get(ctx, b0.Cid()) + require.NoError(t, err) + require.Equal(t, b0.RawData(), v0.RawData()) + + v1, err := ab.Get(ctx, b1.Cid()) + require.NoError(t, err) + require.Equal(t, b1.RawData(), v1.RawData()) + + v2, err := ab.Get(ctx, b2.Cid()) + require.NoError(t, err) + require.Equal(t, b2.RawData(), v2.RawData()) + + require.NoError(t, ab.Flush(ctx)) + require.NoError(t, ab.Shutdown(ctx)) +} diff --git a/blockstore/badger/blockstore.go b/blockstore/badger/blockstore.go index a0b51d8df..270e5b820 100644 --- a/blockstore/badger/blockstore.go +++ b/blockstore/badger/blockstore.go @@ -525,7 +525,7 @@ func (b *Blockstore) Size() (int64, error) { // View implements blockstore.Viewer, which leverages zero-copy read-only // access to values. 
-func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { +func (b *Blockstore) View(ctx context.Context, cid cid.Cid, fn func([]byte) error) error { if err := b.access(); err != nil { return err } @@ -552,7 +552,7 @@ func (b *Blockstore) View(cid cid.Cid, fn func([]byte) error) error { } // Has implements Blockstore.Has. -func (b *Blockstore) Has(cid cid.Cid) (bool, error) { +func (b *Blockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if err := b.access(); err != nil { return false, err } @@ -582,7 +582,7 @@ func (b *Blockstore) Has(cid cid.Cid) (bool, error) { } // Get implements Blockstore.Get. -func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *Blockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if !cid.Defined() { return nil, blockstore.ErrNotFound } @@ -619,7 +619,7 @@ func (b *Blockstore) Get(cid cid.Cid) (blocks.Block, error) { } // GetSize implements Blockstore.GetSize. -func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { +func (b *Blockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if err := b.access(); err != nil { return 0, err } @@ -652,7 +652,7 @@ func (b *Blockstore) GetSize(cid cid.Cid) (int, error) { } // Put implements Blockstore.Put. -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(ctx context.Context, block blocks.Block) error { if err := b.access(); err != nil { return err } @@ -691,7 +691,7 @@ func (b *Blockstore) Put(block blocks.Block) error { } // PutMany implements Blockstore.PutMany. -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { if err := b.access(); err != nil { return err } @@ -755,7 +755,7 @@ func (b *Blockstore) PutMany(blocks []blocks.Block) error { } // DeleteBlock implements Blockstore.DeleteBlock. 
-func (b *Blockstore) DeleteBlock(cid cid.Cid) error { +func (b *Blockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { if err := b.access(); err != nil { return err } @@ -774,7 +774,7 @@ func (b *Blockstore) DeleteBlock(cid cid.Cid) error { }) } -func (b *Blockstore) DeleteMany(cids []cid.Cid) error { +func (b *Blockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { if err := b.access(); err != nil { return err } diff --git a/blockstore/badger/blockstore_test.go b/blockstore/badger/blockstore_test.go index d8ef5241b..4619d4ec3 100644 --- a/blockstore/badger/blockstore_test.go +++ b/blockstore/badger/blockstore_test.go @@ -2,6 +2,7 @@ package badgerbs import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -98,6 +99,7 @@ func openBlockstore(optsSupplier func(path string) Options) func(tb testing.TB, } func testMove(t *testing.T, optsF func(string) Options) { + ctx := context.Background() basePath, err := ioutil.TempDir("", "") if err != nil { t.Fatal(err) @@ -122,7 +124,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // add some blocks for i := 0; i < 10; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { t.Fatal(err) } @@ -132,7 +134,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // delete some of them for i := 5; i < 10; i++ { c := have[i].Cid() - err := db.DeleteBlock(c) + err := db.DeleteBlock(ctx, c) if err != nil { t.Fatal(err) } @@ -145,7 +147,7 @@ func testMove(t *testing.T, optsF func(string) Options) { g.Go(func() error { for i := 10; i < 1000; i++ { blk := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := db.Put(blk) + err := db.Put(ctx, blk) if err != nil { return err } @@ -165,7 +167,7 @@ func testMove(t *testing.T, optsF func(string) Options) { // now check that we have all the blocks in have and none in the deleted lists checkBlocks := func() { for _, blk := range have { - has, err := db.Has(blk.Cid()) + 
has, err := db.Has(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -174,7 +176,7 @@ func testMove(t *testing.T, optsF func(string) Options) { t.Fatal("missing block") } - blk2, err := db.Get(blk.Cid()) + blk2, err := db.Get(ctx, blk.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testMove(t *testing.T, optsF func(string) Options) { } for _, c := range deleted { - has, err := db.Has(c) + has, err := db.Has(ctx, c) if err != nil { t.Fatal(err) } diff --git a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go index 93be82ac8..167d1b2ab 100644 --- a/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -44,28 +44,31 @@ func (s *Suite) RunTests(t *testing.T, prefix string) { } func (s *Suite) TestGetWhenKeyNotPresent(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } c := cid.NewCidV0(u.Hash([]byte("stuff"))) - bl, err := bs.Get(c) + bl, err := bs.Get(ctx, c) require.Nil(t, bl) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestGetWhenKeyIsNil(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() } - _, err := bs.Get(cid.Undef) + _, err := bs.Get(ctx, cid.Undef) require.Equal(t, blockstore.ErrNotFound, err) } func (s *Suite) TestPutThenGetBlock(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -73,15 +76,16 @@ func (s *Suite) TestPutThenGetBlock(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) 
TestHas(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -89,19 +93,20 @@ func (s *Suite) TestHas(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - ok, err := bs.Has(orig.Cid()) + ok, err := bs.Has(ctx, orig.Cid()) require.NoError(t, err) require.True(t, ok) - ok, err = bs.Has(blocks.NewBlock([]byte("another thing")).Cid()) + ok, err = bs.Has(ctx, blocks.NewBlock([]byte("another thing")).Cid()) require.NoError(t, err) require.False(t, ok) } func (s *Suite) TestCidv0v1(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -109,15 +114,17 @@ func (s *Suite) TestCidv0v1(t *testing.T) { orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) - fetched, err := bs.Get(cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) + fetched, err := bs.Get(ctx, cid.NewCidV1(cid.DagProtobuf, orig.Cid().Hash())) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) } func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { + ctx := context.Background() + bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -127,21 +134,21 @@ func (s *Suite) TestPutThenGetSizeBlock(t *testing.T) { missingBlock := blocks.NewBlock([]byte("missingBlock")) emptyBlock := blocks.NewBlock([]byte{}) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) - blockSize, err := bs.GetSize(block.Cid()) + blockSize, err := bs.GetSize(ctx, block.Cid()) require.NoError(t, err) require.Len(t, block.RawData(), blockSize) - err = bs.Put(emptyBlock) + err = bs.Put(ctx, emptyBlock) require.NoError(t, err) - emptySize, err := bs.GetSize(emptyBlock.Cid()) + emptySize, err 
:= bs.GetSize(ctx, emptyBlock.Cid()) require.NoError(t, err) require.Zero(t, emptySize) - missingSize, err := bs.GetSize(missingBlock.Cid()) + missingSize, err := bs.GetSize(ctx, missingBlock.Cid()) require.Equal(t, blockstore.ErrNotFound, err) require.Equal(t, -1, missingSize) } @@ -203,6 +210,7 @@ func (s *Suite) TestDoubleClose(t *testing.T) { } func (s *Suite) TestReopenPutGet(t *testing.T) { + ctx := context.Background() bs, path := s.NewBlockstore(t) c, ok := bs.(io.Closer) if !ok { @@ -210,7 +218,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } orig := blocks.NewBlock([]byte("some data")) - err := bs.Put(orig) + err := bs.Put(ctx, orig) require.NoError(t, err) err = c.Close() @@ -219,7 +227,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { bs, err = s.OpenBlockstore(t, path) require.NoError(t, err) - fetched, err := bs.Get(orig.Cid()) + fetched, err := bs.Get(ctx, orig.Cid()) require.NoError(t, err) require.Equal(t, orig.RawData(), fetched.RawData()) @@ -228,6 +236,7 @@ func (s *Suite) TestReopenPutGet(t *testing.T) { } func (s *Suite) TestPutMany(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ -238,15 +247,15 @@ func (s *Suite) TestPutMany(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) for _, blk := range blks { - fetched, err := bs.Get(blk.Cid()) + fetched, err := bs.Get(ctx, blk.Cid()) require.NoError(t, err) require.Equal(t, blk.RawData(), fetched.RawData()) - ok, err := bs.Has(blk.Cid()) + ok, err := bs.Has(ctx, blk.Cid()) require.NoError(t, err) require.True(t, ok) } @@ -259,6 +268,7 @@ func (s *Suite) TestPutMany(t *testing.T) { } func (s *Suite) TestDelete(t *testing.T) { + ctx := context.Background() bs, _ := s.NewBlockstore(t) if c, ok := bs.(io.Closer); ok { defer func() { require.NoError(t, c.Close()) }() @@ 
-269,10 +279,10 @@ func (s *Suite) TestDelete(t *testing.T) { blocks.NewBlock([]byte("foo2")), blocks.NewBlock([]byte("foo3")), } - err := bs.PutMany(blks) + err := bs.PutMany(ctx, blks) require.NoError(t, err) - err = bs.DeleteBlock(blks[1].Cid()) + err = bs.DeleteBlock(ctx, blks[1].Cid()) require.NoError(t, err) ch, err := bs.AllKeysChan(context.Background()) @@ -285,17 +295,17 @@ func (s *Suite) TestDelete(t *testing.T) { cid.NewCidV1(cid.Raw, blks[2].Cid().Hash()), }) - has, err := bs.Has(blks[1].Cid()) + has, err := bs.Has(ctx, blks[1].Cid()) require.NoError(t, err) require.False(t, has) - } func insertBlocks(t *testing.T, bs blockstore.BasicBlockstore, count int) []cid.Cid { + ctx := context.Background() keys := make([]cid.Cid, count) for i := 0; i < count; i++ { block := blocks.NewBlock([]byte(fmt.Sprintf("some data %d", i))) - err := bs.Put(block) + err := bs.Put(ctx, block) require.NoError(t, err) // NewBlock assigns a CIDv0; we convert it to CIDv1 because that's what // the store returns. 
diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 8ede31eb9..409c100cf 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -1,6 +1,8 @@ package blockstore import ( + "context" + cid "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" @@ -27,7 +29,7 @@ type BasicBlockstore = blockstore.Blockstore type Viewer = blockstore.Viewer type BatchDeleter interface { - DeleteMany(cids []cid.Cid) error + DeleteMany(ctx context.Context, cids []cid.Cid) error } // BlockstoreIterator is a trait for efficient iteration @@ -93,17 +95,17 @@ type adaptedBlockstore struct { var _ Blockstore = (*adaptedBlockstore)(nil) -func (a *adaptedBlockstore) View(cid cid.Cid, callback func([]byte) error) error { - blk, err := a.Get(cid) +func (a *adaptedBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) error { + blk, err := a.Get(ctx, cid) if err != nil { return err } return callback(blk.RawData()) } -func (a *adaptedBlockstore) DeleteMany(cids []cid.Cid) error { +func (a *adaptedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { for _, cid := range cids { - err := a.DeleteBlock(cid) + err := a.DeleteBlock(ctx, cid) if err != nil { return err } diff --git a/blockstore/buffered.go b/blockstore/buffered.go index 5d3d38f78..8e23b5362 100644 --- a/blockstore/buffered.go +++ b/blockstore/buffered.go @@ -88,34 +88,34 @@ func (bs *BufferedBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, return out, nil } -func (bs *BufferedBlockstore) DeleteBlock(c cid.Cid) error { - if err := bs.read.DeleteBlock(c); err != nil { +func (bs *BufferedBlockstore) DeleteBlock(ctx context.Context, c cid.Cid) error { + if err := bs.read.DeleteBlock(ctx, c); err != nil { return err } - return bs.write.DeleteBlock(c) + return bs.write.DeleteBlock(ctx, c) } -func (bs *BufferedBlockstore) DeleteMany(cids []cid.Cid) error { - if err := bs.read.DeleteMany(cids); err != nil { +func (bs 
*BufferedBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { + if err := bs.read.DeleteMany(ctx, cids); err != nil { return err } - return bs.write.DeleteMany(cids) + return bs.write.DeleteMany(ctx, cids) } -func (bs *BufferedBlockstore) View(c cid.Cid, callback func([]byte) error) error { +func (bs *BufferedBlockstore) View(ctx context.Context, c cid.Cid, callback func([]byte) error) error { // both stores are viewable. - if err := bs.write.View(c, callback); err == ErrNotFound { + if err := bs.write.View(ctx, c, callback); err == ErrNotFound { // not found in write blockstore; fall through. } else { return err // propagate errors, or nil, i.e. found. } - return bs.read.View(c, callback) + return bs.read.View(ctx, c, callback) } -func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { - if out, err := bs.write.Get(c); err != nil { +func (bs *BufferedBlockstore) Get(ctx context.Context, c cid.Cid) (block.Block, error) { + if out, err := bs.write.Get(ctx, c); err != nil { if err != ErrNotFound { return nil, err } @@ -123,20 +123,20 @@ func (bs *BufferedBlockstore) Get(c cid.Cid) (block.Block, error) { return out, nil } - return bs.read.Get(c) + return bs.read.Get(ctx, c) } -func (bs *BufferedBlockstore) GetSize(c cid.Cid) (int, error) { - s, err := bs.read.GetSize(c) +func (bs *BufferedBlockstore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + s, err := bs.read.GetSize(ctx, c) if err == ErrNotFound || s == 0 { - return bs.write.GetSize(c) + return bs.write.GetSize(ctx, c) } return s, err } -func (bs *BufferedBlockstore) Put(blk block.Block) error { - has, err := bs.read.Has(blk.Cid()) // TODO: consider dropping this check +func (bs *BufferedBlockstore) Put(ctx context.Context, blk block.Block) error { + has, err := bs.read.Has(ctx, blk.Cid()) // TODO: consider dropping this check if err != nil { return err } @@ -145,11 +145,11 @@ func (bs *BufferedBlockstore) Put(blk block.Block) error { return nil } - return bs.write.Put(blk) + 
return bs.write.Put(ctx, blk) } -func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { - has, err := bs.write.Has(c) +func (bs *BufferedBlockstore) Has(ctx context.Context, c cid.Cid) (bool, error) { + has, err := bs.write.Has(ctx, c) if err != nil { return false, err } @@ -157,7 +157,7 @@ func (bs *BufferedBlockstore) Has(c cid.Cid) (bool, error) { return true, nil } - return bs.read.Has(c) + return bs.read.Has(ctx, c) } func (bs *BufferedBlockstore) HashOnRead(hor bool) { @@ -165,8 +165,8 @@ func (bs *BufferedBlockstore) HashOnRead(hor bool) { bs.write.HashOnRead(hor) } -func (bs *BufferedBlockstore) PutMany(blks []block.Block) error { - return bs.write.PutMany(blks) +func (bs *BufferedBlockstore) PutMany(ctx context.Context, blks []block.Block) error { + return bs.write.PutMany(ctx, blks) } func (bs *BufferedBlockstore) Read() Blockstore { diff --git a/blockstore/discard.go b/blockstore/discard.go index afd0651bc..575c752d4 100644 --- a/blockstore/discard.go +++ b/blockstore/discard.go @@ -18,39 +18,39 @@ func NewDiscardStore(bs Blockstore) Blockstore { return &discardstore{bs: bs} } -func (b *discardstore) Has(cid cid.Cid) (bool, error) { - return b.bs.Has(cid) +func (b *discardstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + return b.bs.Has(ctx, cid) } func (b *discardstore) HashOnRead(hor bool) { b.bs.HashOnRead(hor) } -func (b *discardstore) Get(cid cid.Cid) (blocks.Block, error) { - return b.bs.Get(cid) +func (b *discardstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + return b.bs.Get(ctx, cid) } -func (b *discardstore) GetSize(cid cid.Cid) (int, error) { - return b.bs.GetSize(cid) +func (b *discardstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + return b.bs.GetSize(ctx, cid) } -func (b *discardstore) View(cid cid.Cid, f func([]byte) error) error { - return b.bs.View(cid, f) +func (b *discardstore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + return b.bs.View(ctx, cid, f) } 
-func (b *discardstore) Put(blk blocks.Block) error { +func (b *discardstore) Put(ctx context.Context, blk blocks.Block) error { return nil } -func (b *discardstore) PutMany(blks []blocks.Block) error { +func (b *discardstore) PutMany(ctx context.Context, blks []blocks.Block) error { return nil } -func (b *discardstore) DeleteBlock(cid cid.Cid) error { +func (b *discardstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return nil } -func (b *discardstore) DeleteMany(cids []cid.Cid) error { +func (b *discardstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { return nil } diff --git a/blockstore/fallback.go b/blockstore/fallback.go index 5f220f941..3d0acd36d 100644 --- a/blockstore/fallback.go +++ b/blockstore/fallback.go @@ -71,14 +71,14 @@ func (fbs *FallbackStore) getFallback(c cid.Cid) (blocks.Block, error) { // chain bitswap puts blocks in temp blockstore which is cleaned up // every few min (to drop any messages we fetched but don't want) // in this case we want to keep this block around - if err := fbs.Put(b); err != nil { + if err := fbs.Put(ctx, b); err != nil { return nil, xerrors.Errorf("persisting fallback-fetched block: %w", err) } return b, nil } -func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { - b, err := fbs.Blockstore.Get(c) +func (fbs *FallbackStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { + b, err := fbs.Blockstore.Get(ctx, c) switch err { case nil: return b, nil @@ -89,8 +89,8 @@ func (fbs *FallbackStore) Get(c cid.Cid) (blocks.Block, error) { } } -func (fbs *FallbackStore) GetSize(c cid.Cid) (int, error) { - sz, err := fbs.Blockstore.GetSize(c) +func (fbs *FallbackStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { + sz, err := fbs.Blockstore.GetSize(ctx, c) switch err { case nil: return sz, nil diff --git a/blockstore/idstore.go b/blockstore/idstore.go index e6148ff04..c6281998a 100644 --- a/blockstore/idstore.go +++ b/blockstore/idstore.go @@ -38,7 +38,7 @@ func decodeCid(cid 
cid.Cid) (inline bool, data []byte, err error) { return false, nil, err } -func (b *idstore) Has(cid cid.Cid) (bool, error) { +func (b *idstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { inline, _, err := decodeCid(cid) if err != nil { return false, xerrors.Errorf("error decoding Cid: %w", err) @@ -48,10 +48,10 @@ func (b *idstore) Has(cid cid.Cid) (bool, error) { return true, nil } - return b.bs.Has(cid) + return b.bs.Has(ctx, cid) } -func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *idstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { inline, data, err := decodeCid(cid) if err != nil { return nil, xerrors.Errorf("error decoding Cid: %w", err) @@ -61,10 +61,10 @@ func (b *idstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } - return b.bs.Get(cid) + return b.bs.Get(ctx, cid) } -func (b *idstore) GetSize(cid cid.Cid) (int, error) { +func (b *idstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { inline, data, err := decodeCid(cid) if err != nil { return 0, xerrors.Errorf("error decoding Cid: %w", err) @@ -74,10 +74,10 @@ func (b *idstore) GetSize(cid cid.Cid) (int, error) { return len(data), err } - return b.bs.GetSize(cid) + return b.bs.GetSize(ctx, cid) } -func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { +func (b *idstore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { inline, data, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -87,10 +87,10 @@ func (b *idstore) View(cid cid.Cid, cb func([]byte) error) error { return cb(data) } - return b.bs.View(cid, cb) + return b.bs.View(ctx, cid, cb) } -func (b *idstore) Put(blk blocks.Block) error { +func (b *idstore) Put(ctx context.Context, blk blocks.Block) error { inline, _, err := decodeCid(blk.Cid()) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -100,10 +100,10 @@ func (b *idstore) Put(blk blocks.Block) error 
{ return nil } - return b.bs.Put(blk) + return b.bs.Put(ctx, blk) } -func (b *idstore) PutMany(blks []blocks.Block) error { +func (b *idstore) PutMany(ctx context.Context, blks []blocks.Block) error { toPut := make([]blocks.Block, 0, len(blks)) for _, blk := range blks { inline, _, err := decodeCid(blk.Cid()) @@ -118,13 +118,13 @@ func (b *idstore) PutMany(blks []blocks.Block) error { } if len(toPut) > 0 { - return b.bs.PutMany(toPut) + return b.bs.PutMany(ctx, toPut) } return nil } -func (b *idstore) DeleteBlock(cid cid.Cid) error { +func (b *idstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { inline, _, err := decodeCid(cid) if err != nil { return xerrors.Errorf("error decoding Cid: %w", err) @@ -134,10 +134,10 @@ func (b *idstore) DeleteBlock(cid cid.Cid) error { return nil } - return b.bs.DeleteBlock(cid) + return b.bs.DeleteBlock(ctx, cid) } -func (b *idstore) DeleteMany(cids []cid.Cid) error { +func (b *idstore) DeleteMany(ctx context.Context, cids []cid.Cid) error { toDelete := make([]cid.Cid, 0, len(cids)) for _, cid := range cids { inline, _, err := decodeCid(cid) @@ -152,7 +152,7 @@ func (b *idstore) DeleteMany(cids []cid.Cid) error { } if len(toDelete) > 0 { - return b.bs.DeleteMany(toDelete) + return b.bs.DeleteMany(ctx, toDelete) } return nil diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 51b4bd951..787c71d7d 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -79,12 +79,12 @@ func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onl return Adapt(bs), nil } -func (i *IPFSBlockstore) DeleteBlock(cid cid.Cid) error { +func (i *IPFSBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) error { return xerrors.Errorf("not supported") } -func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { - _, err := i.offlineAPI.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Has(ctx context.Context, cid cid.Cid) (bool, error) { + _, err := i.offlineAPI.Block().Stat(ctx, path.IpldPath(cid)) if 
err != nil { // The underlying client is running in Offline mode. // Stat() will fail with an err if the block isn't in the @@ -99,8 +99,8 @@ func (i *IPFSBlockstore) Has(cid cid.Cid) (bool, error) { return true, nil } -func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { - rd, err := i.api.Block().Get(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { + rd, err := i.api.Block().Get(ctx, path.IpldPath(cid)) if err != nil { return nil, xerrors.Errorf("getting ipfs block: %w", err) } @@ -113,8 +113,8 @@ func (i *IPFSBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, cid) } -func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { - st, err := i.api.Block().Stat(i.ctx, path.IpldPath(cid)) +func (i *IPFSBlockstore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + st, err := i.api.Block().Stat(ctx, path.IpldPath(cid)) if err != nil { return 0, xerrors.Errorf("getting ipfs block: %w", err) } @@ -122,23 +122,23 @@ func (i *IPFSBlockstore) GetSize(cid cid.Cid) (int, error) { return st.Size(), nil } -func (i *IPFSBlockstore) Put(block blocks.Block) error { +func (i *IPFSBlockstore) Put(ctx context.Context, block blocks.Block) error { mhd, err := multihash.Decode(block.Cid().Hash()) if err != nil { return err } - _, err = i.api.Block().Put(i.ctx, bytes.NewReader(block.RawData()), + _, err = i.api.Block().Put(ctx, bytes.NewReader(block.RawData()), options.Block.Hash(mhd.Code, mhd.Length), options.Block.Format(cid.CodecToStr[block.Cid().Type()])) return err } -func (i *IPFSBlockstore) PutMany(blocks []blocks.Block) error { +func (i *IPFSBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { // TODO: could be done in parallel for _, block := range blocks { - if err := i.Put(block); err != nil { + if err := i.Put(ctx, block); err != nil { return err } } diff --git a/blockstore/mem.go b/blockstore/mem.go index 8ea69d46a..d6b14f002 
100644 --- a/blockstore/mem.go +++ b/blockstore/mem.go @@ -15,24 +15,24 @@ func NewMemory() MemBlockstore { // MemBlockstore is a terminal blockstore that keeps blocks in memory. type MemBlockstore map[cid.Cid]blocks.Block -func (m MemBlockstore) DeleteBlock(k cid.Cid) error { +func (m MemBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { delete(m, k) return nil } -func (m MemBlockstore) DeleteMany(ks []cid.Cid) error { +func (m MemBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { for _, k := range ks { delete(m, k) } return nil } -func (m MemBlockstore) Has(k cid.Cid) (bool, error) { +func (m MemBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { _, ok := m[k] return ok, nil } -func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m MemBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { b, ok := m[k] if !ok { return ErrNotFound @@ -40,7 +40,7 @@ func (m MemBlockstore) View(k cid.Cid, callback func([]byte) error) error { return callback(b.RawData()) } -func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (m MemBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { b, ok := m[k] if !ok { return nil, ErrNotFound @@ -49,7 +49,7 @@ func (m MemBlockstore) Get(k cid.Cid) (blocks.Block, error) { } // GetSize returns the CIDs mapped BlockSize -func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { +func (m MemBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { b, ok := m[k] if !ok { return 0, ErrNotFound @@ -58,7 +58,7 @@ func (m MemBlockstore) GetSize(k cid.Cid) (int, error) { } // Put puts a given block to the underlying datastore -func (m MemBlockstore) Put(b blocks.Block) error { +func (m MemBlockstore) Put(ctx context.Context, b blocks.Block) error { // Convert to a basic block for safety, but try to reuse the existing // block if it's already a basic block. 
k := b.Cid() @@ -76,9 +76,9 @@ func (m MemBlockstore) Put(b blocks.Block) error { // PutMany puts a slice of blocks at the same time using batching // capabilities of the underlying datastore whenever possible. -func (m MemBlockstore) PutMany(bs []blocks.Block) error { +func (m MemBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { for _, b := range bs { - _ = m.Put(b) // can't fail + _ = m.Put(ctx, b) // can't fail } return nil } diff --git a/blockstore/splitstore/markset.go b/blockstore/splitstore/markset.go index 218681e13..f173be575 100644 --- a/blockstore/splitstore/markset.go +++ b/blockstore/splitstore/markset.go @@ -10,20 +10,12 @@ import ( var errMarkSetClosed = errors.New("markset closed") -// MarkSet is a utility to keep track of seen CID, and later query for them. -// -// * If the expected dataset is large, it can be backed by a datastore (e.g. bbolt). -// * If a probabilistic result is acceptable, it can be backed by a bloom filter +// MarkSet is an interface for tracking CIDs during chain and object walks type MarkSet interface { + ObjectVisitor Mark(cid.Cid) error Has(cid.Cid) (bool, error) Close() error - SetConcurrent() -} - -type MarkSetVisitor interface { - MarkSet - ObjectVisitor } type MarkSetEnv interface { @@ -31,11 +23,7 @@ type MarkSetEnv interface { // name is a unique name for this markset, mapped to the filesystem in disk-backed environments // sizeHint is a hint about the expected size of the markset Create(name string, sizeHint int64) (MarkSet, error) - // CreateVisitor is like Create, but returns a wider interface that supports atomic visits. - // It may not be supported by some markset types (e.g. bloom). - CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) - // SupportsVisitor returns true if the marksets created by this environment support the visitor interface. 
- SupportsVisitor() bool + // Close closes the markset Close() error } diff --git a/blockstore/splitstore/markset_badger.go b/blockstore/splitstore/markset_badger.go index ae06a69f8..e30334b89 100644 --- a/blockstore/splitstore/markset_badger.go +++ b/blockstore/splitstore/markset_badger.go @@ -34,7 +34,6 @@ type BadgerMarkSet struct { } var _ MarkSet = (*BadgerMarkSet)(nil) -var _ MarkSetVisitor = (*BadgerMarkSet)(nil) var badgerMarkSetBatchSize = 16384 @@ -48,7 +47,7 @@ func NewBadgerMarkSetEnv(path string) (MarkSetEnv, error) { return &BadgerMarkSetEnv{path: msPath}, nil } -func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, error) { +func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { name += ".tmp" path := filepath.Join(e.path, name) @@ -68,16 +67,6 @@ func (e *BadgerMarkSetEnv) create(name string, sizeHint int64) (*BadgerMarkSet, return ms, nil } -func (e *BadgerMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { - return e.create(name, sizeHint) -} - -func (e *BadgerMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) { - return e.create(name, sizeHint) -} - -func (e *BadgerMarkSetEnv) SupportsVisitor() bool { return true } - func (e *BadgerMarkSetEnv) Close() error { return os.RemoveAll(e.path) } diff --git a/blockstore/splitstore/markset_map.go b/blockstore/splitstore/markset_map.go index 07a7ae70d..fda964663 100644 --- a/blockstore/splitstore/markset_map.go +++ b/blockstore/splitstore/markset_map.go @@ -13,42 +13,27 @@ var _ MarkSetEnv = (*MapMarkSetEnv)(nil) type MapMarkSet struct { mx sync.RWMutex set map[string]struct{} - - ts bool } var _ MarkSet = (*MapMarkSet)(nil) -var _ MarkSetVisitor = (*MapMarkSet)(nil) func NewMapMarkSetEnv() (*MapMarkSetEnv, error) { return &MapMarkSetEnv{}, nil } -func (e *MapMarkSetEnv) create(name string, sizeHint int64) (*MapMarkSet, error) { +func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { 
return &MapMarkSet{ set: make(map[string]struct{}, sizeHint), }, nil } -func (e *MapMarkSetEnv) Create(name string, sizeHint int64) (MarkSet, error) { - return e.create(name, sizeHint) -} - -func (e *MapMarkSetEnv) CreateVisitor(name string, sizeHint int64) (MarkSetVisitor, error) { - return e.create(name, sizeHint) -} - -func (e *MapMarkSetEnv) SupportsVisitor() bool { return true } - func (e *MapMarkSetEnv) Close() error { return nil } func (s *MapMarkSet) Mark(cid cid.Cid) error { - if s.ts { - s.mx.Lock() - defer s.mx.Unlock() - } + s.mx.Lock() + defer s.mx.Unlock() if s.set == nil { return errMarkSetClosed @@ -59,10 +44,8 @@ func (s *MapMarkSet) Mark(cid cid.Cid) error { } func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) { - if s.ts { - s.mx.RLock() - defer s.mx.RUnlock() - } + s.mx.RLock() + defer s.mx.RUnlock() if s.set == nil { return false, errMarkSetClosed @@ -73,10 +56,8 @@ func (s *MapMarkSet) Has(cid cid.Cid) (bool, error) { } func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) { - if s.ts { - s.mx.Lock() - defer s.mx.Unlock() - } + s.mx.Lock() + defer s.mx.Unlock() if s.set == nil { return false, errMarkSetClosed @@ -92,14 +73,9 @@ func (s *MapMarkSet) Visit(c cid.Cid) (bool, error) { } func (s *MapMarkSet) Close() error { - if s.ts { - s.mx.Lock() - defer s.mx.Unlock() - } + s.mx.Lock() + defer s.mx.Unlock() + s.set = nil return nil } - -func (s *MapMarkSet) SetConcurrent() { - s.ts = true -} diff --git a/blockstore/splitstore/markset_test.go b/blockstore/splitstore/markset_test.go index a4a42e860..de9421f08 100644 --- a/blockstore/splitstore/markset_test.go +++ b/blockstore/splitstore/markset_test.go @@ -167,7 +167,7 @@ func testMarkSetVisitor(t *testing.T, lsType string) { } defer env.Close() //nolint:errcheck - visitor, err := env.CreateVisitor("test", 0) + visitor, err := env.Create("test", 0) if err != nil { t.Fatal(err) } diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index 0e34fe952..62cb2459e 100644 
--- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -18,6 +18,8 @@ import ( "github.com/filecoin-project/go-state-types/abi" bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/metrics" @@ -47,6 +49,9 @@ var ( enableDebugLog = false // set this to true if you want to track origin stack traces in the write log enableDebugLogWriteTraces = false + + // upgradeBoundary is the boundary before and after an upgrade where we suppress compaction + upgradeBoundary = build.Finality ) func init() { @@ -98,6 +103,12 @@ type ChainAccessor interface { SubscribeHeadChanges(change func(revert []*types.TipSet, apply []*types.TipSet) error) } +// upgradeRange is a precomputed epoch range during which we shouldn't compact so as to not +// interfere with an upgrade +type upgradeRange struct { + start, end abi.ChainEpoch +} + // hotstore is the interface that must be satisfied by the hot blockstore; it is an extension // of the Blockstore interface with the traits we need for compaction. 
type hotstore interface { @@ -125,6 +136,8 @@ type SplitStore struct { cold bstore.Blockstore hot hotstore + upgrades []upgradeRange + markSetEnv MarkSetEnv markSetSize int64 @@ -173,10 +186,6 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co return nil, err } - if !markSetEnv.SupportsVisitor() { - return nil, xerrors.Errorf("markset type does not support atomic visitors") - } - // and now we can make a SplitStore ss := &SplitStore{ cfg: cfg, @@ -203,17 +212,17 @@ func Open(path string, ds dstore.Datastore, hot, cold bstore.Blockstore, cfg *Co } // Blockstore interface -func (s *SplitStore) DeleteBlock(_ cid.Cid) error { +func (s *SplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteBlock not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) DeleteMany(_ []cid.Cid) error { +func (s *SplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { // afaict we don't seem to be using this method, so it's not implemented return errors.New("DeleteMany not implemented on SplitStore; don't do this Luke!") //nolint } -func (s *SplitStore) Has(cid cid.Cid) (bool, error) { +func (s *SplitStore) Has(ctx context.Context, cid cid.Cid) (bool, error) { if isIdentiyCid(cid) { return true, nil } @@ -221,7 +230,7 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - has, err := s.hot.Has(cid) + has, err := s.hot.Has(ctx, cid) if err != nil { return has, err @@ -232,10 +241,10 @@ func (s *SplitStore) Has(cid cid.Cid) (bool, error) { return true, nil } - return s.cold.Has(cid) + return s.cold.Has(ctx, cid) } -func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { +func (s *SplitStore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -248,7 +257,7 @@ func (s *SplitStore) 
Get(cid cid.Cid) (blocks.Block, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - blk, err := s.hot.Get(cid) + blk, err := s.hot.Get(ctx, cid) switch err { case nil: @@ -260,7 +269,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { s.debug.LogReadMiss(cid) } - blk, err = s.cold.Get(cid) + blk, err = s.cold.Get(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) @@ -272,7 +281,7 @@ func (s *SplitStore) Get(cid cid.Cid) (blocks.Block, error) { } } -func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { +func (s *SplitStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -285,7 +294,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.txnLk.RLock() defer s.txnLk.RUnlock() - size, err := s.hot.GetSize(cid) + size, err := s.hot.GetSize(ctx, cid) switch err { case nil: @@ -297,7 +306,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { s.debug.LogReadMiss(cid) } - size, err = s.cold.GetSize(cid) + size, err = s.cold.GetSize(ctx, cid) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -308,7 +317,7 @@ func (s *SplitStore) GetSize(cid cid.Cid) (int, error) { } } -func (s *SplitStore) Put(blk blocks.Block) error { +func (s *SplitStore) Put(ctx context.Context, blk blocks.Block) error { if isIdentiyCid(blk.Cid()) { return nil } @@ -316,7 +325,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() - err := s.hot.Put(blk) + err := s.hot.Put(ctx, blk) if err != nil { return err } @@ -327,7 +336,7 @@ func (s *SplitStore) Put(blk blocks.Block) error { return nil } -func (s *SplitStore) PutMany(blks []blocks.Block) error { +func (s *SplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { // filter identites idcids := 0 for _, blk := range blks { @@ -361,7 +370,7 @@ func (s *SplitStore) PutMany(blks []blocks.Block) error { s.txnLk.RLock() defer s.txnLk.RUnlock() 
- err := s.hot.PutMany(blks) + err := s.hot.PutMany(ctx, blks) if err != nil { return err } @@ -417,7 +426,7 @@ func (s *SplitStore) HashOnRead(enabled bool) { s.cold.HashOnRead(enabled) } -func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { +func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) error) error { if isIdentiyCid(cid) { data, err := decodeIdentityCid(cid) if err != nil { @@ -438,14 +447,14 @@ func (s *SplitStore) View(cid cid.Cid, cb func([]byte) error) error { s.protectView(cid) defer s.viewDone() - err := s.hot.View(cid, cb) + err := s.hot.View(ctx, cid, cb) switch err { case bstore.ErrNotFound: if s.isWarm() { s.debug.LogReadMiss(cid) } - err = s.cold.View(cid, cb) + err = s.cold.View(ctx, cid, cb) if err == nil { stats.Record(s.ctx, metrics.SplitstoreMiss.M(1)) } @@ -463,16 +472,33 @@ func (s *SplitStore) isWarm() bool { } // State tracking -func (s *SplitStore) Start(chain ChainAccessor) error { +func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error { s.chain = chain curTs := chain.GetHeaviestTipSet() + // precompute the upgrade boundaries + s.upgrades = make([]upgradeRange, 0, len(us)) + for _, upgrade := range us { + boundary := upgrade.Height + for _, pre := range upgrade.PreMigrations { + preMigrationBoundary := upgrade.Height - pre.StartWithin + if preMigrationBoundary < boundary { + boundary = preMigrationBoundary + } + } + + upgradeStart := boundary - upgradeBoundary + upgradeEnd := upgrade.Height + upgradeBoundary + + s.upgrades = append(s.upgrades, upgradeRange{start: upgradeStart, end: upgradeEnd}) + } + // should we warmup warmup := false // load base epoch from metadata ds // if none, then use current epoch because it's a fresh start - bs, err := s.ds.Get(baseEpochKey) + bs, err := s.ds.Get(s.ctx, baseEpochKey) switch err { case nil: s.baseEpoch = bytesToEpoch(bs) @@ -493,7 +519,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load warmup epoch from 
metadata ds - bs, err = s.ds.Get(warmupEpochKey) + bs, err = s.ds.Get(s.ctx, warmupEpochKey) switch err { case nil: s.warmupEpoch = bytesToEpoch(bs) @@ -506,7 +532,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load markSetSize from metadata ds to provide a size hint for marksets - bs, err = s.ds.Get(markSetSizeKey) + bs, err = s.ds.Get(s.ctx, markSetSizeKey) switch err { case nil: s.markSetSize = bytesToInt64(bs) @@ -517,7 +543,7 @@ func (s *SplitStore) Start(chain ChainAccessor) error { } // load compactionIndex from metadata ds to provide a hint as to when to perform moving gc - bs, err = s.ds.Get(compactionIndexKey) + bs, err = s.ds.Get(s.ctx, compactionIndexKey) switch err { case nil: s.compactionIndex = bytesToInt64(bs) @@ -579,5 +605,5 @@ func (s *SplitStore) checkClosing() error { func (s *SplitStore) setBaseEpoch(epoch abi.ChainEpoch) error { s.baseEpoch = epoch - return s.ds.Put(baseEpochKey, epochToBytes(epoch)) + return s.ds.Put(s.ctx, baseEpochKey, epochToBytes(epoch)) } diff --git a/blockstore/splitstore/splitstore_check.go b/blockstore/splitstore/splitstore_check.go index 8907abf9e..0b4cfe044 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "path/filepath" + "sync" "sync/atomic" "time" @@ -67,7 +68,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { } defer output.Close() //nolint:errcheck + var mx sync.Mutex write := func(format string, args ...interface{}) { + mx.Lock() + defer mx.Unlock() _, err := fmt.Fprintf(output, format+"\n", args...) 
if err != nil { log.Warnf("error writing check output: %s", err) @@ -82,9 +86,10 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { write("compaction index: %d", s.compactionIndex) write("--") - var coldCnt, missingCnt int64 + coldCnt := new(int64) + missingCnt := new(int64) - visitor, err := s.markSetEnv.CreateVisitor("check", 0) + visitor, err := s.markSetEnv.Create("check", 0) if err != nil { return xerrors.Errorf("error creating visitor: %w", err) } @@ -96,7 +101,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return errStopWalk } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { return xerrors.Errorf("error checking hotstore: %w", err) } @@ -105,16 +110,16 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return nil } - has, err = s.cold.Has(c) + has, err = s.cold.Has(s.ctx, c) if err != nil { return xerrors.Errorf("error checking coldstore: %w", err) } if has { - coldCnt++ + atomic.AddInt64(coldCnt, 1) write("cold object reference: %s", c) } else { - missingCnt++ + atomic.AddInt64(missingCnt, 1) write("missing object reference: %s", c) return errStopWalk } @@ -128,9 +133,9 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { return err } - log.Infow("check done", "cold", coldCnt, "missing", missingCnt) + log.Infow("check done", "cold", *coldCnt, "missing", *missingCnt) write("--") - write("cold: %d missing: %d", coldCnt, missingCnt) + write("cold: %d missing: %d", *coldCnt, *missingCnt) write("DONE") return nil diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index 4ff38a5fb..20f99af35 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -5,6 +5,7 @@ import ( "errors" "runtime" "sort" + "sync" "sync/atomic" "time" @@ -99,6 +100,12 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } + if s.isNearUpgrade(epoch) { + // we are near an upgrade epoch, 
suppress compaction + atomic.StoreInt32(&s.compacting, 0) + return nil + } + if epoch-s.baseEpoch > CompactionThreshold { // it's time to compact -- prepare the transaction and go! s.beginTxnProtect() @@ -121,6 +128,16 @@ func (s *SplitStore) HeadChange(_, apply []*types.TipSet) error { return nil } +func (s *SplitStore) isNearUpgrade(epoch abi.ChainEpoch) bool { + for _, upgrade := range s.upgrades { + if epoch >= upgrade.start && epoch <= upgrade.end { + return true + } + } + + return false +} + // transactionally protect incoming tipsets func (s *SplitStore) protectTipSets(apply []*types.TipSet) { s.txnLk.RLock() @@ -211,7 +228,7 @@ func (s *SplitStore) trackTxnRefMany(cids []cid.Cid) { } // protect all pending transactional references -func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error { +func (s *SplitStore) protectTxnRefs(markSet MarkSet) error { for { var txnRefs map[cid.Cid]struct{} @@ -283,14 +300,14 @@ func (s *SplitStore) protectTxnRefs(markSet MarkSetVisitor) error { // transactionally protect a reference by walking the object and marking. // concurrent markings are short circuited by checking the markset. -func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSetVisitor) error { +func (s *SplitStore) doTxnProtect(root cid.Cid, markSet MarkSet) error { if err := s.checkClosing(); err != nil { return err } // Note: cold objects are deleted heaviest first, so the consituents of an object // cannot be deleted before the object itself. 
- return s.walkObjectIncomplete(root, tmpVisitor(), + return s.walkObjectIncomplete(root, newTmpVisitor(), func(c cid.Cid) error { if isUnitaryObject(c) { return errStopWalk @@ -381,7 +398,7 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { log.Infow("running compaction", "currentEpoch", currentEpoch, "baseEpoch", s.baseEpoch, "boundaryEpoch", boundaryEpoch, "inclMsgsEpoch", inclMsgsEpoch, "compactionIndex", s.compactionIndex) - markSet, err := s.markSetEnv.CreateVisitor("live", s.markSetSize) + markSet, err := s.markSetEnv.Create("live", s.markSetSize) if err != nil { return xerrors.Errorf("error creating mark set: %w", err) } @@ -561,13 +578,13 @@ func (s *SplitStore) doCompact(curTs *types.TipSet) error { return xerrors.Errorf("error saving base epoch: %w", err) } - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { return xerrors.Errorf("error saving mark set size: %w", err) } s.compactionIndex++ - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } @@ -586,8 +603,8 @@ func (s *SplitStore) beginTxnProtect() { s.txnMissing = make(map[cid.Cid]struct{}) } -func (s *SplitStore) beginTxnMarking(markSet MarkSetVisitor) { - markSet.SetConcurrent() +func (s *SplitStore) beginTxnMarking(markSet MarkSet) { + log.Info("beginning transactional marking") } func (s *SplitStore) endTxnProtect() { @@ -605,26 +622,33 @@ func (s *SplitStore) endTxnProtect() { func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEpoch, visitor ObjectVisitor, f func(cid.Cid) error) error { - var walked *cid.Set - toWalk := ts.Cids() - walkCnt := 0 - scanCnt := 0 + var walked ObjectVisitor + var mx sync.Mutex + // we copy the tipset first into a new slice, which allows us to reuse it in every 
epoch. + toWalk := make([]cid.Cid, len(ts.Cids())) + copy(toWalk, ts.Cids()) + walkCnt := new(int64) + scanCnt := new(int64) stopWalk := func(_ cid.Cid) error { return errStopWalk } walkBlock := func(c cid.Cid) error { - if !walked.Visit(c) { + visit, err := walked.Visit(c) + if err != nil { + return err + } + if !visit { return nil } - walkCnt++ + atomic.AddInt64(walkCnt, 1) if err := f(c); err != nil { return err } var hdr types.BlockHeader - err := s.view(c, func(data []byte) error { + err = s.view(c, func(data []byte) error { return hdr.UnmarshalCBOR(bytes.NewBuffer(data)) }) @@ -660,11 +684,13 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err := s.walkObject(hdr.ParentStateRoot, visitor, f); err != nil { return xerrors.Errorf("error walking state root (cid: %s): %w", hdr.ParentStateRoot, err) } - scanCnt++ + atomic.AddInt64(scanCnt, 1) } if hdr.Height > 0 { + mx.Lock() toWalk = append(toWalk, hdr.Parents...) + mx.Unlock() } return nil @@ -676,20 +702,43 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp return err } + workers := len(toWalk) + if workers > runtime.NumCPU()/2 { + workers = runtime.NumCPU() / 2 + } + if workers < 2 { + workers = 2 + } + // the walk is BFS, so we can reset the walked set in every iteration and avoid building up // a set that contains all blocks (1M epochs -> 5M blocks -> 200MB worth of memory and growing // over time) - walked = cid.NewSet() - walking := toWalk - toWalk = nil - for _, c := range walking { - if err := walkBlock(c); err != nil { - return xerrors.Errorf("error walking block (cid: %s): %w", c, err) - } + walked = newConcurrentVisitor() + workch := make(chan cid.Cid, len(toWalk)) + for _, c := range toWalk { + workch <- c + } + close(workch) + toWalk = toWalk[:0] + + g := new(errgroup.Group) + for i := 0; i < workers; i++ { + g.Go(func() error { + for c := range workch { + if err := walkBlock(c); err != nil { + return xerrors.Errorf("error 
walking block (cid: %s): %w", c, err) + } + } + return nil + }) + } + + if err := g.Wait(); err != nil { + return err } } - log.Infow("chain walk done", "walked", walkCnt, "scanned", scanCnt) + log.Infow("chain walk done", "walked", *walkCnt, "scanned", *scanCnt) return nil } @@ -819,10 +868,10 @@ func (s *SplitStore) view(c cid.Cid, cb func([]byte) error) error { return cb(data) } - err := s.hot.View(c, cb) + err := s.hot.View(s.ctx, c, cb) switch err { case bstore.ErrNotFound: - return s.cold.View(c, cb) + return s.cold.View(s.ctx, c, cb) default: return err @@ -834,13 +883,13 @@ func (s *SplitStore) has(c cid.Cid) (bool, error) { return true, nil } - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if has || err != nil { return has, err } - return s.cold.Has(c) + return s.cold.Has(s.ctx, c) } func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { @@ -851,7 +900,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { return err } - blk, err := s.hot.Get(c) + blk, err := s.hot.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { log.Warnf("hotstore missing block %s", c) @@ -863,7 +912,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { batch = append(batch, blk) if len(batch) == batchSize { - err = s.cold.PutMany(batch) + err = s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -872,7 +921,7 @@ func (s *SplitStore) moveColdBlocks(cold []cid.Cid) error { } if len(batch) > 0 { - err := s.cold.PutMany(batch) + err := s.cold.PutMany(s.ctx, batch) if err != nil { return xerrors.Errorf("error putting batch to coldstore: %w", err) } @@ -995,7 +1044,7 @@ func (s *SplitStore) purgeBatch(cids []cid.Cid, deleteBatch func([]cid.Cid) erro return nil } -func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error { +func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSet) error { deadCids := make([]cid.Cid, 0, batchSize) var purgeCnt, liveCnt int defer 
func() { @@ -1042,7 +1091,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error { deadCids = append(deadCids, c) } - err := s.hot.DeleteMany(deadCids) + err := s.hot.DeleteMany(s.ctx, deadCids) if err != nil { return xerrors.Errorf("error purging cold objects: %w", err) } @@ -1061,7 +1110,7 @@ func (s *SplitStore) purge(cids []cid.Cid, markSet MarkSetVisitor) error { // have this gem[TM]. // My best guess is that they are parent message receipts or yet to be computed state roots; magik // thinks the cause may be block validation. -func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) { +func (s *SplitStore) waitForMissingRefs(markSet MarkSet) { s.txnLk.Lock() missing := s.txnMissing s.txnMissing = nil @@ -1090,7 +1139,7 @@ func (s *SplitStore) waitForMissingRefs(markSet MarkSetVisitor) { } towalk := missing - visitor := tmpVisitor() + visitor := newTmpVisitor() missing = make(map[cid.Cid]struct{}) for c := range towalk { diff --git a/blockstore/splitstore/splitstore_expose.go b/blockstore/splitstore/splitstore_expose.go index 1065e460c..6f838229d 100644 --- a/blockstore/splitstore/splitstore_expose.go +++ b/blockstore/splitstore/splitstore_expose.go @@ -20,28 +20,28 @@ func (s *SplitStore) Expose() bstore.Blockstore { return &exposedSplitStore{s: s} } -func (es *exposedSplitStore) DeleteBlock(_ cid.Cid) error { +func (es *exposedSplitStore) DeleteBlock(_ context.Context, _ cid.Cid) error { return errors.New("DeleteBlock: operation not supported") } -func (es *exposedSplitStore) DeleteMany(_ []cid.Cid) error { +func (es *exposedSplitStore) DeleteMany(_ context.Context, _ []cid.Cid) error { return errors.New("DeleteMany: operation not supported") } -func (es *exposedSplitStore) Has(c cid.Cid) (bool, error) { +func (es *exposedSplitStore) Has(ctx context.Context, c cid.Cid) (bool, error) { if isIdentiyCid(c) { return true, nil } - has, err := es.s.hot.Has(c) + has, err := es.s.hot.Has(ctx, c) if has || err != nil { return has, err } - 
return es.s.cold.Has(c) + return es.s.cold.Has(ctx, c) } -func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { +func (es *exposedSplitStore) Get(ctx context.Context, c cid.Cid) (blocks.Block, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -51,16 +51,16 @@ func (es *exposedSplitStore) Get(c cid.Cid) (blocks.Block, error) { return blocks.NewBlockWithCid(data, c) } - blk, err := es.s.hot.Get(c) + blk, err := es.s.hot.Get(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.Get(c) + return es.s.cold.Get(ctx, c) default: return blk, err } } -func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { +func (es *exposedSplitStore) GetSize(ctx context.Context, c cid.Cid) (int, error) { if isIdentiyCid(c) { data, err := decodeIdentityCid(c) if err != nil { @@ -70,21 +70,21 @@ func (es *exposedSplitStore) GetSize(c cid.Cid) (int, error) { return len(data), nil } - size, err := es.s.hot.GetSize(c) + size, err := es.s.hot.GetSize(ctx, c) switch err { case bstore.ErrNotFound: - return es.s.cold.GetSize(c) + return es.s.cold.GetSize(ctx, c) default: return size, err } } -func (es *exposedSplitStore) Put(blk blocks.Block) error { - return es.s.Put(blk) +func (es *exposedSplitStore) Put(ctx context.Context, blk blocks.Block) error { + return es.s.Put(ctx, blk) } -func (es *exposedSplitStore) PutMany(blks []blocks.Block) error { - return es.s.PutMany(blks) +func (es *exposedSplitStore) PutMany(ctx context.Context, blks []blocks.Block) error { + return es.s.PutMany(ctx, blks) } func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { @@ -93,7 +93,7 @@ func (es *exposedSplitStore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, e func (es *exposedSplitStore) HashOnRead(enabled bool) {} -func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { +func (es *exposedSplitStore) View(ctx context.Context, c cid.Cid, f func([]byte) error) error { if isIdentiyCid(c) { data, err 
:= decodeIdentityCid(c) if err != nil { @@ -103,10 +103,10 @@ func (es *exposedSplitStore) View(c cid.Cid, f func([]byte) error) error { return f(data) } - err := es.s.hot.View(c, f) + err := es.s.hot.View(ctx, c, f) switch err { case bstore.ErrNotFound: - return es.s.cold.View(c, f) + return es.s.cold.View(ctx, c, f) default: return err diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index df9984d41..7d84e0a4c 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -11,6 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/types/mock" @@ -29,6 +30,7 @@ func init() { } func testSplitStore(t *testing.T, cfg *Config) { + ctx := context.Background() chain := &mockChain{t: t} // the myriads of stores @@ -38,7 +40,7 @@ func testSplitStore(t *testing.T, cfg *Config) { // this is necessary to avoid the garbage mock puts in the blocks garbage := blocks.NewBlock([]byte{1, 2, 3}) - err := cold.Put(garbage) + err := cold.Put(ctx, garbage) if err != nil { t.Fatal(err) } @@ -59,21 +61,21 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal(err) } - err = cold.Put(blk) + err = cold.Put(ctx, blk) if err != nil { t.Fatal(err) } // create a garbage block that is protected with a rgistered protector protected := blocks.NewBlock([]byte("protected!")) - err = hot.Put(protected) + err = hot.Put(ctx, protected) if err != nil { t.Fatal(err) } // and another one that is not protected unprotected := blocks.NewBlock([]byte("unprotected!")) - err = hot.Put(unprotected) + err = hot.Put(ctx, unprotected) if err != nil { t.Fatal(err) } @@ -90,7 +92,7 @@ func testSplitStore(t *testing.T, cfg *Config) { return protect(protected.Cid()) }) - err = ss.Start(chain) + err = ss.Start(chain, nil) if err 
!= nil { t.Fatal(err) } @@ -108,11 +110,11 @@ func testSplitStore(t *testing.T, cfg *Config) { if err != nil { t.Fatal(err) } - err = ss.Put(stateRoot) + err = ss.Put(ctx, stateRoot) if err != nil { t.Fatal(err) } - err = ss.Put(sblk) + err = ss.Put(ctx, sblk) if err != nil { t.Fatal(err) } @@ -175,7 +177,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our protected block is still there - has, err := hot.Has(protected.Cid()) + has, err := hot.Has(ctx, protected.Cid()) if err != nil { t.Fatal(err) } @@ -185,7 +187,7 @@ func testSplitStore(t *testing.T, cfg *Config) { } // ensure our unprotected block is in the coldstore now - has, err = hot.Has(unprotected.Cid()) + has, err = hot.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -194,7 +196,7 @@ func testSplitStore(t *testing.T, cfg *Config) { t.Fatal("unprotected block is still in hotstore") } - has, err = cold.Has(unprotected.Cid()) + has, err = cold.Has(ctx, unprotected.Cid()) if err != nil { t.Fatal(err) } @@ -220,6 +222,141 @@ func TestSplitStoreCompactionWithBadger(t *testing.T) { testSplitStore(t, &Config{MarkSetType: "badger"}) } +func TestSplitStoreSuppressCompactionNearUpgrade(t *testing.T) { + ctx := context.Background() + chain := &mockChain{t: t} + + // the myriads of stores + ds := dssync.MutexWrap(datastore.NewMapDatastore()) + hot := newMockStore() + cold := newMockStore() + + // this is necessary to avoid the garbage mock puts in the blocks + garbage := blocks.NewBlock([]byte{1, 2, 3}) + err := cold.Put(ctx, garbage) + if err != nil { + t.Fatal(err) + } + + // genesis + genBlock := mock.MkBlock(nil, 0, 0) + genBlock.Messages = garbage.Cid() + genBlock.ParentMessageReceipts = garbage.Cid() + genBlock.ParentStateRoot = garbage.Cid() + genBlock.Timestamp = uint64(time.Now().Unix()) + + genTs := mock.TipSet(genBlock) + chain.push(genTs) + + // put the genesis block to cold store + blk, err := genBlock.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + + err = 
cold.Put(ctx, blk) + if err != nil { + t.Fatal(err) + } + + // open the splitstore + ss, err := Open("", ds, hot, cold, &Config{MarkSetType: "map"}) + if err != nil { + t.Fatal(err) + } + defer ss.Close() //nolint + + // create an upgrade schedule that will suppress compaction during the test + upgradeBoundary = 0 + upgrade := stmgr.Upgrade{ + Height: 10, + PreMigrations: []stmgr.PreMigration{{StartWithin: 10}}, + } + + err = ss.Start(chain, []stmgr.Upgrade{upgrade}) + if err != nil { + t.Fatal(err) + } + + mkBlock := func(curTs *types.TipSet, i int, stateRoot blocks.Block) *types.TipSet { + blk := mock.MkBlock(curTs, uint64(i), uint64(i)) + + blk.Messages = garbage.Cid() + blk.ParentMessageReceipts = garbage.Cid() + blk.ParentStateRoot = stateRoot.Cid() + blk.Timestamp = uint64(time.Now().Unix()) + + sblk, err := blk.ToStorageBlock() + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, stateRoot) + if err != nil { + t.Fatal(err) + } + err = ss.Put(ctx, sblk) + if err != nil { + t.Fatal(err) + } + ts := mock.TipSet(blk) + chain.push(ts) + + return ts + } + + waitForCompaction := func() { + for atomic.LoadInt32(&ss.compacting) == 1 { + time.Sleep(100 * time.Millisecond) + } + } + + curTs := genTs + for i := 1; i < 10; i++ { + stateRoot := blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + countBlocks := func(bs blockstore.Blockstore) int { + count := 0 + _ = bs.(blockstore.BlockstoreIterator).ForEachKey(func(_ cid.Cid) error { + count++ + return nil + }) + return count + } + + // we should not have compacted due to suppression and everything should still be hot + hotCnt := countBlocks(hot) + coldCnt := countBlocks(cold) + + if hotCnt != 20 { + t.Errorf("expected %d blocks, but got %d", 20, hotCnt) + } + + if coldCnt != 2 { + t.Errorf("expected %d blocks, but got %d", 2, coldCnt) + } + + // put some more blocks, now we should compact + for i := 10; i < 20; i++ { + stateRoot := 
blocks.NewBlock([]byte{byte(i), 3, 3, 7}) + curTs = mkBlock(curTs, i, stateRoot) + waitForCompaction() + } + + hotCnt = countBlocks(hot) + coldCnt = countBlocks(cold) + + if hotCnt != 24 { + t.Errorf("expected %d blocks, but got %d", 24, hotCnt) + } + + if coldCnt != 18 { + t.Errorf("expected %d blocks, but got %d", 18, coldCnt) + } +} + type mockChain struct { t testing.TB @@ -296,7 +433,7 @@ func newMockStore() *mockStore { return &mockStore{set: make(map[cid.Cid]blocks.Block)} } -func (b *mockStore) Has(cid cid.Cid) (bool, error) { +func (b *mockStore) Has(_ context.Context, cid cid.Cid) (bool, error) { b.mx.Lock() defer b.mx.Unlock() _, ok := b.set[cid] @@ -305,7 +442,7 @@ func (b *mockStore) Has(cid cid.Cid) (bool, error) { func (b *mockStore) HashOnRead(hor bool) {} -func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { +func (b *mockStore) Get(_ context.Context, cid cid.Cid) (blocks.Block, error) { b.mx.Lock() defer b.mx.Unlock() @@ -316,8 +453,8 @@ func (b *mockStore) Get(cid cid.Cid) (blocks.Block, error) { return blk, nil } -func (b *mockStore) GetSize(cid cid.Cid) (int, error) { - blk, err := b.Get(cid) +func (b *mockStore) GetSize(ctx context.Context, cid cid.Cid) (int, error) { + blk, err := b.Get(ctx, cid) if err != nil { return 0, err } @@ -325,15 +462,15 @@ func (b *mockStore) GetSize(cid cid.Cid) (int, error) { return len(blk.RawData()), nil } -func (b *mockStore) View(cid cid.Cid, f func([]byte) error) error { - blk, err := b.Get(cid) +func (b *mockStore) View(ctx context.Context, cid cid.Cid, f func([]byte) error) error { + blk, err := b.Get(ctx, cid) if err != nil { return err } return f(blk.RawData()) } -func (b *mockStore) Put(blk blocks.Block) error { +func (b *mockStore) Put(_ context.Context, blk blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -341,7 +478,7 @@ func (b *mockStore) Put(blk blocks.Block) error { return nil } -func (b *mockStore) PutMany(blks []blocks.Block) error { +func (b *mockStore) PutMany(_ 
context.Context, blks []blocks.Block) error { b.mx.Lock() defer b.mx.Unlock() @@ -351,7 +488,7 @@ func (b *mockStore) PutMany(blks []blocks.Block) error { return nil } -func (b *mockStore) DeleteBlock(cid cid.Cid) error { +func (b *mockStore) DeleteBlock(_ context.Context, cid cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() @@ -359,7 +496,7 @@ func (b *mockStore) DeleteBlock(cid cid.Cid) error { return nil } -func (b *mockStore) DeleteMany(cids []cid.Cid) error { +func (b *mockStore) DeleteMany(_ context.Context, cids []cid.Cid) error { b.mx.Lock() defer b.mx.Unlock() diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index 216de571a..0670bd0f6 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -1,6 +1,7 @@ package splitstore import ( + "sync" "sync/atomic" "time" @@ -55,12 +56,13 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { if WarmupBoundary < epoch { boundaryEpoch = epoch - WarmupBoundary } + var mx sync.Mutex batchHot := make([]blocks.Block, 0, batchSize) - count := int64(0) - xcount := int64(0) - missing := int64(0) + count := new(int64) + xcount := new(int64) + missing := new(int64) - visitor, err := s.markSetEnv.CreateVisitor("warmup", 0) + visitor, err := s.markSetEnv.Create("warmup", 0) if err != nil { return xerrors.Errorf("error creating visitor: %w", err) } @@ -73,9 +75,9 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { return errStopWalk } - count++ + atomic.AddInt64(count, 1) - has, err := s.hot.Has(c) + has, err := s.hot.Has(s.ctx, c) if err != nil { return err } @@ -84,25 +86,28 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { return nil } - blk, err := s.cold.Get(c) + blk, err := s.cold.Get(s.ctx, c) if err != nil { if err == bstore.ErrNotFound { - missing++ + atomic.AddInt64(missing, 1) return errStopWalk } return err } - xcount++ + atomic.AddInt64(xcount, 1) + mx.Lock() batchHot = append(batchHot, 
blk) if len(batchHot) == batchSize { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { + mx.Unlock() return err } batchHot = batchHot[:0] } + mx.Unlock() return nil }) @@ -112,22 +117,22 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { } if len(batchHot) > 0 { - err = s.hot.PutMany(batchHot) + err = s.hot.PutMany(s.ctx, batchHot) if err != nil { return err } } - log.Infow("warmup stats", "visited", count, "warm", xcount, "missing", missing) + log.Infow("warmup stats", "visited", *count, "warm", *xcount, "missing", *missing) - s.markSetSize = count + count>>2 // overestimate a bit - err = s.ds.Put(markSetSizeKey, int64ToBytes(s.markSetSize)) + s.markSetSize = *count + *count>>2 // overestimate a bit + err = s.ds.Put(s.ctx, markSetSizeKey, int64ToBytes(s.markSetSize)) if err != nil { log.Warnf("error saving mark set size: %s", err) } // save the warmup epoch - err = s.ds.Put(warmupEpochKey, epochToBytes(epoch)) + err = s.ds.Put(s.ctx, warmupEpochKey, epochToBytes(epoch)) if err != nil { return xerrors.Errorf("error saving warm up epoch: %w", err) } @@ -136,7 +141,7 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { s.mx.Unlock() // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes - err = s.ds.Put(compactionIndexKey, int64ToBytes(s.compactionIndex)) + err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) if err != nil { return xerrors.Errorf("error saving compaction index: %w", err) } diff --git a/blockstore/splitstore/visitor.go b/blockstore/splitstore/visitor.go index f89c8f389..9dfbb78e7 100644 --- a/blockstore/splitstore/visitor.go +++ b/blockstore/splitstore/visitor.go @@ -1,6 +1,8 @@ package splitstore import ( + "sync" + cid "github.com/ipfs/go-cid" ) @@ -17,16 +19,34 @@ func (v *noopVisitor) Visit(_ cid.Cid) (bool, error) { return true, nil } -type cidSetVisitor struct { +type tmpVisitor struct { set *cid.Set } -var _ ObjectVisitor = 
(*cidSetVisitor)(nil) +var _ ObjectVisitor = (*tmpVisitor)(nil) -func (v *cidSetVisitor) Visit(c cid.Cid) (bool, error) { +func (v *tmpVisitor) Visit(c cid.Cid) (bool, error) { return v.set.Visit(c), nil } -func tmpVisitor() ObjectVisitor { - return &cidSetVisitor{set: cid.NewSet()} +func newTmpVisitor() ObjectVisitor { + return &tmpVisitor{set: cid.NewSet()} +} + +type concurrentVisitor struct { + mx sync.Mutex + set *cid.Set +} + +var _ ObjectVisitor = (*concurrentVisitor)(nil) + +func newConcurrentVisitor() *concurrentVisitor { + return &concurrentVisitor{set: cid.NewSet()} +} + +func (v *concurrentVisitor) Visit(c cid.Cid) (bool, error) { + v.mx.Lock() + defer v.mx.Unlock() + + return v.set.Visit(c), nil } diff --git a/blockstore/sync.go b/blockstore/sync.go index 848ccd19d..1b4ad8297 100644 --- a/blockstore/sync.go +++ b/blockstore/sync.go @@ -20,53 +20,53 @@ type SyncBlockstore struct { bs MemBlockstore // specifically use a memStore to save indirection overhead. } -func (m *SyncBlockstore) DeleteBlock(k cid.Cid) error { +func (m *SyncBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteBlock(k) + return m.bs.DeleteBlock(ctx, k) } -func (m *SyncBlockstore) DeleteMany(ks []cid.Cid) error { +func (m *SyncBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.DeleteMany(ks) + return m.bs.DeleteMany(ctx, ks) } -func (m *SyncBlockstore) Has(k cid.Cid) (bool, error) { +func (m *SyncBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Has(k) + return m.bs.Has(ctx, k) } -func (m *SyncBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (m *SyncBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.View(k, callback) + return m.bs.View(ctx, k, callback) } -func (m *SyncBlockstore) Get(k cid.Cid) 
(blocks.Block, error) { +func (m *SyncBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.Get(k) + return m.bs.Get(ctx, k) } -func (m *SyncBlockstore) GetSize(k cid.Cid) (int, error) { +func (m *SyncBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { m.mu.RLock() defer m.mu.RUnlock() - return m.bs.GetSize(k) + return m.bs.GetSize(ctx, k) } -func (m *SyncBlockstore) Put(b blocks.Block) error { +func (m *SyncBlockstore) Put(ctx context.Context, b blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.Put(b) + return m.bs.Put(ctx, b) } -func (m *SyncBlockstore) PutMany(bs []blocks.Block) error { +func (m *SyncBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { m.mu.Lock() defer m.mu.Unlock() - return m.bs.PutMany(bs) + return m.bs.PutMany(ctx, bs) } func (m *SyncBlockstore) AllKeysChan(ctx context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed.go b/blockstore/timed.go index b279943b6..8f226b0b1 100644 --- a/blockstore/timed.go +++ b/blockstore/timed.go @@ -92,28 +92,28 @@ func (t *TimedCacheBlockstore) rotate() { t.mu.Unlock() } -func (t *TimedCacheBlockstore) Put(b blocks.Block) error { +func (t *TimedCacheBlockstore) Put(ctx context.Context, b blocks.Block) error { // Don't check the inactive set here. We want to keep this block for at // least one interval. 
t.mu.Lock() defer t.mu.Unlock() - return t.active.Put(b) + return t.active.Put(ctx, b) } -func (t *TimedCacheBlockstore) PutMany(bs []blocks.Block) error { +func (t *TimedCacheBlockstore) PutMany(ctx context.Context, bs []blocks.Block) error { t.mu.Lock() defer t.mu.Unlock() - return t.active.PutMany(bs) + return t.active.PutMany(ctx, bs) } -func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) error { +func (t *TimedCacheBlockstore) View(ctx context.Context, k cid.Cid, callback func([]byte) error) error { // The underlying blockstore is always a "mem" blockstore so there's no difference, // from a performance perspective, between view & get. So we call Get to avoid // calling an arbitrary callback while holding a lock. t.mu.RLock() - block, err := t.active.Get(k) + block, err := t.active.Get(ctx, k) if err == ErrNotFound { - block, err = t.inactive.Get(k) + block, err = t.inactive.Get(ctx, k) } t.mu.RUnlock() @@ -123,51 +123,51 @@ func (t *TimedCacheBlockstore) View(k cid.Cid, callback func([]byte) error) erro return callback(block.RawData()) } -func (t *TimedCacheBlockstore) Get(k cid.Cid) (blocks.Block, error) { +func (t *TimedCacheBlockstore) Get(ctx context.Context, k cid.Cid) (blocks.Block, error) { t.mu.RLock() defer t.mu.RUnlock() - b, err := t.active.Get(k) + b, err := t.active.Get(ctx, k) if err == ErrNotFound { - b, err = t.inactive.Get(k) + b, err = t.inactive.Get(ctx, k) } return b, err } -func (t *TimedCacheBlockstore) GetSize(k cid.Cid) (int, error) { +func (t *TimedCacheBlockstore) GetSize(ctx context.Context, k cid.Cid) (int, error) { t.mu.RLock() defer t.mu.RUnlock() - size, err := t.active.GetSize(k) + size, err := t.active.GetSize(ctx, k) if err == ErrNotFound { - size, err = t.inactive.GetSize(k) + size, err = t.inactive.GetSize(ctx, k) } return size, err } -func (t *TimedCacheBlockstore) Has(k cid.Cid) (bool, error) { +func (t *TimedCacheBlockstore) Has(ctx context.Context, k cid.Cid) (bool, error) { t.mu.RLock() defer 
t.mu.RUnlock() - if has, err := t.active.Has(k); err != nil { + if has, err := t.active.Has(ctx, k); err != nil { return false, err } else if has { return true, nil } - return t.inactive.Has(k) + return t.inactive.Has(ctx, k) } func (t *TimedCacheBlockstore) HashOnRead(_ bool) { // no-op } -func (t *TimedCacheBlockstore) DeleteBlock(k cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteBlock(ctx context.Context, k cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteBlock(k), t.inactive.DeleteBlock(k)) + return multierr.Combine(t.active.DeleteBlock(ctx, k), t.inactive.DeleteBlock(ctx, k)) } -func (t *TimedCacheBlockstore) DeleteMany(ks []cid.Cid) error { +func (t *TimedCacheBlockstore) DeleteMany(ctx context.Context, ks []cid.Cid) error { t.mu.Lock() defer t.mu.Unlock() - return multierr.Combine(t.active.DeleteMany(ks), t.inactive.DeleteMany(ks)) + return multierr.Combine(t.active.DeleteMany(ctx, ks), t.inactive.DeleteMany(ctx, ks)) } func (t *TimedCacheBlockstore) AllKeysChan(_ context.Context) (<-chan cid.Cid, error) { diff --git a/blockstore/timed_test.go b/blockstore/timed_test.go index d5fefff94..16795f047 100644 --- a/blockstore/timed_test.go +++ b/blockstore/timed_test.go @@ -19,6 +19,8 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { tc.clock = mClock tc.doneRotatingCh = make(chan struct{}) + ctx := context.Background() + _ = tc.Start(context.Background()) mClock.Add(1) // IDK why it is needed but it makes it work @@ -27,18 +29,18 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { }() b1 := blocks.NewBlock([]byte("foo")) - require.NoError(t, tc.Put(b1)) + require.NoError(t, tc.Put(ctx, b1)) b2 := blocks.NewBlock([]byte("bar")) - require.NoError(t, tc.Put(b2)) + require.NoError(t, tc.Put(ctx, b2)) b3 := blocks.NewBlock([]byte("baz")) - b1out, err := tc.Get(b1.Cid()) + b1out, err := tc.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), b1out.RawData()) - has, err := tc.Has(b1.Cid()) + 
has, err := tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) @@ -46,17 +48,17 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // We should still have everything. - has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) // extend b2, add b3. - require.NoError(t, tc.Put(b2)) - require.NoError(t, tc.Put(b3)) + require.NoError(t, tc.Put(ctx, b2)) + require.NoError(t, tc.Put(ctx, b3)) // all keys once. allKeys, err := tc.AllKeysChan(context.Background()) @@ -71,15 +73,15 @@ func TestTimedCacheBlockstoreSimple(t *testing.T) { <-tc.doneRotatingCh // should still have b2, and b3, but not b1 - has, err = tc.Has(b1.Cid()) + has, err = tc.Has(ctx, b1.Cid()) require.NoError(t, err) require.False(t, has) - has, err = tc.Has(b2.Cid()) + has, err = tc.Has(ctx, b2.Cid()) require.NoError(t, err) require.True(t, has) - has, err = tc.Has(b3.Cid()) + has, err = tc.Has(ctx, b3.Cid()) require.NoError(t, err) require.True(t, has) } diff --git a/blockstore/union.go b/blockstore/union.go index a99ba2591..f54a86590 100644 --- a/blockstore/union.go +++ b/blockstore/union.go @@ -19,72 +19,72 @@ func Union(stores ...Blockstore) Blockstore { return unionBlockstore(stores) } -func (m unionBlockstore) Has(cid cid.Cid) (has bool, err error) { +func (m unionBlockstore) Has(ctx context.Context, cid cid.Cid) (has bool, err error) { for _, bs := range m { - if has, err = bs.Has(cid); has || err != nil { + if has, err = bs.Has(ctx, cid); has || err != nil { break } } return has, err } -func (m unionBlockstore) Get(cid cid.Cid) (blk blocks.Block, err error) { +func (m unionBlockstore) Get(ctx context.Context, cid cid.Cid) (blk blocks.Block, err error) { for _, bs := range m { - if blk, err = bs.Get(cid); err == nil || err != ErrNotFound { + if blk, err = bs.Get(ctx, cid); err == nil || err != 
ErrNotFound { break } } return blk, err } -func (m unionBlockstore) View(cid cid.Cid, callback func([]byte) error) (err error) { +func (m unionBlockstore) View(ctx context.Context, cid cid.Cid, callback func([]byte) error) (err error) { for _, bs := range m { - if err = bs.View(cid, callback); err == nil || err != ErrNotFound { + if err = bs.View(ctx, cid, callback); err == nil || err != ErrNotFound { break } } return err } -func (m unionBlockstore) GetSize(cid cid.Cid) (size int, err error) { +func (m unionBlockstore) GetSize(ctx context.Context, cid cid.Cid) (size int, err error) { for _, bs := range m { - if size, err = bs.GetSize(cid); err == nil || err != ErrNotFound { + if size, err = bs.GetSize(ctx, cid); err == nil || err != ErrNotFound { break } } return size, err } -func (m unionBlockstore) Put(block blocks.Block) (err error) { +func (m unionBlockstore) Put(ctx context.Context, block blocks.Block) (err error) { for _, bs := range m { - if err = bs.Put(block); err != nil { + if err = bs.Put(ctx, block); err != nil { break } } return err } -func (m unionBlockstore) PutMany(blks []blocks.Block) (err error) { +func (m unionBlockstore) PutMany(ctx context.Context, blks []blocks.Block) (err error) { for _, bs := range m { - if err = bs.PutMany(blks); err != nil { + if err = bs.PutMany(ctx, blks); err != nil { break } } return err } -func (m unionBlockstore) DeleteBlock(cid cid.Cid) (err error) { +func (m unionBlockstore) DeleteBlock(ctx context.Context, cid cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteBlock(cid); err != nil { + if err = bs.DeleteBlock(ctx, cid); err != nil { break } } return err } -func (m unionBlockstore) DeleteMany(cids []cid.Cid) (err error) { +func (m unionBlockstore) DeleteMany(ctx context.Context, cids []cid.Cid) (err error) { for _, bs := range m { - if err = bs.DeleteMany(cids); err != nil { + if err = bs.DeleteMany(ctx, cids); err != nil { break } } diff --git a/blockstore/union_test.go b/blockstore/union_test.go 
index b62026892..3ae8c1d49 100644 --- a/blockstore/union_test.go +++ b/blockstore/union_test.go @@ -15,79 +15,81 @@ var ( ) func TestUnionBlockstore_Get(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() - _ = m1.Put(b1) - _ = m2.Put(b2) + _ = m1.Put(ctx, b1) + _ = m2.Put(ctx, b2) u := Union(m1, m2) - v1, err := u.Get(b1.Cid()) + v1, err := u.Get(ctx, b1.Cid()) require.NoError(t, err) require.Equal(t, b1.RawData(), v1.RawData()) - v2, err := u.Get(b2.Cid()) + v2, err := u.Get(ctx, b2.Cid()) require.NoError(t, err) require.Equal(t, b2.RawData(), v2.RawData()) } func TestUnionBlockstore_Put_PutMany_Delete_AllKeysChan(t *testing.T) { + ctx := context.Background() m1 := NewMemory() m2 := NewMemory() u := Union(m1, m2) - err := u.Put(b0) + err := u.Put(ctx, b0) require.NoError(t, err) var has bool // write was broadcasted to all stores. - has, _ = m1.Has(b0.Cid()) + has, _ = m1.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = m2.Has(b0.Cid()) + has, _ = m2.Has(ctx, b0.Cid()) require.True(t, has) - has, _ = u.Has(b0.Cid()) + has, _ = u.Has(ctx, b0.Cid()) require.True(t, has) // put many. - err = u.PutMany([]blocks.Block{b1, b2}) + err = u.PutMany(ctx, []blocks.Block{b1, b2}) require.NoError(t, err) // write was broadcasted to all stores. - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m1.Has(b2.Cid()) + has, _ = m1.Has(ctx, b2.Cid()) require.True(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = m2.Has(b2.Cid()) + has, _ = m2.Has(ctx, b2.Cid()) require.True(t, has) // also in the union store. - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.True(t, has) - has, _ = u.Has(b2.Cid()) + has, _ = u.Has(ctx, b2.Cid()) require.True(t, has) // deleted from all stores. 
- err = u.DeleteBlock(b1.Cid()) + err = u.DeleteBlock(ctx, b1.Cid()) require.NoError(t, err) - has, _ = u.Has(b1.Cid()) + has, _ = u.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m1.Has(b1.Cid()) + has, _ = m1.Has(ctx, b1.Cid()) require.False(t, has) - has, _ = m2.Has(b1.Cid()) + has, _ = m2.Has(ctx, b1.Cid()) require.False(t, has) // check that AllKeysChan returns b0 and b2, twice (once per backing store) diff --git a/build/bootstrap/butterflynet.pi b/build/bootstrap/butterflynet.pi index fbfa1e92c..1972adc5a 100644 --- a/build/bootstrap/butterflynet.pi +++ b/build/bootstrap/butterflynet.pi @@ -1,2 +1,2 @@ -/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBzv5sf4eTyo8cjJGfGnpxo6QkEPkRShG9GqjE2A5QaW5 -/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBo9TSD4XXRFtu6snv6QNYvXgRaSaVb116YiYEsDWgKtq +/dns4/bootstrap-0.butterfly.fildev.network/tcp/1347/p2p/12D3KooWBdRCBLUeKvoy22u5DcXs61adFn31v8WWCZgmBjDCjbsC +/dns4/bootstrap-1.butterfly.fildev.network/tcp/1347/p2p/12D3KooWDUQJBA18njjXnG9RtLxoN3muvdU7PEy55QorUEsdAqdy diff --git a/build/genesis/butterflynet.car b/build/genesis/butterflynet.car index cb8401042..c79eab38e 100644 Binary files a/build/genesis/butterflynet.car and b/build/genesis/butterflynet.car differ diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index f4e71402d..ae8f0b866 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 6727576ef..2cb9ab35a 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index 79e2aa85a..d6c6806ca 100644 Binary files a/build/openrpc/worker.json.gz and b/build/openrpc/worker.json.gz differ diff --git a/build/params_2k.go b/build/params_2k.go index 41dc267ad..00250ed35 100644 --- a/build/params_2k.go +++ b/build/params_2k.go @@ -12,13 +12,12 @@ import ( 
"github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors/policy" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" ) const BootstrappersFile = "" const GenesisFile = "" -const GenesisNetworkVersion = network.Version14 +const GenesisNetworkVersion = network.Version15 var UpgradeBreezeHeight = abi.ChainEpoch(-1) @@ -48,6 +47,8 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeOhSnapHeight = abi.ChainEpoch(-18) + var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } @@ -89,11 +90,10 @@ func init() { UpgradeTurboHeight = getUpgradeHeight("LOTUS_ACTORSV4_HEIGHT", UpgradeTurboHeight) UpgradeHyperdriveHeight = getUpgradeHeight("LOTUS_HYPERDRIVE_HEIGHT", UpgradeHyperdriveHeight) UpgradeChocolateHeight = getUpgradeHeight("LOTUS_CHOCOLATE_HEIGHT", UpgradeChocolateHeight) + UpgradeOhSnapHeight = getUpgradeHeight("LOTUS_OHSNAP_HEIGHT", UpgradeOhSnapHeight) BuildType |= Build2k - // To test out what this proposal would like on devnets / testnets: https://github.com/filecoin-project/FIPs/pull/190 - miner6.FaultMaxAge = miner6.WPoStProvingPeriod * 42 } const BlockDelaySecs = uint64(1) diff --git a/build/params_butterfly.go b/build/params_butterfly.go index 70d1cff95..776a31714 100644 --- a/build/params_butterfly.go +++ b/build/params_butterfly.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors/policy" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" "github.com/ipfs/go-cid" ) @@ -17,7 +16,7 @@ var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, } -const GenesisNetworkVersion = network.Version13 +const GenesisNetworkVersion = network.Version14 const BootstrappersFile = "butterflynet.pi" const 
GenesisFile = "butterflynet.car" @@ -41,12 +40,17 @@ const UpgradeTrustHeight = -13 const UpgradeNorwegianHeight = -14 const UpgradeTurboHeight = -15 const UpgradeHyperdriveHeight = -16 -const UpgradeChocolateHeight = 6360 +const UpgradeChocolateHeight = -17 + +// 2022-01-17T19:00:00Z +const UpgradeOhSnapHeight = 30262 func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(2 << 30)) policy.SetSupportedProofTypes( abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, ) SetAddressNetwork(address.Testnet) @@ -54,9 +58,6 @@ func init() { Devnet = true BuildType = BuildButterflynet - - // To test out what this proposal would like on devnets / testnets: https://github.com/filecoin-project/FIPs/pull/190 - miner6.FaultMaxAge = miner6.WPoStProvingPeriod * 42 } const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) diff --git a/build/params_calibnet.go b/build/params_calibnet.go index be16d08b9..4da2269ee 100644 --- a/build/params_calibnet.go +++ b/build/params_calibnet.go @@ -9,7 +9,6 @@ import ( "github.com/filecoin-project/go-state-types/network" "github.com/filecoin-project/lotus/chain/actors/policy" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" "github.com/ipfs/go-cid" ) @@ -55,6 +54,8 @@ const UpgradeHyperdriveHeight = 420 const UpgradeChocolateHeight = 312746 +const UpgradeOhSnapHeight = 99999999 + func init() { policy.SetConsensusMinerMinPower(abi.NewStoragePower(32 << 30)) policy.SetSupportedProofTypes( @@ -68,8 +69,6 @@ func init() { BuildType = BuildCalibnet - // To test out what this proposal would like on devnets / testnets: https://github.com/filecoin-project/FIPs/pull/190 - miner6.FaultMaxAge = miner6.WPoStProvingPeriod * 42 } const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) diff --git a/build/params_interop.go b/build/params_interop.go index 
e928da8a0..a483e7188 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -12,10 +12,8 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/network" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - "github.com/filecoin-project/lotus/chain/actors/policy" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" ) const BootstrappersFile = "interopnet.pi" @@ -49,6 +47,7 @@ var UpgradeTurboHeight = abi.ChainEpoch(-15) var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) +var UpgradeOhSnapHeight = abi.ChainEpoch(-18) var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -99,8 +98,6 @@ func init() { SetAddressNetwork(address.Testnet) Devnet = true - // To test out what this proposal would like on devnets / testnets: https://github.com/filecoin-project/FIPs/pull/190 - miner6.FaultMaxAge = miner6.WPoStProvingPeriod * 42 } const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) diff --git a/build/params_mainnet.go b/build/params_mainnet.go index e6b730335..6efc6d62f 100644 --- a/build/params_mainnet.go +++ b/build/params_mainnet.go @@ -62,18 +62,20 @@ const UpgradeNorwegianHeight = 665280 const UpgradeTurboHeight = 712320 // 2021-06-30T22:00:00Z -var UpgradeHyperdriveHeight = abi.ChainEpoch(892800) +const UpgradeHyperdriveHeight = 892800 -// ??? 
-var UpgradeChocolateHeight = abi.ChainEpoch(999999999) +// 2021-10-26T13:30:00Z +const UpgradeChocolateHeight = 1231620 + +var UpgradeOhSnapHeight = abi.ChainEpoch(999999999999) func init() { if os.Getenv("LOTUS_USE_TEST_ADDRESSES") != "1" { SetAddressNetwork(address.Mainnet) } - if os.Getenv("LOTUS_DISABLE_CHOCOLATE") == "1" { - UpgradeChocolateHeight = math.MaxInt64 + if os.Getenv("LOTUS_DISABLE_SNAPDEALS") == "1" { + UpgradeOhSnapHeight = math.MaxInt64 } Devnet = false diff --git a/build/params_nerpanet.go b/build/params_nerpanet.go new file mode 100644 index 000000000..0e2913adc --- /dev/null +++ b/build/params_nerpanet.go @@ -0,0 +1,87 @@ +//go:build nerpanet +// +build nerpanet + +package build + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/ipfs/go-cid" + + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" +) + +var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ + 0: DrandMainnet, +} + +const GenesisNetworkVersion = network.Version0 + +const BootstrappersFile = "nerpanet.pi" +const GenesisFile = "nerpanet.car" + +const UpgradeBreezeHeight = -1 +const BreezeGasTampingDuration = 0 + +const UpgradeSmokeHeight = -1 + +const UpgradeIgnitionHeight = -2 +const UpgradeRefuelHeight = -3 + +const UpgradeLiftoffHeight = -5 + +const UpgradeAssemblyHeight = 30 // critical: the network can bootstrap from v1 only +const UpgradeTapeHeight = 60 + +const UpgradeKumquatHeight = 90 + +const UpgradeCalicoHeight = 100 +const UpgradePersianHeight = UpgradeCalicoHeight + (builtin2.EpochsInHour * 1) + +const UpgradeClausHeight = 250 + +const UpgradeOrangeHeight = 300 + +const UpgradeTrustHeight = 600 +const UpgradeNorwegianHeight = 201000 +const UpgradeTurboHeight = 203000 +const UpgradeHyperdriveHeight = 379178 + +const UpgradeChocolateHeight = 999999999 + +func init() { + // Minimum block production power is set to 4 TiB 
+ // Rationale is to discourage small-scale miners from trying to take over the network + // One needs to invest in ~2.3x the compute to break consensus, making it not worth it + // + // DOWNSIDE: the fake-seals need to be kept alive/protected, otherwise network will seize + // + policy.SetConsensusMinerMinPower(abi.NewStoragePower(4 << 40)) + + policy.SetSupportedProofTypes( + abi.RegisteredSealProof_StackedDrg512MiBV1, + abi.RegisteredSealProof_StackedDrg32GiBV1, + abi.RegisteredSealProof_StackedDrg64GiBV1, + ) + + // Lower the most time-consuming parts of PoRep + policy.SetPreCommitChallengeDelay(10) + + // TODO - make this a variable + //miner.WPoStChallengeLookback = abi.ChainEpoch(2) + + Devnet = false + + BuildType = BuildNerpanet + +} + +const BlockDelaySecs = uint64(builtin2.EpochDurationSeconds) + +const PropagationDelaySecs = uint64(6) + +// BootstrapPeerThreshold is the minimum number peers we need to track for a sync worker to start +const BootstrapPeerThreshold = 4 + +var WhitelistedBlock = cid.Undef diff --git a/build/params_shared_vals.go b/build/params_shared_vals.go index 22d1c30e3..704c84639 100644 --- a/build/params_shared_vals.go +++ b/build/params_shared_vals.go @@ -28,7 +28,15 @@ const UnixfsLinksPerLevel = 1024 const AllowableClockDriftSecs = uint64(1) // TODO: This is still terrible...What's the impact of updating this before mainnet actually upgrades -const NewestNetworkVersion = network.Version14 +/* inline-gen template + +const NewestNetworkVersion = network.Version{{.latestNetworkVersion}} + +/* inline-gen start */ + +const NewestNetworkVersion = network.Version15 + +/* inline-gen end */ // Epochs const ForkLengthThreshold = Finality diff --git a/build/params_testground.go b/build/params_testground.go index 48b76f82c..41c46d41e 100644 --- a/build/params_testground.go +++ b/build/params_testground.go @@ -99,6 +99,7 @@ var ( UpgradeTurboHeight abi.ChainEpoch = -14 UpgradeHyperdriveHeight abi.ChainEpoch = -15 UpgradeChocolateHeight 
abi.ChainEpoch = -16 + UpgradeOhSnapHeight abi.ChainEpoch = -17 DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, @@ -106,8 +107,8 @@ var ( GenesisNetworkVersion = network.Version0 - NewestNetworkVersion = network.Version14 - ActorUpgradeNetworkVersion = network.Version4 + NewestNetworkVersion = network.Version15 + ActorUpgradeNetworkVersion = network.Version15 Devnet = true ZeroAddress = MustParseAddress("f3yaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaby2smx7a") diff --git a/build/version.go b/build/version.go index 9795f5e99..5745c0e8d 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.13.1-dev" +const BuildVersion = "1.15.0-dev" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/builtin/account/account.go b/chain/actors/builtin/account/account.go index 249ce133f..57ea510bb 100644 --- a/chain/actors/builtin/account/account.go +++ b/chain/actors/builtin/account/account.go @@ -23,6 +23,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -50,6 +52,10 @@ func init() { builtin.RegisterActorState(builtin6.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.AccountActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var Methods = builtin4.MethodsAccount @@ -75,6 +81,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.AccountActorCodeID: return load6(store, act.Head) + case builtin7.AccountActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor 
code %s", act.Code) } @@ -100,6 +109,9 @@ func MakeState(store adt.Store, av actors.Version, addr address.Address) (State, case actors.Version6: return make6(store, addr) + case actors.Version7: + return make7(store, addr) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -125,6 +137,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.AccountActorCodeID, nil + case actors.Version7: + return builtin7.AccountActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/account/v7.go b/chain/actors/builtin/account/v7.go new file mode 100644 index 000000000..883776cf8 --- /dev/null +++ b/chain/actors/builtin/account/v7.go @@ -0,0 +1,40 @@ +package account + +import ( + "github.com/filecoin-project/go-address" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + account7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/account" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, addr address.Address) (State, error) { + out := state7{store: store} + out.State = account7.State{Address: addr} + return &out, nil +} + +type state7 struct { + account7.State + store adt.Store +} + +func (s *state7) PubkeyAddress() (address.Address, error) { + return s.Address, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/builtin.go b/chain/actors/builtin/builtin.go index 1687e6064..febbca479 100644 --- a/chain/actors/builtin/builtin.go +++ b/chain/actors/builtin/builtin.go @@ -23,47 +23,50 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" smoothing6 "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing" + 
builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/types" - miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" - proof6 "github.com/filecoin-project/specs-actors/v6/actors/runtime/proof" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) -var SystemActorAddr = builtin6.SystemActorAddr -var BurntFundsActorAddr = builtin6.BurntFundsActorAddr -var CronActorAddr = builtin6.CronActorAddr -var RewardActorAddr = builtin6.RewardActorAddr +var SystemActorAddr = builtin7.SystemActorAddr +var BurntFundsActorAddr = builtin7.BurntFundsActorAddr +var CronActorAddr = builtin7.CronActorAddr var SaftAddress = makeAddress("t0122") var ReserveAddress = makeAddress("t090") var RootVerifierAddress = makeAddress("t080") var ( - ExpectedLeadersPerEpoch = builtin6.ExpectedLeadersPerEpoch + ExpectedLeadersPerEpoch = builtin7.ExpectedLeadersPerEpoch ) const ( - EpochDurationSeconds = builtin6.EpochDurationSeconds - EpochsInDay = builtin6.EpochsInDay - SecondsInDay = builtin6.SecondsInDay + EpochDurationSeconds = builtin7.EpochDurationSeconds + EpochsInDay = builtin7.EpochsInDay + SecondsInDay = builtin7.SecondsInDay ) const ( - MethodSend = builtin6.MethodSend - MethodConstructor = builtin6.MethodConstructor + MethodSend = builtin7.MethodSend + MethodConstructor = builtin7.MethodConstructor ) // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. 
-type SectorInfo = proof6.SectorInfo -type PoStProof = proof6.PoStProof +type SectorInfo = proof7.SectorInfo +type ExtendedSectorInfo = proof7.ExtendedSectorInfo +type PoStProof = proof7.PoStProof type FilterEstimate = smoothing0.FilterEstimate func QAPowerForWeight(size abi.SectorSize, duration abi.ChainEpoch, dealWeight, verifiedWeight abi.DealWeight) abi.StoragePower { - return miner6.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) + return miner7.QAPowerForWeight(size, duration, dealWeight, verifiedWeight) } func FromV0FilterEstimate(v0 smoothing0.FilterEstimate) FilterEstimate { @@ -102,6 +105,12 @@ func FromV6FilterEstimate(v6 smoothing6.FilterEstimate) FilterEstimate { } +func FromV7FilterEstimate(v7 smoothing7.FilterEstimate) FilterEstimate { + + return (FilterEstimate)(v7) + +} + type ActorStateLoader func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) var ActorStateLoaders = make(map[cid.Cid]ActorStateLoader) @@ -139,6 +148,9 @@ func ActorNameByCode(c cid.Cid) string { case builtin6.IsBuiltinActor(c): return builtin6.ActorNameByCode(c) + case builtin7.IsBuiltinActor(c): + return builtin7.ActorNameByCode(c) + default: return "" } @@ -170,6 +182,10 @@ func IsBuiltinActor(c cid.Cid) bool { return true } + if builtin7.IsBuiltinActor(c) { + return true + } + return false } @@ -199,6 +215,10 @@ func IsAccountActor(c cid.Cid) bool { return true } + if c == builtin7.AccountActorCodeID { + return true + } + return false } @@ -228,6 +248,10 @@ func IsStorageMinerActor(c cid.Cid) bool { return true } + if c == builtin7.StorageMinerActorCodeID { + return true + } + return false } @@ -257,6 +281,10 @@ func IsMultisigActor(c cid.Cid) bool { return true } + if c == builtin7.MultisigActorCodeID { + return true + } + return false } @@ -286,6 +314,10 @@ func IsPaymentChannelActor(c cid.Cid) bool { return true } + if c == builtin7.PaymentChannelActorCodeID { + return true + } + return false } diff --git a/chain/actors/builtin/builtin.go.template 
b/chain/actors/builtin/builtin.go.template index 31549a2d6..56adeef54 100644 --- a/chain/actors/builtin/builtin.go.template +++ b/chain/actors/builtin/builtin.go.template @@ -46,6 +46,7 @@ const ( // These are all just type aliases across actor versions. In the future, that might change // and we might need to do something fancier. type SectorInfo = proof{{.latestVersion}}.SectorInfo +type ExtendedSectorInfo = proof{{.latestVersion}}.ExtendedSectorInfo type PoStProof = proof{{.latestVersion}}.PoStProof type FilterEstimate = smoothing0.FilterEstimate diff --git a/chain/actors/builtin/cron/cron.go b/chain/actors/builtin/cron/cron.go index 9178a44ab..f27a14ac7 100644 --- a/chain/actors/builtin/cron/cron.go +++ b/chain/actors/builtin/cron/cron.go @@ -17,6 +17,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -40,6 +42,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -65,14 +70,17 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.CronActorCodeID, nil + case actors.Version7: + return builtin7.CronActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) } var ( - Address = builtin6.CronActorAddr - Methods = builtin6.MethodsCron + Address = builtin7.CronActorAddr + Methods = builtin7.MethodsCron ) type State interface { diff --git a/chain/actors/builtin/cron/v7.go b/chain/actors/builtin/cron/v7.go new file mode 100644 index 000000000..e5538c89f --- /dev/null +++ b/chain/actors/builtin/cron/v7.go @@ -0,0 +1,35 @@ +package cron + +import ( + "github.com/ipfs/go-cid" + + 
"github.com/filecoin-project/lotus/chain/actors/adt" + + cron7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/cron" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = *cron7.ConstructState(cron7.BuiltInEntries()) + return &out, nil +} + +type state7 struct { + cron7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/init/init.go b/chain/actors/builtin/init/init.go index ee06eeab7..737241ffe 100644 --- a/chain/actors/builtin/init/init.go +++ b/chain/actors/builtin/init/init.go @@ -25,6 +25,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -52,11 +54,15 @@ func init() { builtin.RegisterActorState(builtin6.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.InitActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.InitActorAddr - Methods = builtin6.MethodsInit + Address = builtin7.InitActorAddr + Methods = builtin7.MethodsInit ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -80,6 +86,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.InitActorCodeID: return load6(store, act.Head) + case builtin7.InitActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -105,6 +114,9 @@ func MakeState(store adt.Store, av actors.Version, networkName 
string) (State, e case actors.Version6: return make6(store, networkName) + case actors.Version7: + return make7(store, networkName) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -130,6 +142,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.InitActorCodeID, nil + case actors.Version7: + return builtin7.InitActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/init/v7.go b/chain/actors/builtin/init/v7.go new file mode 100644 index 000000000..341aa52cd --- /dev/null +++ b/chain/actors/builtin/init/v7.go @@ -0,0 +1,114 @@ +package init + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, networkName string) (State, error) { + out := state7{store: store} + + s, err := init7.ConstructState(store, networkName) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + init7.State + store adt.Store +} + +func (s *state7) ResolveAddress(address address.Address) (address.Address, bool, error) { + return s.State.ResolveAddress(s.store, address) +} + +func (s *state7) MapAddressToNewID(address address.Address) (address.Address, error) { + return 
s.State.MapAddressToNewID(s.store, address) +} + +func (s *state7) ForEachActor(cb func(id abi.ActorID, address address.Address) error) error { + addrs, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var actorID cbg.CborInt + return addrs.ForEach(&actorID, func(key string) error { + addr, err := address.NewFromBytes([]byte(key)) + if err != nil { + return err + } + return cb(abi.ActorID(actorID), addr) + }) +} + +func (s *state7) NetworkName() (dtypes.NetworkName, error) { + return dtypes.NetworkName(s.State.NetworkName), nil +} + +func (s *state7) SetNetworkName(name string) error { + s.State.NetworkName = name + return nil +} + +func (s *state7) SetNextID(id abi.ActorID) error { + s.State.NextID = id + return nil +} + +func (s *state7) Remove(addrs ...address.Address) (err error) { + m, err := adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + for _, addr := range addrs { + if err = m.Delete(abi.AddrKey(addr)); err != nil { + return xerrors.Errorf("failed to delete entry for address: %s; err: %w", addr, err) + } + } + amr, err := m.Root() + if err != nil { + return xerrors.Errorf("failed to get address map root: %w", err) + } + s.State.AddressMap = amr + return nil +} + +func (s *state7) SetAddressMap(mcid cid.Cid) error { + s.State.AddressMap = mcid + return nil +} + +func (s *state7) AddressMap() (adt.Map, error) { + return adt7.AsMap(s.store, s.State.AddressMap, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/market/market.go b/chain/actors/builtin/market/market.go index 7e35f3919..6781b55e3 100644 --- a/chain/actors/builtin/market/market.go +++ b/chain/actors/builtin/market/market.go @@ -25,6 +25,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + 
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -56,11 +58,15 @@ func init() { builtin.RegisterActorState(builtin6.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StorageMarketActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StorageMarketActorAddr - Methods = builtin6.MethodsMarket + Address = builtin7.StorageMarketActorAddr + Methods = builtin7.MethodsMarket ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -84,6 +90,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StorageMarketActorCodeID: return load6(store, act.Head) + case builtin7.StorageMarketActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -109,6 +118,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -134,6 +146,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StorageMarketActorCodeID, nil + case actors.Version7: + return builtin7.StorageMarketActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -211,6 +226,9 @@ func DecodePublishStorageDealsReturn(b []byte, nv network.Version) (PublishStora case actors.Version6: return decodePublishStorageDealsReturn6(b) + case actors.Version7: + return decodePublishStorageDealsReturn7(b) + } return nil, xerrors.Errorf("unknown actor version %d", av) } diff --git a/chain/actors/builtin/market/v7.go b/chain/actors/builtin/market/v7.go new file mode 100644 index 000000000..553913146 --- 
/dev/null +++ b/chain/actors/builtin/market/v7.go @@ -0,0 +1,252 @@ +package market + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/types" + + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := market7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + market7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + fml := types.BigAdd(s.TotalClientLockedCollateral, s.TotalProviderLockedCollateral) + fml = types.BigAdd(fml, s.TotalClientStorageFee) + return fml, nil +} + +func (s *state7) BalancesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.EscrowTable.Equals(otherState7.State.EscrowTable) || !s.State.LockedTable.Equals(otherState7.State.LockedTable), nil +} + +func (s *state7) StatesChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return 
!s.State.States.Equals(otherState7.State.States), nil +} + +func (s *state7) States() (DealStates, error) { + stateArray, err := adt7.AsArray(s.store, s.State.States, market7.StatesAmtBitwidth) + if err != nil { + return nil, err + } + return &dealStates7{stateArray}, nil +} + +func (s *state7) ProposalsChanged(otherState State) (bool, error) { + otherState7, ok := otherState.(*state7) + if !ok { + // there's no way to compare different versions of the state, so let's + // just say that means the state of balances has changed + return true, nil + } + return !s.State.Proposals.Equals(otherState7.State.Proposals), nil +} + +func (s *state7) Proposals() (DealProposals, error) { + proposalArray, err := adt7.AsArray(s.store, s.State.Proposals, market7.ProposalsAmtBitwidth) + if err != nil { + return nil, err + } + return &dealProposals7{proposalArray}, nil +} + +func (s *state7) EscrowTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.EscrowTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) LockedTable() (BalanceTable, error) { + bt, err := adt7.AsBalanceTable(s.store, s.State.LockedTable) + if err != nil { + return nil, err + } + return &balanceTable7{bt}, nil +} + +func (s *state7) VerifyDealsForActivation( + minerAddr address.Address, deals []abi.DealID, currEpoch, sectorExpiry abi.ChainEpoch, +) (weight, verifiedWeight abi.DealWeight, err error) { + w, vw, _, err := market7.ValidateDealsForActivation(&s.State, s.store, deals, minerAddr, sectorExpiry, currEpoch) + return w, vw, err +} + +func (s *state7) NextID() (abi.DealID, error) { + return s.State.NextID, nil +} + +type balanceTable7 struct { + *adt7.BalanceTable +} + +func (bt *balanceTable7) ForEach(cb func(address.Address, abi.TokenAmount) error) error { + asMap := (*adt7.Map)(bt.BalanceTable) + var ta abi.TokenAmount + return asMap.ForEach(&ta, func(key string) error { + a, err := address.NewFromBytes([]byte(key)) + if err != 
nil { + return err + } + return cb(a, ta) + }) +} + +type dealStates7 struct { + adt.Array +} + +func (s *dealStates7) Get(dealID abi.DealID) (*DealState, bool, error) { + var deal7 market7.DealState + found, err := s.Array.Get(uint64(dealID), &deal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + deal := fromV7DealState(deal7) + return &deal, true, nil +} + +func (s *dealStates7) ForEach(cb func(dealID abi.DealID, ds DealState) error) error { + var ds7 market7.DealState + return s.Array.ForEach(&ds7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealState(ds7)) + }) +} + +func (s *dealStates7) decode(val *cbg.Deferred) (*DealState, error) { + var ds7 market7.DealState + if err := ds7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + ds := fromV7DealState(ds7) + return &ds, nil +} + +func (s *dealStates7) array() adt.Array { + return s.Array +} + +func fromV7DealState(v7 market7.DealState) DealState { + return (DealState)(v7) +} + +type dealProposals7 struct { + adt.Array +} + +func (s *dealProposals7) Get(dealID abi.DealID) (*DealProposal, bool, error) { + var proposal7 market7.DealProposal + found, err := s.Array.Get(uint64(dealID), &proposal7) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, nil + } + proposal := fromV7DealProposal(proposal7) + return &proposal, true, nil +} + +func (s *dealProposals7) ForEach(cb func(dealID abi.DealID, dp DealProposal) error) error { + var dp7 market7.DealProposal + return s.Array.ForEach(&dp7, func(idx int64) error { + return cb(abi.DealID(idx), fromV7DealProposal(dp7)) + }) +} + +func (s *dealProposals7) decode(val *cbg.Deferred) (*DealProposal, error) { + var dp7 market7.DealProposal + if err := dp7.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return nil, err + } + dp := fromV7DealProposal(dp7) + return &dp, nil +} + +func (s *dealProposals7) array() adt.Array { + return s.Array +} + +func 
fromV7DealProposal(v7 market7.DealProposal) DealProposal { + return (DealProposal)(v7) +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +var _ PublishStorageDealsReturn = (*publishStorageDealsReturn7)(nil) + +func decodePublishStorageDealsReturn7(b []byte) (PublishStorageDealsReturn, error) { + var retval market7.PublishStorageDealsReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, xerrors.Errorf("failed to unmarshal PublishStorageDealsReturn: %w", err) + } + + return &publishStorageDealsReturn7{retval}, nil +} + +type publishStorageDealsReturn7 struct { + market7.PublishStorageDealsReturn +} + +func (r *publishStorageDealsReturn7) IsDealValid(index uint64) (bool, error) { + + return r.ValidDeals.IsSet(index) + +} + +func (r *publishStorageDealsReturn7) DealIDs() ([]abi.DealID, error) { + return r.IDs, nil +} diff --git a/chain/actors/builtin/miner/actor.go.template b/chain/actors/builtin/miner/actor.go.template index 2669a05a6..74c16be36 100644 --- a/chain/actors/builtin/miner/actor.go.template +++ b/chain/actors/builtin/miner/actor.go.template @@ -23,6 +23,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" {{range .versions}} builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin" {{end}} @@ -177,6 +178,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo @@ -192,6 +194,7 @@ type SectorPreCommitOnChainInfo struct { type PoStPartition = miner0.PoStPartition type RecoveryDeclaration = miner0.RecoveryDeclaration type FaultDeclaration = miner0.FaultDeclaration 
+type ReplicaUpdate = miner7.ReplicaUpdate // Params type DeclareFaultsParams = miner0.DeclareFaultsParams @@ -200,6 +203,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams +type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/miner.go b/chain/actors/builtin/miner/miner.go index 1c7f47e11..7889d7a4d 100644 --- a/chain/actors/builtin/miner/miner.go +++ b/chain/actors/builtin/miner/miner.go @@ -23,6 +23,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" miner3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/miner" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -35,6 +36,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -63,9 +66,13 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.StorageMinerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } -var Methods = builtin6.MethodsMiner +var Methods = builtin7.MethodsMiner // Unchanged between v0, v2, v3, v4, and v5 actors var WPoStProvingPeriod = miner0.WPoStProvingPeriod @@ -102,6 +109,9 @@ func Load(store adt.Store, act *types.Actor) 
(State, error) { case builtin6.StorageMinerActorCodeID: return load6(store, act.Head) + case builtin7.StorageMinerActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -127,6 +137,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -152,6 +165,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StorageMinerActorCodeID, nil + case actors.Version7: + return builtin7.StorageMinerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -251,6 +267,7 @@ type SectorOnChainInfo struct { InitialPledge abi.TokenAmount ExpectedDayReward abi.TokenAmount ExpectedStoragePledge abi.TokenAmount + SectorKeyCID *cid.Cid } type SectorPreCommitInfo = miner0.SectorPreCommitInfo @@ -266,6 +283,7 @@ type SectorPreCommitOnChainInfo struct { type PoStPartition = miner0.PoStPartition type RecoveryDeclaration = miner0.RecoveryDeclaration type FaultDeclaration = miner0.FaultDeclaration +type ReplicaUpdate = miner7.ReplicaUpdate // Params type DeclareFaultsParams = miner0.DeclareFaultsParams @@ -274,6 +292,7 @@ type SubmitWindowedPoStParams = miner0.SubmitWindowedPoStParams type ProveCommitSectorParams = miner0.ProveCommitSectorParams type DisputeWindowedPoStParams = miner3.DisputeWindowedPoStParams type ProveCommitAggregateParams = miner5.ProveCommitAggregateParams +type ProveReplicaUpdatesParams = miner7.ProveReplicaUpdatesParams func PreferredSealProofTypeFromWindowPoStType(nver network.Version, proof abi.RegisteredPoStProof) (abi.RegisteredSealProof, error) { // We added support for the new proofs in network version 7, and removed support for the old diff --git a/chain/actors/builtin/miner/state.go.template b/chain/actors/builtin/miner/state.go.template index 
2ea6a905e..775631961 100644 --- a/chain/actors/builtin/miner/state.go.template +++ b/chain/actors/builtin/miner/state.go.template @@ -138,11 +138,22 @@ func (s *state{{.v}}) GetSectorExpiration(num abi.SectorNumber) (*SectorExpirati return nil, err } // NOTE: this can be optimized significantly. - // 1. If the sector is non-faulty, it will either expire on-time (can be +{{if (ge .v 7) -}} + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). +{{- else -}} + // 1. If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. +{{- end}} +{{if (ge .v 6) -}} + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. +{{- else -}} // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. +{{- end}} + stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner{{.v}}.Deadline) error { @@ -554,8 +565,7 @@ func (p *partition{{.v}}) UnprovenSectors() (bitfield.BitField, error) { } func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorOnChainInfo { -{{if (ge .v 2)}} - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v{{.v}}.SectorNumber, SealProof: v{{.v}}.SealProof, SealedCID: v{{.v}}.SealedCID, @@ -567,10 +577,11 @@ func fromV{{.v}}SectorOnChainInfo(v{{.v}} miner{{.v}}.SectorOnChainInfo) SectorO InitialPledge: v{{.v}}.InitialPledge, ExpectedDayReward: v{{.v}}.ExpectedDayReward, ExpectedStoragePledge: v{{.v}}.ExpectedStoragePledge, + {{if (ge .v 7)}} + SectorKeyCID: v{{.v}}.SectorKeyCID, + {{end}} } -{{else}} - return (SectorOnChainInfo)(v0) -{{end}} + return info } func fromV{{.v}}SectorPreCommitOnChainInfo(v{{.v}} miner{{.v}}.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git 
a/chain/actors/builtin/miner/utils.go b/chain/actors/builtin/miner/utils.go index 2f24e8454..5fafc31ef 100644 --- a/chain/actors/builtin/miner/utils.go +++ b/chain/actors/builtin/miner/utils.go @@ -67,3 +67,22 @@ func SealProofTypeFromSectorSize(ssize abi.SectorSize, nv network.Version) (abi. return 0, xerrors.Errorf("unsupported network version") } + +// WindowPoStProofTypeFromSectorSize returns preferred post proof type for creating +// new miner actors and new sectors +func WindowPoStProofTypeFromSectorSize(ssize abi.SectorSize) (abi.RegisteredPoStProof, error) { + switch ssize { + case 2 << 10: + return abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, nil + case 8 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow8MiBV1, nil + case 512 << 20: + return abi.RegisteredPoStProof_StackedDrgWindow512MiBV1, nil + case 32 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow32GiBV1, nil + case 64 << 30: + return abi.RegisteredPoStProof_StackedDrgWindow64GiBV1, nil + default: + return 0, xerrors.Errorf("unsupported sector size for miner: %v", ssize) + } +} diff --git a/chain/actors/builtin/miner/v0.go b/chain/actors/builtin/miner/v0.go index 564bcbbc2..8bde8bf73 100644 --- a/chain/actors/builtin/miner/v0.go +++ b/chain/actors/builtin/miner/v0.go @@ -140,6 +140,7 @@ func (s *state0) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner0.Deadline) error { @@ -505,9 +506,20 @@ func (p *partition0) UnprovenSectors() (bitfield.BitField, error) { } func fromV0SectorOnChainInfo(v0 miner0.SectorOnChainInfo) SectorOnChainInfo { - - return (SectorOnChainInfo)(v0) - + info := SectorOnChainInfo{ + SectorNumber: v0.SectorNumber, + SealProof: v0.SealProof, + SealedCID: v0.SealedCID, + DealIDs: v0.DealIDs, + Activation: v0.Activation, + Expiration: v0.Expiration, + DealWeight: v0.DealWeight, + VerifiedDealWeight: v0.VerifiedDealWeight, + InitialPledge: v0.InitialPledge, + ExpectedDayReward: v0.ExpectedDayReward, + ExpectedStoragePledge: v0.ExpectedStoragePledge, + } + return info } func fromV0SectorPreCommitOnChainInfo(v0 miner0.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v2.go b/chain/actors/builtin/miner/v2.go index fe0863111..bbfdd403e 100644 --- a/chain/actors/builtin/miner/v2.go +++ b/chain/actors/builtin/miner/v2.go @@ -138,6 +138,7 @@ func (s *state2) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner2.Deadline) error { @@ -535,8 +536,7 @@ func (p *partition2) UnprovenSectors() (bitfield.BitField, error) { } func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v2.SectorNumber, SealProof: v2.SealProof, SealedCID: v2.SealedCID, @@ -549,7 +549,7 @@ func fromV2SectorOnChainInfo(v2 miner2.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v2.ExpectedDayReward, ExpectedStoragePledge: v2.ExpectedStoragePledge, } - + return info } func fromV2SectorPreCommitOnChainInfo(v2 miner2.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v3.go b/chain/actors/builtin/miner/v3.go index b0d5429ea..68505918a 100644 --- a/chain/actors/builtin/miner/v3.go +++ b/chain/actors/builtin/miner/v3.go @@ -140,6 +140,7 @@ func (s *state3) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner3.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition3) UnprovenSectors() (bitfield.BitField, error) { } func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v3.SectorNumber, SealProof: v3.SealProof, SealedCID: v3.SealedCID, @@ -550,7 +550,7 @@ func fromV3SectorOnChainInfo(v3 miner3.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v3.ExpectedDayReward, ExpectedStoragePledge: v3.ExpectedStoragePledge, } - + return info } func fromV3SectorPreCommitOnChainInfo(v3 miner3.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v4.go b/chain/actors/builtin/miner/v4.go index 7e5a9761a..5c40d4189 100644 --- a/chain/actors/builtin/miner/v4.go +++ b/chain/actors/builtin/miner/v4.go @@ -140,6 +140,7 @@ func (s *state4) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner4.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition4) UnprovenSectors() (bitfield.BitField, error) { } func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v4.SectorNumber, SealProof: v4.SealProof, SealedCID: v4.SealedCID, @@ -550,7 +550,7 @@ func fromV4SectorOnChainInfo(v4 miner4.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v4.ExpectedDayReward, ExpectedStoragePledge: v4.ExpectedStoragePledge, } - + return info } func fromV4SectorPreCommitOnChainInfo(v4 miner4.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v5.go b/chain/actors/builtin/miner/v5.go index 7f4aaf168..f717934f4 100644 --- a/chain/actors/builtin/miner/v5.go +++ b/chain/actors/builtin/miner/v5.go @@ -140,6 +140,7 @@ func (s *state5) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // epoch (i.e., the first element in the partition's expiration queue. // 2. If it's faulty, it will expire early within the first 14 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner5.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition5) UnprovenSectors() (bitfield.BitField, error) { } func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v5.SectorNumber, SealProof: v5.SealProof, SealedCID: v5.SealedCID, @@ -550,7 +550,7 @@ func fromV5SectorOnChainInfo(v5 miner5.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v5.ExpectedDayReward, ExpectedStoragePledge: v5.ExpectedStoragePledge, } - + return info } func fromV5SectorPreCommitOnChainInfo(v5 miner5.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v6.go b/chain/actors/builtin/miner/v6.go index de5a22a10..7a9dfb0df 100644 --- a/chain/actors/builtin/miner/v6.go +++ b/chain/actors/builtin/miner/v6.go @@ -138,8 +138,9 @@ func (s *state6) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, e // 1. If the sector is non-faulty, it will either expire on-time (can be // learned from the sector info), or in the next quantized expiration // epoch (i.e., the first element in the partition's expiration queue. - // 2. If it's faulty, it will expire early within the first 14 entries + // 2. If it's faulty, it will expire early within the first 42 entries // of the expiration queue. 
+ stopErr := errors.New("stop") out := SectorExpiration{} err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner6.Deadline) error { @@ -536,8 +537,7 @@ func (p *partition6) UnprovenSectors() (bitfield.BitField, error) { } func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { - - return SectorOnChainInfo{ + info := SectorOnChainInfo{ SectorNumber: v6.SectorNumber, SealProof: v6.SealProof, SealedCID: v6.SealedCID, @@ -550,7 +550,7 @@ func fromV6SectorOnChainInfo(v6 miner6.SectorOnChainInfo) SectorOnChainInfo { ExpectedDayReward: v6.ExpectedDayReward, ExpectedStoragePledge: v6.ExpectedStoragePledge, } - + return info } func fromV6SectorPreCommitOnChainInfo(v6 miner6.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { diff --git a/chain/actors/builtin/miner/v7.go b/chain/actors/builtin/miner/v7.go new file mode 100644 index 000000000..e1b2520e4 --- /dev/null +++ b/chain/actors/builtin/miner/v7.go @@ -0,0 +1,571 @@ +package miner + +import ( + "bytes" + "errors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-bitfield" + rle "github.com/filecoin-project/go-bitfield/rle" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/dline" + "github.com/ipfs/go-cid" + "github.com/libp2p/go-libp2p-core/peer" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = 
miner7.State{} + return &out, nil +} + +type state7 struct { + miner7.State + store adt.Store +} + +type deadline7 struct { + miner7.Deadline + store adt.Store +} + +type partition7 struct { + miner7.Partition + store adt.Store +} + +func (s *state7) AvailableBalance(bal abi.TokenAmount) (available abi.TokenAmount, err error) { + defer func() { + if r := recover(); r != nil { + err = xerrors.Errorf("failed to get available balance: %w", r) + available = abi.NewTokenAmount(0) + } + }() + // this panics if the miner doesnt have enough funds to cover their locked pledge + available, err = s.GetAvailableBalance(bal) + return available, err +} + +func (s *state7) VestedFunds(epoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.CheckVestedFunds(s.store, epoch) +} + +func (s *state7) LockedFunds() (LockedFunds, error) { + return LockedFunds{ + VestingFunds: s.State.LockedFunds, + InitialPledgeRequirement: s.State.InitialPledge, + PreCommitDeposits: s.State.PreCommitDeposits, + }, nil +} + +func (s *state7) FeeDebt() (abi.TokenAmount, error) { + return s.State.FeeDebt, nil +} + +func (s *state7) InitialPledge() (abi.TokenAmount, error) { + return s.State.InitialPledge, nil +} + +func (s *state7) PreCommitDeposits() (abi.TokenAmount, error) { + return s.State.PreCommitDeposits, nil +} + +func (s *state7) GetSector(num abi.SectorNumber) (*SectorOnChainInfo, error) { + info, ok, err := s.State.GetSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorOnChainInfo(*info) + return &ret, nil +} + +func (s *state7) FindSector(num abi.SectorNumber) (*SectorLocation, error) { + dlIdx, partIdx, err := s.State.FindSector(s.store, num) + if err != nil { + return nil, err + } + return &SectorLocation{ + Deadline: dlIdx, + Partition: partIdx, + }, nil +} + +func (s *state7) NumLiveSectors() (uint64, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return 0, err + } + var total uint64 + if err := dls.ForEach(s.store, 
func(dlIdx uint64, dl *miner7.Deadline) error { + total += dl.LiveSectors + return nil + }); err != nil { + return 0, err + } + return total, nil +} + +// GetSectorExpiration returns the effective expiration of the given sector. +// +// If the sector does not expire early, the Early expiration field is 0. +func (s *state7) GetSectorExpiration(num abi.SectorNumber) (*SectorExpiration, error) { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + // NOTE: this can be optimized significantly. + // 1. If the sector is non-faulty, it will expire on-time (can be + // learned from the sector info). + // 2. If it's faulty, it will expire early within the first 42 entries + // of the expiration queue. + + stopErr := errors.New("stop") + out := SectorExpiration{} + err = dls.ForEach(s.store, func(dlIdx uint64, dl *miner7.Deadline) error { + partitions, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + quant := s.State.QuantSpecForDeadline(dlIdx) + var part miner7.Partition + return partitions.ForEach(&part, func(partIdx int64) error { + if found, err := part.Sectors.IsSet(uint64(num)); err != nil { + return err + } else if !found { + return nil + } + if found, err := part.Terminated.IsSet(uint64(num)); err != nil { + return err + } else if found { + // already terminated + return stopErr + } + + q, err := miner7.LoadExpirationQueue(s.store, part.ExpirationsEpochs, quant, miner7.PartitionExpirationAmtBitwidth) + if err != nil { + return err + } + var exp miner7.ExpirationSet + return q.ForEach(&exp, func(epoch int64) error { + if early, err := exp.EarlySectors.IsSet(uint64(num)); err != nil { + return err + } else if early { + out.Early = abi.ChainEpoch(epoch) + return nil + } + if onTime, err := exp.OnTimeSectors.IsSet(uint64(num)); err != nil { + return err + } else if onTime { + out.OnTime = abi.ChainEpoch(epoch) + return stopErr + } + return nil + }) + }) + }) + if err == stopErr { + err = nil + } + if err != nil { + 
return nil, err + } + if out.Early == 0 && out.OnTime == 0 { + return nil, xerrors.Errorf("failed to find sector %d", num) + } + return &out, nil +} + +func (s *state7) GetPrecommittedSector(num abi.SectorNumber) (*SectorPreCommitOnChainInfo, error) { + info, ok, err := s.State.GetPrecommittedSector(s.store, num) + if !ok || err != nil { + return nil, err + } + + ret := fromV7SectorPreCommitOnChainInfo(*info) + + return &ret, nil +} + +func (s *state7) ForEachPrecommittedSector(cb func(SectorPreCommitOnChainInfo) error) error { + precommitted, err := adt7.AsMap(s.store, s.State.PreCommittedSectors, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + + var info miner7.SectorPreCommitOnChainInfo + if err := precommitted.ForEach(&info, func(_ string) error { + return cb(fromV7SectorPreCommitOnChainInfo(info)) + }); err != nil { + return err + } + + return nil +} + +func (s *state7) LoadSectors(snos *bitfield.BitField) ([]*SectorOnChainInfo, error) { + sectors, err := miner7.LoadSectors(s.store, s.State.Sectors) + if err != nil { + return nil, err + } + + // If no sector numbers are specified, load all. + if snos == nil { + infos := make([]*SectorOnChainInfo, 0, sectors.Length()) + var info7 miner7.SectorOnChainInfo + if err := sectors.ForEach(&info7, func(_ int64) error { + info := fromV7SectorOnChainInfo(info7) + infos = append(infos, &info) + return nil + }); err != nil { + return nil, err + } + return infos, nil + } + + // Otherwise, load selected. 
+ infos7, err := sectors.Load(*snos) + if err != nil { + return nil, err + } + infos := make([]*SectorOnChainInfo, len(infos7)) + for i, info7 := range infos7 { + info := fromV7SectorOnChainInfo(*info7) + infos[i] = &info + } + return infos, nil +} + +func (s *state7) loadAllocatedSectorNumbers() (bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors) + return allocatedSectors, err +} + +func (s *state7) IsAllocated(num abi.SectorNumber) (bool, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return false, err + } + + return allocatedSectors.IsSet(uint64(num)) +} + +func (s *state7) GetProvingPeriodStart() (abi.ChainEpoch, error) { + return s.State.ProvingPeriodStart, nil +} + +func (s *state7) UnallocatedSectorNumbers(count int) ([]abi.SectorNumber, error) { + allocatedSectors, err := s.loadAllocatedSectorNumbers() + if err != nil { + return nil, err + } + + allocatedRuns, err := allocatedSectors.RunIterator() + if err != nil { + return nil, err + } + + unallocatedRuns, err := rle.Subtract( + &rle.RunSliceIterator{Runs: []rle.Run{{Val: true, Len: abi.MaxSectorNumber}}}, + allocatedRuns, + ) + if err != nil { + return nil, err + } + + iter, err := rle.BitsFromRuns(unallocatedRuns) + if err != nil { + return nil, err + } + + sectors := make([]abi.SectorNumber, 0, count) + for iter.HasNext() && len(sectors) < count { + nextNo, err := iter.Next() + if err != nil { + return nil, err + } + sectors = append(sectors, abi.SectorNumber(nextNo)) + } + + return sectors, nil +} + +func (s *state7) GetAllocatedSectors() (*bitfield.BitField, error) { + var allocatedSectors bitfield.BitField + if err := s.store.Get(s.store.Context(), s.State.AllocatedSectors, &allocatedSectors); err != nil { + return nil, err + } + + return &allocatedSectors, nil +} + +func (s *state7) LoadDeadline(idx uint64) (Deadline, error) { + dls, err := 
s.State.LoadDeadlines(s.store) + if err != nil { + return nil, err + } + dl, err := dls.LoadDeadline(s.store, idx) + if err != nil { + return nil, err + } + return &deadline7{*dl, s.store}, nil +} + +func (s *state7) ForEachDeadline(cb func(uint64, Deadline) error) error { + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + return dls.ForEach(s.store, func(i uint64, dl *miner7.Deadline) error { + return cb(i, &deadline7{*dl, s.store}) + }) +} + +func (s *state7) NumDeadlines() (uint64, error) { + return miner7.WPoStPeriodDeadlines, nil +} + +func (s *state7) DeadlinesChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !s.State.Deadlines.Equals(other7.Deadlines), nil +} + +func (s *state7) MinerInfoChanged(other State) (bool, error) { + other0, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Info.Equals(other0.State.Info), nil +} + +func (s *state7) Info() (MinerInfo, error) { + info, err := s.State.GetInfo(s.store) + if err != nil { + return MinerInfo{}, err + } + + var pid *peer.ID + if peerID, err := peer.IDFromBytes(info.PeerId); err == nil { + pid = &peerID + } + + mi := MinerInfo{ + Owner: info.Owner, + Worker: info.Worker, + ControlAddresses: info.ControlAddresses, + + NewWorker: address.Undef, + WorkerChangeEpoch: -1, + + PeerId: pid, + Multiaddrs: info.Multiaddrs, + WindowPoStProofType: info.WindowPoStProofType, + SectorSize: info.SectorSize, + WindowPoStPartitionSectors: info.WindowPoStPartitionSectors, + ConsensusFaultElapsed: info.ConsensusFaultElapsed, + } + + if info.PendingWorkerKey != nil { + mi.NewWorker = info.PendingWorkerKey.NewWorker + mi.WorkerChangeEpoch = info.PendingWorkerKey.EffectiveAt + } + + return mi, nil +} + +func (s *state7) DeadlineInfo(epoch abi.ChainEpoch) (*dline.Info, error) { + return s.State.RecordedDeadlineInfo(epoch), nil +} + 
+func (s *state7) DeadlineCronActive() (bool, error) { + return s.State.DeadlineCronActive, nil +} + +func (s *state7) sectors() (adt.Array, error) { + return adt7.AsArray(s.store, s.Sectors, miner7.SectorsAmtBitwidth) +} + +func (s *state7) decodeSectorOnChainInfo(val *cbg.Deferred) (SectorOnChainInfo, error) { + var si miner7.SectorOnChainInfo + err := si.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorOnChainInfo{}, err + } + + return fromV7SectorOnChainInfo(si), nil +} + +func (s *state7) precommits() (adt.Map, error) { + return adt7.AsMap(s.store, s.PreCommittedSectors, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeSectorPreCommitOnChainInfo(val *cbg.Deferred) (SectorPreCommitOnChainInfo, error) { + var sp miner7.SectorPreCommitOnChainInfo + err := sp.UnmarshalCBOR(bytes.NewReader(val.Raw)) + if err != nil { + return SectorPreCommitOnChainInfo{}, err + } + + return fromV7SectorPreCommitOnChainInfo(sp), nil +} + +func (s *state7) EraseAllUnproven() error { + + dls, err := s.State.LoadDeadlines(s.store) + if err != nil { + return err + } + + err = dls.ForEach(s.store, func(dindx uint64, dl *miner7.Deadline) error { + ps, err := dl.PartitionsArray(s.store) + if err != nil { + return err + } + + var part miner7.Partition + err = ps.ForEach(&part, func(pindx int64) error { + _ = part.ActivateUnproven() + err = ps.Set(uint64(pindx), &part) + return nil + }) + + if err != nil { + return err + } + + dl.Partitions, err = ps.Root() + if err != nil { + return err + } + + return dls.UpdateDeadline(s.store, dindx, dl) + }) + if err != nil { + return err + } + + return s.State.SaveDeadlines(s.store, dls) + +} + +func (d *deadline7) LoadPartition(idx uint64) (Partition, error) { + p, err := d.Deadline.LoadPartition(d.store, idx) + if err != nil { + return nil, err + } + return &partition7{*p, d.store}, nil +} + +func (d *deadline7) ForEachPartition(cb func(uint64, Partition) error) error { + ps, err := d.Deadline.PartitionsArray(d.store) 
+ if err != nil { + return err + } + var part miner7.Partition + return ps.ForEach(&part, func(i int64) error { + return cb(uint64(i), &partition7{part, d.store}) + }) +} + +func (d *deadline7) PartitionsChanged(other Deadline) (bool, error) { + other7, ok := other.(*deadline7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + + return !d.Deadline.Partitions.Equals(other7.Deadline.Partitions), nil +} + +func (d *deadline7) PartitionsPoSted() (bitfield.BitField, error) { + return d.Deadline.PartitionsPoSted, nil +} + +func (d *deadline7) DisputableProofCount() (uint64, error) { + + ops, err := d.OptimisticProofsSnapshotArray(d.store) + if err != nil { + return 0, err + } + + return ops.Length(), nil + +} + +func (p *partition7) AllSectors() (bitfield.BitField, error) { + return p.Partition.Sectors, nil +} + +func (p *partition7) FaultySectors() (bitfield.BitField, error) { + return p.Partition.Faults, nil +} + +func (p *partition7) RecoveringSectors() (bitfield.BitField, error) { + return p.Partition.Recoveries, nil +} + +func (p *partition7) UnprovenSectors() (bitfield.BitField, error) { + return p.Partition.Unproven, nil +} + +func fromV7SectorOnChainInfo(v7 miner7.SectorOnChainInfo) SectorOnChainInfo { + info := SectorOnChainInfo{ + SectorNumber: v7.SectorNumber, + SealProof: v7.SealProof, + SealedCID: v7.SealedCID, + DealIDs: v7.DealIDs, + Activation: v7.Activation, + Expiration: v7.Expiration, + DealWeight: v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + InitialPledge: v7.InitialPledge, + ExpectedDayReward: v7.ExpectedDayReward, + ExpectedStoragePledge: v7.ExpectedStoragePledge, + + SectorKeyCID: v7.SectorKeyCID, + } + return info +} + +func fromV7SectorPreCommitOnChainInfo(v7 miner7.SectorPreCommitOnChainInfo) SectorPreCommitOnChainInfo { + + return SectorPreCommitOnChainInfo{ + Info: (SectorPreCommitInfo)(v7.Info), + PreCommitDeposit: v7.PreCommitDeposit, + PreCommitEpoch: v7.PreCommitEpoch, + DealWeight: 
v7.DealWeight, + VerifiedDealWeight: v7.VerifiedDealWeight, + } + +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/multisig/message7.go b/chain/actors/builtin/multisig/message7.go new file mode 100644 index 000000000..e7fb83e9b --- /dev/null +++ b/chain/actors/builtin/multisig/message7.go @@ -0,0 +1,71 @@ +package multisig + +import ( + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + multisig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + +type message7 struct{ message0 } + +func (m message7) Create( + signers []address.Address, threshold uint64, + unlockStart, unlockDuration abi.ChainEpoch, + initialAmount abi.TokenAmount, +) (*types.Message, error) { + + lenAddrs := uint64(len(signers)) + + if lenAddrs < threshold { + return nil, xerrors.Errorf("cannot require signing of more addresses than provided for multisig") + } + + if threshold == 0 { + threshold = lenAddrs + } + + if m.from == address.Undef { + return nil, xerrors.Errorf("must provide source address") + } + + // Set up constructor parameters for multisig + msigParams := &multisig7.ConstructorParams{ + Signers: signers, + NumApprovalsThreshold: threshold, + UnlockDuration: unlockDuration, + StartEpoch: unlockStart, + } + + enc, actErr := actors.SerializeParams(msigParams) + if actErr != nil { + return nil, actErr + } + + // new actors are created by invoking 'exec' on the init actor with the constructor params + execParams := &init7.ExecParams{ + CodeCID: builtin7.MultisigActorCodeID, + ConstructorParams: enc, + } + + enc, actErr = 
actors.SerializeParams(execParams) + if actErr != nil { + return nil, actErr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Method: builtin7.MethodsInit.Exec, + Params: enc, + Value: initialAmount, + }, nil +} diff --git a/chain/actors/builtin/multisig/multisig.go b/chain/actors/builtin/multisig/multisig.go index ee725f7e5..f1b50475a 100644 --- a/chain/actors/builtin/multisig/multisig.go +++ b/chain/actors/builtin/multisig/multisig.go @@ -13,7 +13,7 @@ import ( "github.com/ipfs/go-cid" msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - msig6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/multisig" + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.MultisigActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.MultisigActorCodeID: return load6(store, act.Head) + case builtin7.MultisigActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, signers []address.Address, th case actors.Version6: return make6(store, signers, threshold, startEpoch, 
unlockDuration, initialBalance) + case actors.Version7: + return make7(store, signers, threshold, startEpoch, unlockDuration, initialBalance) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.MultisigActorCodeID, nil + case actors.Version7: + return builtin7.MultisigActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -156,7 +171,7 @@ type State interface { type Transaction = msig0.Transaction -var Methods = builtin6.MethodsMultisig +var Methods = builtin7.MethodsMultisig func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -178,6 +193,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{message0{from}} + + case actors.Version7: + return message7{message0{from}} default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } @@ -201,13 +219,13 @@ type MessageBuilder interface { } // this type is the same between v0 and v2 -type ProposalHashData = msig6.ProposalHashData -type ProposeReturn = msig6.ProposeReturn -type ProposeParams = msig6.ProposeParams -type ApproveReturn = msig6.ApproveReturn +type ProposalHashData = msig7.ProposalHashData +type ProposeReturn = msig7.ProposeReturn +type ProposeParams = msig7.ProposeParams +type ApproveReturn = msig7.ApproveReturn func txnParams(id uint64, data *ProposalHashData) ([]byte, error) { - params := msig6.TxnIDParams{ID: msig6.TxnID(id)} + params := msig7.TxnIDParams{ID: msig7.TxnID(id)} if data != nil { if data.Requester.Protocol() != address.ID { return nil, xerrors.Errorf("proposer address must be an ID address, was %s", data.Requester) diff --git a/chain/actors/builtin/multisig/v7.go b/chain/actors/builtin/multisig/v7.go new file mode 100644 index 000000000..bbe41f3db --- /dev/null +++ b/chain/actors/builtin/multisig/v7.go @@ -0,0 +1,119 
@@ +package multisig + +import ( + "bytes" + "encoding/binary" + + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + "golang.org/x/xerrors" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + msig7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/multisig" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, signers []address.Address, threshold uint64, startEpoch abi.ChainEpoch, unlockDuration abi.ChainEpoch, initialBalance abi.TokenAmount) (State, error) { + out := state7{store: store} + out.State = msig7.State{} + out.State.Signers = signers + out.State.NumApprovalsThreshold = threshold + out.State.StartEpoch = startEpoch + out.State.UnlockDuration = unlockDuration + out.State.InitialBalance = initialBalance + + em, err := adt7.StoreEmptyMap(store, builtin7.DefaultHamtBitwidth) + if err != nil { + return nil, err + } + + out.State.PendingTxns = em + + return &out, nil +} + +type state7 struct { + msig7.State + store adt.Store +} + +func (s *state7) LockedBalance(currEpoch abi.ChainEpoch) (abi.TokenAmount, error) { + return s.State.AmountLocked(currEpoch - s.State.StartEpoch), nil +} + +func (s *state7) StartEpoch() (abi.ChainEpoch, error) { + return s.State.StartEpoch, nil +} + +func (s *state7) UnlockDuration() (abi.ChainEpoch, error) { + return s.State.UnlockDuration, nil +} + +func (s *state7) InitialBalance() (abi.TokenAmount, error) { + return s.State.InitialBalance, nil +} + +func (s *state7) Threshold() (uint64, error) { + return s.State.NumApprovalsThreshold, nil +} + 
+func (s *state7) Signers() ([]address.Address, error) { + return s.State.Signers, nil +} + +func (s *state7) ForEachPendingTxn(cb func(id int64, txn Transaction) error) error { + arr, err := adt7.AsMap(s.store, s.State.PendingTxns, builtin7.DefaultHamtBitwidth) + if err != nil { + return err + } + var out msig7.Transaction + return arr.ForEach(&out, func(key string) error { + txid, n := binary.Varint([]byte(key)) + if n <= 0 { + return xerrors.Errorf("invalid pending transaction key: %v", key) + } + return cb(txid, (Transaction)(out)) //nolint:unconvert + }) +} + +func (s *state7) PendingTxnChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.PendingTxns.Equals(other7.PendingTxns), nil +} + +func (s *state7) transactions() (adt.Map, error) { + return adt7.AsMap(s.store, s.PendingTxns, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeTransaction(val *cbg.Deferred) (Transaction, error) { + var tx msig7.Transaction + if err := tx.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Transaction{}, err + } + return tx, nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/paych/message.go.template b/chain/actors/builtin/paych/message.go.template index 4a5ea2331..99f64cabb 100644 --- a/chain/actors/builtin/paych/message.go.template +++ b/chain/actors/builtin/paych/message.go.template @@ -39,7 +39,11 @@ func (m message{{.v}}) Create(to address.Address, initialAmount abi.TokenAmount) func (m message{{.v}}) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych{{.v}}.UpdateChannelStateParams{ + {{if (ge .v 7)}} + Sv: toV{{.v}}SignedVoucher(*sv), + {{else}} Sv: *sv, + {{end}} Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message0.go b/chain/actors/builtin/paych/message0.go index 
bfeb2731e..7cba977e3 100644 --- a/chain/actors/builtin/paych/message0.go +++ b/chain/actors/builtin/paych/message0.go @@ -39,7 +39,9 @@ func (m message0) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message0) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych0.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message2.go b/chain/actors/builtin/paych/message2.go index 2cf3ef22e..60c7fe16e 100644 --- a/chain/actors/builtin/paych/message2.go +++ b/chain/actors/builtin/paych/message2.go @@ -39,7 +39,9 @@ func (m message2) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message2) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych2.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message3.go b/chain/actors/builtin/paych/message3.go index 50503a140..04fb35b57 100644 --- a/chain/actors/builtin/paych/message3.go +++ b/chain/actors/builtin/paych/message3.go @@ -39,7 +39,9 @@ func (m message3) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message3) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych3.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message4.go b/chain/actors/builtin/paych/message4.go index b2c6b612e..9f5e000d9 100644 --- a/chain/actors/builtin/paych/message4.go +++ b/chain/actors/builtin/paych/message4.go @@ -39,7 +39,9 @@ func (m message4) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message4) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) 
{ params, aerr := actors.SerializeParams(&paych4.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message5.go b/chain/actors/builtin/paych/message5.go index 37a2b6f04..71e6b6799 100644 --- a/chain/actors/builtin/paych/message5.go +++ b/chain/actors/builtin/paych/message5.go @@ -39,7 +39,9 @@ func (m message5) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message5) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych5.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message6.go b/chain/actors/builtin/paych/message6.go index aecf26983..7f80bc4a6 100644 --- a/chain/actors/builtin/paych/message6.go +++ b/chain/actors/builtin/paych/message6.go @@ -39,7 +39,9 @@ func (m message6) Create(to address.Address, initialAmount abi.TokenAmount) (*ty func (m message6) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { params, aerr := actors.SerializeParams(&paych6.UpdateChannelStateParams{ - Sv: *sv, + + Sv: *sv, + Secret: secret, }) if aerr != nil { diff --git a/chain/actors/builtin/paych/message7.go b/chain/actors/builtin/paych/message7.go new file mode 100644 index 000000000..e3ee0d77b --- /dev/null +++ b/chain/actors/builtin/paych/message7.go @@ -0,0 +1,76 @@ +package paych + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + + "github.com/filecoin-project/lotus/chain/actors" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/types" +) + 
+type message7 struct{ from address.Address } + +func (m message7) Create(to address.Address, initialAmount abi.TokenAmount) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.ConstructorParams{From: m.from, To: to}) + if aerr != nil { + return nil, aerr + } + enc, aerr := actors.SerializeParams(&init7.ExecParams{ + CodeCID: builtin7.PaymentChannelActorCodeID, + ConstructorParams: params, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: init_.Address, + From: m.from, + Value: initialAmount, + Method: builtin7.MethodsInit.Exec, + Params: enc, + }, nil +} + +func (m message7) Update(paych address.Address, sv *SignedVoucher, secret []byte) (*types.Message, error) { + params, aerr := actors.SerializeParams(&paych7.UpdateChannelStateParams{ + + Sv: toV7SignedVoucher(*sv), + + Secret: secret, + }) + if aerr != nil { + return nil, aerr + } + + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.UpdateChannelState, + Params: params, + }, nil +} + +func (m message7) Settle(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Settle, + }, nil +} + +func (m message7) Collect(paych address.Address) (*types.Message, error) { + return &types.Message{ + To: paych, + From: m.from, + Value: abi.NewTokenAmount(0), + Method: builtin7.MethodsPaych.Collect, + }, nil +} diff --git a/chain/actors/builtin/paych/paych.go b/chain/actors/builtin/paych/paych.go index eea3659f8..f807b33ed 100644 --- a/chain/actors/builtin/paych/paych.go +++ b/chain/actors/builtin/paych/paych.go @@ -27,6 +27,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" 
"github.com/filecoin-project/lotus/chain/actors/builtin" @@ -58,6 +60,10 @@ func init() { builtin.RegisterActorState(builtin6.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.PaymentChannelActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } // Load returns an abstract copy of payment channel state, irregardless of actor version @@ -82,6 +88,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.PaymentChannelActorCodeID: return load6(store, act.Head) + case builtin7.PaymentChannelActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -107,6 +116,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -132,6 +144,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.PaymentChannelActorCodeID, nil + case actors.Version7: + return builtin7.PaymentChannelActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) @@ -185,7 +200,7 @@ func DecodeSignedVoucher(s string) (*SignedVoucher, error) { return &sv, nil } -var Methods = builtin6.MethodsPaych +var Methods = builtin7.MethodsPaych func Message(version actors.Version, from address.Address) MessageBuilder { switch version { @@ -208,6 +223,9 @@ func Message(version actors.Version, from address.Address) MessageBuilder { case actors.Version6: return message6{from} + case actors.Version7: + return message7{from} + default: panic(fmt.Sprintf("unsupported actors version: %d", version)) } diff --git a/chain/actors/builtin/paych/state.go.template b/chain/actors/builtin/paych/state.go.template index 3e41f5be5..f11407202 100644 --- 
a/chain/actors/builtin/paych/state.go.template +++ b/chain/actors/builtin/paych/state.go.template @@ -112,3 +112,21 @@ func (ls *laneState{{.v}}) Redeemed() (big.Int, error) { func (ls *laneState{{.v}}) Nonce() (uint64, error) { return ls.LaneState.Nonce, nil } + +{{if (ge .v 7)}} +func toV{{.v}}SignedVoucher(sv SignedVoucher) paych{{.v}}.SignedVoucher { + return paych{{.v}}.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretPreimage, + Extra: sv.Extra, + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: sv.Merges, + Signature: sv.Signature, + } +} +{{end}} \ No newline at end of file diff --git a/chain/actors/builtin/paych/v7.go b/chain/actors/builtin/paych/v7.go new file mode 100644 index 000000000..19c801c82 --- /dev/null +++ b/chain/actors/builtin/paych/v7.go @@ -0,0 +1,130 @@ +package paych + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = paych7.State{} + return &out, nil +} + +type state7 struct { + paych7.State + store adt.Store + lsAmt *adt7.Array +} + +// Channel owner, who has funded the actor +func (s *state7) From() (address.Address, error) { + return s.State.From, nil +} + +// Recipient of payouts from channel +func (s *state7) To() (address.Address, error) { + 
return s.State.To, nil +} + +// Height at which the channel can be `Collected` +func (s *state7) SettlingAt() (abi.ChainEpoch, error) { + return s.State.SettlingAt, nil +} + +// Amount successfully redeemed through the payment channel, paid out on `Collect()` +func (s *state7) ToSend() (abi.TokenAmount, error) { + return s.State.ToSend, nil +} + +func (s *state7) getOrLoadLsAmt() (*adt7.Array, error) { + if s.lsAmt != nil { + return s.lsAmt, nil + } + + // Get the lane state from the chain + lsamt, err := adt7.AsArray(s.store, s.State.LaneStates, paych7.LaneStatesAmtBitwidth) + if err != nil { + return nil, err + } + + s.lsAmt = lsamt + return lsamt, nil +} + +// Get total number of lanes +func (s *state7) LaneCount() (uint64, error) { + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return 0, err + } + return lsamt.Length(), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +// Iterate lane states +func (s *state7) ForEachLaneState(cb func(idx uint64, dl LaneState) error) error { + // Get the lane state from the chain + lsamt, err := s.getOrLoadLsAmt() + if err != nil { + return err + } + + // Note: we use a map instead of an array to store laneStates because the + // client sets the lane ID (the index) and potentially they could use a + // very large index. 
+ var ls paych7.LaneState + return lsamt.ForEach(&ls, func(i int64) error { + return cb(uint64(i), &laneState7{ls}) + }) +} + +type laneState7 struct { + paych7.LaneState +} + +func (ls *laneState7) Redeemed() (big.Int, error) { + return ls.LaneState.Redeemed, nil +} + +func (ls *laneState7) Nonce() (uint64, error) { + return ls.LaneState.Nonce, nil +} + +func toV7SignedVoucher(sv SignedVoucher) paych7.SignedVoucher { + return paych7.SignedVoucher{ + ChannelAddr: sv.ChannelAddr, + TimeLockMin: sv.TimeLockMin, + TimeLockMax: sv.TimeLockMax, + SecretHash: sv.SecretPreimage, + Extra: sv.Extra, + Lane: sv.Lane, + Nonce: sv.Nonce, + Amount: sv.Amount, + MinSettleHeight: sv.MinSettleHeight, + Merges: sv.Merges, + Signature: sv.Signature, + } +} diff --git a/chain/actors/builtin/power/power.go b/chain/actors/builtin/power/power.go index 84bd6948a..9b73cdd60 100644 --- a/chain/actors/builtin/power/power.go +++ b/chain/actors/builtin/power/power.go @@ -26,6 +26,8 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) func init() { @@ -53,11 +55,15 @@ func init() { builtin.RegisterActorState(builtin6.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.StoragePowerActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.StoragePowerActorAddr - Methods = builtin6.MethodsPower + Address = builtin7.StoragePowerActorAddr + Methods = builtin7.MethodsPower ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.StoragePowerActorCodeID: return load6(store, act.Head) + case builtin7.StoragePowerActorCodeID: + return load7(store, 
act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.StoragePowerActorCodeID, nil + case actors.Version7: + return builtin7.StoragePowerActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/power/v7.go b/chain/actors/builtin/power/v7.go new file mode 100644 index 000000000..af1761cb2 --- /dev/null +++ b/chain/actors/builtin/power/v7.go @@ -0,0 +1,187 @@ +package power + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + power7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/power" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + + s, err := power7.ConstructState(store) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + power7.State + store adt.Store +} + +func (s *state7) TotalLocked() (abi.TokenAmount, error) { + return s.TotalPledgeCollateral, nil +} + +func (s *state7) TotalPower() (Claim, error) { + 
return Claim{ + RawBytePower: s.TotalRawBytePower, + QualityAdjPower: s.TotalQualityAdjPower, + }, nil +} + +// Committed power to the network. Includes miners below the minimum threshold. +func (s *state7) TotalCommitted() (Claim, error) { + return Claim{ + RawBytePower: s.TotalBytesCommitted, + QualityAdjPower: s.TotalQABytesCommitted, + }, nil +} + +func (s *state7) MinerPower(addr address.Address) (Claim, bool, error) { + claims, err := s.claims() + if err != nil { + return Claim{}, false, err + } + var claim power7.Claim + ok, err := claims.Get(abi.AddrKey(addr), &claim) + if err != nil { + return Claim{}, false, err + } + return Claim{ + RawBytePower: claim.RawBytePower, + QualityAdjPower: claim.QualityAdjPower, + }, ok, nil +} + +func (s *state7) MinerNominalPowerMeetsConsensusMinimum(a address.Address) (bool, error) { + return s.State.MinerNominalPowerMeetsConsensusMinimum(s.store, a) +} + +func (s *state7) TotalPowerSmoothed() (builtin.FilterEstimate, error) { + return builtin.FromV7FilterEstimate(s.State.ThisEpochQAPowerSmoothed), nil +} + +func (s *state7) MinerCounts() (uint64, uint64, error) { + return uint64(s.State.MinerAboveMinPowerCount), uint64(s.State.MinerCount), nil +} + +func (s *state7) ListAllMiners() ([]address.Address, error) { + claims, err := s.claims() + if err != nil { + return nil, err + } + + var miners []address.Address + err = claims.ForEach(nil, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + miners = append(miners, a) + return nil + }) + if err != nil { + return nil, err + } + + return miners, nil +} + +func (s *state7) ForEachClaim(cb func(miner address.Address, claim Claim) error) error { + claims, err := s.claims() + if err != nil { + return err + } + + var claim power7.Claim + return claims.ForEach(&claim, func(k string) error { + a, err := address.NewFromBytes([]byte(k)) + if err != nil { + return err + } + return cb(a, Claim{ + RawBytePower: claim.RawBytePower, + 
QualityAdjPower: claim.QualityAdjPower, + }) + }) +} + +func (s *state7) ClaimsChanged(other State) (bool, error) { + other7, ok := other.(*state7) + if !ok { + // treat an upgrade as a change, always + return true, nil + } + return !s.State.Claims.Equals(other7.State.Claims), nil +} + +func (s *state7) SetTotalQualityAdjPower(p abi.StoragePower) error { + s.State.TotalQualityAdjPower = p + return nil +} + +func (s *state7) SetTotalRawBytePower(p abi.StoragePower) error { + s.State.TotalRawBytePower = p + return nil +} + +func (s *state7) SetThisEpochQualityAdjPower(p abi.StoragePower) error { + s.State.ThisEpochQualityAdjPower = p + return nil +} + +func (s *state7) SetThisEpochRawBytePower(p abi.StoragePower) error { + s.State.ThisEpochRawBytePower = p + return nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} + +func (s *state7) claims() (adt.Map, error) { + return adt7.AsMap(s.store, s.Claims, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) decodeClaim(val *cbg.Deferred) (Claim, error) { + var ci power7.Claim + if err := ci.UnmarshalCBOR(bytes.NewReader(val.Raw)); err != nil { + return Claim{}, err + } + return fromV7Claim(ci), nil +} + +func fromV7Claim(v7 power7.Claim) Claim { + return Claim{ + RawBytePower: v7.RawBytePower, + QualityAdjPower: v7.QualityAdjPower, + } +} diff --git a/chain/actors/builtin/reward/reward.go b/chain/actors/builtin/reward/reward.go index 38d5b5b87..b6ee2f146 100644 --- a/chain/actors/builtin/reward/reward.go +++ b/chain/actors/builtin/reward/reward.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/types" @@ -51,11 +53,15 @@ func init() { builtin.RegisterActorState(builtin6.RewardActorCodeID, func(store adt.Store, root cid.Cid) 
(cbor.Marshaler, error) { return load6(store, root) }) + + builtin.RegisterActorState(builtin7.RewardActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) } var ( - Address = builtin6.RewardActorAddr - Methods = builtin6.MethodsReward + Address = builtin7.RewardActorAddr + Methods = builtin7.MethodsReward ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -79,6 +85,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.RewardActorCodeID: return load6(store, act.Head) + case builtin7.RewardActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -104,6 +113,9 @@ func MakeState(store adt.Store, av actors.Version, currRealizedPower abi.Storage case actors.Version6: return make6(store, currRealizedPower) + case actors.Version7: + return make7(store, currRealizedPower) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -129,6 +141,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.RewardActorCodeID, nil + case actors.Version7: + return builtin7.RewardActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/reward/v7.go b/chain/actors/builtin/reward/v7.go new file mode 100644 index 000000000..368bb3abd --- /dev/null +++ b/chain/actors/builtin/reward/v7.go @@ -0,0 +1,98 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" + + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + reward7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/reward" + smoothing7 "github.com/filecoin-project/specs-actors/v7/actors/util/smoothing" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root 
cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, currRealizedPower abi.StoragePower) (State, error) { + out := state7{store: store} + out.State = *reward7.ConstructState(currRealizedPower) + return &out, nil +} + +type state7 struct { + reward7.State + store adt.Store +} + +func (s *state7) ThisEpochReward() (abi.TokenAmount, error) { + return s.State.ThisEpochReward, nil +} + +func (s *state7) ThisEpochRewardSmoothed() (builtin.FilterEstimate, error) { + + return builtin.FilterEstimate{ + PositionEstimate: s.State.ThisEpochRewardSmoothed.PositionEstimate, + VelocityEstimate: s.State.ThisEpochRewardSmoothed.VelocityEstimate, + }, nil + +} + +func (s *state7) ThisEpochBaselinePower() (abi.StoragePower, error) { + return s.State.ThisEpochBaselinePower, nil +} + +func (s *state7) TotalStoragePowerReward() (abi.TokenAmount, error) { + return s.State.TotalStoragePowerReward, nil +} + +func (s *state7) EffectiveBaselinePower() (abi.StoragePower, error) { + return s.State.EffectiveBaselinePower, nil +} + +func (s *state7) EffectiveNetworkTime() (abi.ChainEpoch, error) { + return s.State.EffectiveNetworkTime, nil +} + +func (s *state7) CumsumBaseline() (reward7.Spacetime, error) { + return s.State.CumsumBaseline, nil +} + +func (s *state7) CumsumRealized() (reward7.Spacetime, error) { + return s.State.CumsumRealized, nil +} + +func (s *state7) InitialPledgeForPower(qaPower abi.StoragePower, networkTotalPledge abi.TokenAmount, networkQAPower *builtin.FilterEstimate, circSupply abi.TokenAmount) (abi.TokenAmount, error) { + return miner7.InitialPledgeForPower( + qaPower, + s.State.ThisEpochBaselinePower, + s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + circSupply, + ), nil +} + +func (s *state7) 
PreCommitDepositForPower(networkQAPower builtin.FilterEstimate, sectorWeight abi.StoragePower) (abi.TokenAmount, error) { + return miner7.PreCommitDepositForPower(s.State.ThisEpochRewardSmoothed, + smoothing7.FilterEstimate{ + PositionEstimate: networkQAPower.PositionEstimate, + VelocityEstimate: networkQAPower.VelocityEstimate, + }, + sectorWeight), nil +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/system/system.go b/chain/actors/builtin/system/system.go index 3d6105c38..fb7515f35 100644 --- a/chain/actors/builtin/system/system.go +++ b/chain/actors/builtin/system/system.go @@ -17,10 +17,12 @@ import ( builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" ) var ( - Address = builtin6.SystemActorAddr + Address = builtin7.SystemActorAddr ) func MakeState(store adt.Store, av actors.Version) (State, error) { @@ -44,6 +46,9 @@ func MakeState(store adt.Store, av actors.Version) (State, error) { case actors.Version6: return make6(store) + case actors.Version7: + return make7(store) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -69,6 +74,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.SystemActorCodeID, nil + case actors.Version7: + return builtin7.SystemActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/builtin/system/v7.go b/chain/actors/builtin/system/v7.go new file mode 100644 index 000000000..813add5fb --- /dev/null +++ b/chain/actors/builtin/system/v7.go @@ -0,0 +1,35 @@ +package system + +import ( + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors/adt" + + system7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/system" +) + +var _ State = (*state7)(nil) + +func load7(store 
adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store) (State, error) { + out := state7{store: store} + out.State = system7.State{} + return &out, nil +} + +type state7 struct { + system7.State + store adt.Store +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/v7.go b/chain/actors/builtin/verifreg/v7.go new file mode 100644 index 000000000..9b2ca928a --- /dev/null +++ b/chain/actors/builtin/verifreg/v7.go @@ -0,0 +1,75 @@ +package verifreg + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + adt7 "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +var _ State = (*state7)(nil) + +func load7(store adt.Store, root cid.Cid) (State, error) { + out := state7{store: store} + err := store.Get(store.Context(), root, &out) + if err != nil { + return nil, err + } + return &out, nil +} + +func make7(store adt.Store, rootKeyAddress address.Address) (State, error) { + out := state7{store: store} + + s, err := verifreg7.ConstructState(store, rootKeyAddress) + if err != nil { + return nil, err + } + + out.State = *s + + return &out, nil +} + +type state7 struct { + verifreg7.State + store adt.Store +} + +func (s *state7) RootKey() (address.Address, error) { + return s.State.RootKey, nil +} + +func (s *state7) VerifiedClientDataCap(addr address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiedClients, addr) +} + +func (s *state7) VerifierDataCap(addr 
address.Address) (bool, abi.StoragePower, error) { + return getDataCap(s.store, actors.Version7, s.verifiers, addr) +} + +func (s *state7) ForEachVerifier(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiers, cb) +} + +func (s *state7) ForEachClient(cb func(addr address.Address, dcap abi.StoragePower) error) error { + return forEachCap(s.store, actors.Version7, s.verifiedClients, cb) +} + +func (s *state7) verifiedClients() (adt.Map, error) { + return adt7.AsMap(s.store, s.VerifiedClients, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) verifiers() (adt.Map, error) { + return adt7.AsMap(s.store, s.Verifiers, builtin7.DefaultHamtBitwidth) +} + +func (s *state7) GetState() interface{} { + return &s.State +} diff --git a/chain/actors/builtin/verifreg/verifreg.go b/chain/actors/builtin/verifreg/verifreg.go index 31e8e5a08..f6281334d 100644 --- a/chain/actors/builtin/verifreg/verifreg.go +++ b/chain/actors/builtin/verifreg/verifreg.go @@ -21,6 +21,8 @@ import ( builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" @@ -53,11 +55,15 @@ func init() { return load6(store, root) }) + builtin.RegisterActorState(builtin7.VerifiedRegistryActorCodeID, func(store adt.Store, root cid.Cid) (cbor.Marshaler, error) { + return load7(store, root) + }) + } var ( - Address = builtin6.VerifiedRegistryActorAddr - Methods = builtin6.MethodsVerifiedRegistry + Address = builtin7.VerifiedRegistryActorAddr + Methods = builtin7.MethodsVerifiedRegistry ) func Load(store adt.Store, act *types.Actor) (State, error) { @@ -81,6 +87,9 @@ func Load(store adt.Store, act *types.Actor) (State, error) { case builtin6.VerifiedRegistryActorCodeID: return load6(store, act.Head) + case 
builtin7.VerifiedRegistryActorCodeID: + return load7(store, act.Head) + } return nil, xerrors.Errorf("unknown actor code %s", act.Code) } @@ -106,6 +115,9 @@ func MakeState(store adt.Store, av actors.Version, rootKeyAddress address.Addres case actors.Version6: return make6(store, rootKeyAddress) + case actors.Version7: + return make7(store, rootKeyAddress) + } return nil, xerrors.Errorf("unknown actor version %d", av) } @@ -131,6 +143,9 @@ func GetActorCodeID(av actors.Version) (cid.Cid, error) { case actors.Version6: return builtin6.VerifiedRegistryActorCodeID, nil + case actors.Version7: + return builtin7.VerifiedRegistryActorCodeID, nil + } return cid.Undef, xerrors.Errorf("unknown actor version %d", av) diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index e00a6ae10..f51da7aa7 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -40,14 +40,19 @@ import ( miner6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/miner" verifreg6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/verifreg" - paych6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/paych" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" + miner7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/miner" + verifreg7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/verifreg" + + paych7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/paych" ) const ( - ChainFinality = miner6.ChainFinality + ChainFinality = miner7.ChainFinality SealRandomnessLookback = ChainFinality - PaychSettleDelay = paych6.SettleDelay - MaxPreCommitRandomnessLookback = builtin6.EpochsInDay + SealRandomnessLookback + PaychSettleDelay = paych7.SettleDelay + MaxPreCommitRandomnessLookback = builtin7.EpochsInDay + SealRandomnessLookback ) // SetSupportedProofTypes sets supported proof types, across all actor versions. 
@@ -72,6 +77,8 @@ func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + miner7.PreCommitSealProofTypesV8 = make(map[abi.RegisteredSealProof]struct{}, len(types)) + AddSupportedProofTypes(types...) } @@ -119,6 +126,15 @@ func AddSupportedProofTypes(types ...abi.RegisteredSealProof) { miner6.WindowPoStProofTypes[wpp] = struct{}{} + miner7.PreCommitSealProofTypesV8[t+abi.RegisteredSealProof_StackedDrg2KiBV1_1] = struct{}{} + wpp, err = t.RegisteredWindowPoStProof() + if err != nil { + // Fine to panic, this is a test-only method + panic(err) + } + + miner7.WindowPoStProofTypes[wpp] = struct{}{} + } } @@ -139,11 +155,13 @@ func SetPreCommitChallengeDelay(delay abi.ChainEpoch) { miner6.PreCommitChallengeDelay = delay + miner7.PreCommitChallengeDelay = delay + } // TODO: this function shouldn't really exist. Instead, the API should expose the precommit delay. func GetPreCommitChallengeDelay() abi.ChainEpoch { - return miner6.PreCommitChallengeDelay + return miner7.PreCommitChallengeDelay } // SetConsensusMinerMinPower sets the minimum power of an individual miner must @@ -173,6 +191,10 @@ func SetConsensusMinerMinPower(p abi.StoragePower) { policy.ConsensusMinerMinPower = p } + for _, policy := range builtin7.PoStProofPolicies { + policy.ConsensusMinerMinPower = p + } + } // SetMinVerifiedDealSize sets the minimum size of a verified deal. 
This should @@ -191,6 +213,8 @@ func SetMinVerifiedDealSize(size abi.StoragePower) { verifreg6.MinVerifiedDealSize = size + verifreg7.MinVerifiedDealSize = size + } func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (abi.ChainEpoch, error) { @@ -220,6 +244,10 @@ func GetMaxProveCommitDuration(ver actors.Version, t abi.RegisteredSealProof) (a return miner6.MaxProveCommitDuration[t], nil + case actors.Version7: + + return miner7.MaxProveCommitDuration[t], nil + default: return 0, xerrors.Errorf("unsupported actors version") } @@ -255,6 +283,11 @@ func SetProviderCollateralSupplyTarget(num, denom big.Int) { Denominator: denom, } + market7.ProviderCollateralSupplyTarget = builtin7.BigFrac{ + Numerator: num, + Denominator: denom, + } + } func DealProviderCollateralBounds( @@ -298,13 +331,18 @@ func DealProviderCollateralBounds( min, max := market6.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) return min, max, nil + case actors.Version7: + + min, max := market7.DealProviderCollateralBounds(size, verified, rawBytePower, qaPower, baselinePower, circulatingFil) + return min, max, nil + default: return big.Zero(), big.Zero(), xerrors.Errorf("unsupported actors version") } } func DealDurationBounds(pieceSize abi.PaddedPieceSize) (min, max abi.ChainEpoch) { - return market6.DealDurationBounds(pieceSize) + return market7.DealDurationBounds(pieceSize) } // Sets the challenge window and scales the proving period to match (such that @@ -345,6 +383,13 @@ func SetWPoStChallengeWindow(period abi.ChainEpoch) { // scale it if we're scaling the challenge period. miner6.WPoStDisputeWindow = period * 30 + miner7.WPoStChallengeWindow = period + miner7.WPoStProvingPeriod = period * abi.ChainEpoch(miner7.WPoStPeriodDeadlines) + + // by default, this is 2x finality which is 30 periods. + // scale it if we're scaling the challenge period. 
+ miner7.WPoStDisputeWindow = period * 30 + } func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { @@ -357,15 +402,15 @@ func GetWinningPoStSectorSetLookback(nwVer network.Version) abi.ChainEpoch { } func GetMaxSectorExpirationExtension() abi.ChainEpoch { - return miner6.MaxSectorExpirationExtension + return miner7.MaxSectorExpirationExtension } func GetMinSectorExpiration() abi.ChainEpoch { - return miner6.MinSectorExpiration + return miner7.MinSectorExpiration } func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, error) { - sectorsPerPart, err := builtin6.PoStProofWindowPoStPartitionSectors(p) + sectorsPerPart, err := builtin7.PoStProofWindowPoStPartitionSectors(p) if err != nil { return 0, err } @@ -378,8 +423,8 @@ func GetMaxPoStPartitions(nv network.Version, p abi.RegisteredPoStProof) (int, e func GetDefaultSectorSize() abi.SectorSize { // supported sector sizes are the same across versions. - szs := make([]abi.SectorSize, 0, len(miner6.PreCommitSealProofTypesV8)) - for spt := range miner6.PreCommitSealProofTypesV8 { + szs := make([]abi.SectorSize, 0, len(miner7.PreCommitSealProofTypesV8)) + for spt := range miner7.PreCommitSealProofTypesV8 { ss, err := spt.SectorSize() if err != nil { panic(err) @@ -404,7 +449,7 @@ func GetSectorMaxLifetime(proof abi.RegisteredSealProof, nwVer network.Version) return builtin4.SealProofPoliciesV0[proof].SectorMaxLifetime } - return builtin6.SealProofPoliciesV11[proof].SectorMaxLifetime + return builtin7.SealProofPoliciesV11[proof].SectorMaxLifetime } func GetAddressedSectorsMax(nwVer network.Version) (int, error) { @@ -432,6 +477,9 @@ func GetAddressedSectorsMax(nwVer network.Version) (int, error) { case actors.Version6: return miner6.AddressedSectorsMax, nil + case actors.Version7: + return miner7.AddressedSectorsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -469,6 +517,10 @@ func GetDeclarationsMax(nwVer network.Version) (int, error) { 
return miner6.DeclarationsMax, nil + case actors.Version7: + + return miner7.DeclarationsMax, nil + default: return 0, xerrors.Errorf("unsupported network version") } @@ -505,6 +557,10 @@ func AggregateProveCommitNetworkFee(nwVer network.Version, aggregateSize int, ba return miner6.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregateProveCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } @@ -541,6 +597,10 @@ func AggregatePreCommitNetworkFee(nwVer network.Version, aggregateSize int, base return miner6.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + case actors.Version7: + + return miner7.AggregatePreCommitNetworkFee(aggregateSize, baseFee), nil + default: return big.Zero(), xerrors.Errorf("unsupported network version") } diff --git a/chain/actors/version.go b/chain/actors/version.go index 95dd09126..af51161c9 100644 --- a/chain/actors/version.go +++ b/chain/actors/version.go @@ -8,9 +8,21 @@ import ( type Version int -var LatestVersion = 6 +/* inline-gen template -var Versions = []int{0, 2, 3, 4, 5, LatestVersion} +var LatestVersion = {{.latestActorsVersion}} + +var Versions = []int{ {{range .actorVersions}} {{.}}, {{end}} } + +const ({{range .actorVersions}} + Version{{.}} Version = {{.}}{{end}} +) + +/* inline-gen start */ + +var LatestVersion = 7 + +var Versions = []int{0, 2, 3, 4, 5, 6, 7} const ( Version0 Version = 0 @@ -19,8 +31,11 @@ const ( Version4 Version = 4 Version5 Version = 5 Version6 Version = 6 + Version7 Version = 7 ) +/* inline-gen end */ + // Converts a network version into an actors adt version. 
func VersionForNetwork(version network.Version) (Version, error) { switch version { @@ -36,6 +51,8 @@ func VersionForNetwork(version network.Version) (Version, error) { return Version5, nil case network.Version14: return Version6, nil + case network.Version15: + return Version7, nil default: return -1, fmt.Errorf("unsupported network version %d", version) } diff --git a/chain/checkpoint.go b/chain/checkpoint.go index a3660a45c..4f8310593 100644 --- a/chain/checkpoint.go +++ b/chain/checkpoint.go @@ -13,7 +13,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("called with empty tsk") } - ts, err := syncer.ChainStore().LoadTipSet(tsk) + ts, err := syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { tss, err := syncer.Exchange.GetBlocks(ctx, tsk, 1) if err != nil { @@ -28,7 +28,7 @@ func (syncer *Syncer) SyncCheckpoint(ctx context.Context, tsk types.TipSetKey) e return xerrors.Errorf("failed to switch chain when syncing checkpoint: %w", err) } - if err := syncer.ChainStore().SetCheckpoint(ts); err != nil { + if err := syncer.ChainStore().SetCheckpoint(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain checkpoint: %w", err) } @@ -41,7 +41,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return nil } - if anc, err := syncer.store.IsAncestorOf(ts, hts); err == nil && anc { + if anc, err := syncer.store.IsAncestorOf(ctx, ts, hts); err == nil && anc { return nil } @@ -50,7 +50,7 @@ func (syncer *Syncer) switchChain(ctx context.Context, ts *types.TipSet) error { return xerrors.Errorf("failed to collect chain for checkpoint: %w", err) } - if err := syncer.ChainStore().SetHead(ts); err != nil { + if err := syncer.ChainStore().SetHead(ctx, ts); err != nil { return xerrors.Errorf("failed to set the chain head: %w", err) } return nil diff --git a/chain/checkpointing/cbor_gen.go b/chain/checkpointing/cbor_gen.go new file mode 100644 index 000000000..eb4dd4506 --- 
/dev/null +++ b/chain/checkpointing/cbor_gen.go @@ -0,0 +1,181 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package checkpointing + +import ( + "fmt" + "io" + "math" + "sort" + + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufResolveMsg = []byte{131} + +func (t *ResolveMsg) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufResolveMsg); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Type (checkpointing.MsgType) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.Cid (string) (string) + if len(t.Cid) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Cid was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Cid))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Cid)); err != nil { + return err + } + + // t.Content (checkpointing.MsgData) (struct) + if err := t.Content.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ResolveMsg) UnmarshalCBOR(r io.Reader) error { + *t = ResolveMsg{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Type (checkpointing.MsgType) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Type = MsgType(extra) + + } + // t.Cid (string) (string) 
+ + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Cid = string(sval) + } + // t.Content (checkpointing.MsgData) (struct) + + { + + if err := t.Content.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Content: %w", err) + } + + } + return nil +} + +var lengthBufMsgData = []byte{129} + +func (t *MsgData) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMsgData); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Content ([]uint8) (slice) + if len(t.Content) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Content was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Content))); err != nil { + return err + } + + if _, err := w.Write(t.Content[:]); err != nil { + return err + } + return nil +} + +func (t *MsgData) UnmarshalCBOR(r io.Reader) error { + *t = MsgData{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Content ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Content: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Content = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Content[:]); err != nil { + return err + } + return nil +} diff --git a/chain/checkpointing/kvs.go b/chain/checkpointing/kvs.go new file mode 100644 index 000000000..db8a636a2 --- /dev/null +++ b/chain/checkpointing/kvs.go @@ -0,0 +1,539 @@ +package 
checkpointing + +//go:generate go run ./gen/gen.go + +import ( + "bytes" + "context" + "sync" + "time" + "crypto/sha256" + "errors" + "encoding/hex" + "fmt" + + // "github.com/filecoin-project/go-address" + // "github.com/filecoin-project/lotus/chain/actors/adt" + // "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + // "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + //"github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + //"github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/helpers" + lru "github.com/hashicorp/golang-lru" + //"github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + nsds "github.com/ipfs/go-datastore/namespace" + + //logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "go.uber.org/fx" + xerrors "golang.org/x/xerrors" +) + +const retryTimeout = 10 * time.Second + +//var log = logging.Logger("checkpointing-kvs") + +// in the checkpointing case we always use the same topic +// we will use "pikachu" +// func SubnetResolverTopic(id address.SubnetID) string { +// return "/fil/subnet/resolver" + id.String() +// } + +// same for namespace, we will use "pikachu" +// func resolverNamespace(id address.SubnetID) datastore.Key { +// return datastore.NewKey("/resolver/" + id.String()) +// } + +type Resolver struct { + //netName address.SubnetID + self peer.ID + ds datastore.Datastore + pubsub *pubsub.PubSub + + // Caches to track duplicate and frequent msgs + pushCache *msgReceiptCache + pullCache *msgReceiptCache + // NOTE: We don't track number of response + // messages sent for now. We accept any number. + // We will need to address this to prevent potential + // spamming. 
+ // responseCache *msgReceiptCache + + lk sync.Mutex + ongoingPull map[string]time.Time +} + +type MsgType uint64 + +const ( + // Push content to other subnet + Push MsgType = iota + // Pullrequests CrossMsgs behind a CID + Pull + // Response is used to answer to pull requests. + Response + + // NOTE: For now we don't expect subnets needing to + // pull checkpoints from other subnets (although this + // has been discussed for verification purposes) + // PullCheck requests Checkpoint form a CID + // PullCheck +) + +type MsgData struct{ + Content []byte +} + +type ResolveMsg struct { + // From subnet -> not needed for checkpointing + //From address.SubnetID + // Message type being propagated + Type MsgType + // Cid of the content + Cid string + // MsgMeta being propagated (if any)-> change this to be string? + //CrossMsgs sca.CrossMsgs + // Checkpoint being propagated (if any) + // Checkpoint schema.Checkpoint + + //for checkpointing, we use []byte + Content MsgData +} + +type msgReceiptCache struct { + msgs *lru.TwoQueueCache +} + +func newMsgReceiptCache() *msgReceiptCache { + c, _ := lru.New2Q(8192) + + return &msgReceiptCache{ + msgs: c, + } +} + +func (mrc *msgReceiptCache) add(bcid string) int { + val, ok := mrc.msgs.Get(bcid) + if !ok { + mrc.msgs.Add(bcid, int(1)) + return 0 + } + + mrc.msgs.Add(bcid, val.(int)+1) + return val.(int) +} + +func (r *Resolver) addMsgReceipt(t MsgType, bcid string, from peer.ID) int { + if t == Push { + // All push messages are considered equal independent of + // the source. + return r.pushCache.add(bcid) + } + // We allow each peer.ID in a subnet to send a pull request + // for each CID without being rejected. + // FIXME: Additional checks may be required to prevent malicious + // peers from spamming the topic with infinite requests. + // Deferring the design of a smarter logic here. 
+ return r.pullCache.add(bcid + from.String()) +} + + +func NewResolver(self peer.ID, ds datastore.Datastore, pubsub *pubsub.PubSub) *Resolver { + return &Resolver{ + self: self, + ds: nsds.Wrap(ds, datastore.NewKey("pikachu")), + //ds: ds, + pubsub: pubsub, + pushCache: newMsgReceiptCache(), + pullCache: newMsgReceiptCache(), + ongoingPull: make(map[string]time.Time), + } +} + +func HandleMsgs(mctx helpers.MetricsCtx, lc fx.Lifecycle, r *Resolver) { + ctx := helpers.LifecycleCtx(mctx, lc) + if err := r.HandleMsgs(ctx); err != nil { + panic(err) + } +} + +func (r *Resolver) HandleMsgs(ctx context.Context) error { + // Register new message validator for resolver msgs. + v := NewValidator(r) + if err := r.pubsub.RegisterTopicValidator("pikachu", v.Validate); err != nil { + return err + } + + log.Infof("subscribing to content resolver topic pikachu") + + // Subscribe to subnet resolver topic. + msgSub, err := r.pubsub.Subscribe("pikachu") //nolint + if err != nil { + return err + } + fmt.Println("suscribed to message sub", msgSub) + //time.Sleep(6 * time.Second) + + // Start handle incoming resolver msg. + go r.HandleIncomingResolveMsg(ctx, msgSub) + return nil +} + +func (r *Resolver) Close() error { + // Unregister topic validator when resolver is closed. If not, when + // initializing it again registering the validator will fail. 
+ return r.pubsub.UnregisterTopicValidator("pikachu") +} +func (r *Resolver) shouldPull(c string) bool { + r.lk.Lock() + defer r.lk.Unlock() + if time.Since(r.ongoingPull[c]) > retryTimeout { + r.ongoingPull[c] = time.Now() + return true + } + return false +} + +func (r *Resolver) pullSuccess(c string) { + r.lk.Lock() + defer r.lk.Unlock() + delete(r.ongoingPull, c) +} + +func DecodeResolveMsg(b []byte) (*ResolveMsg, error) { + var bm ResolveMsg + if err := bm.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &bm, nil +} + +func EncodeResolveMsg(m *ResolveMsg) ([]byte, error) { + w := new(bytes.Buffer) + if err := m.MarshalCBOR(w); err != nil { + return nil, err + } + return w.Bytes(), nil +} + +type Validator struct { + r *Resolver +} + +func NewValidator( r *Resolver) *Validator { + return &Validator{r} +} + +func (v *Validator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) { + // Decode resolve msg + fmt.Println("Calling Validate ") + rmsg, err := DecodeResolveMsg(msg.GetData()) + fmt.Println("decoded message cid: ", rmsg.Cid) + fmt.Println("Message is coming from: ", pid.String()) + if err != nil { + fmt.Println("errod decoding message cid") + log.Errorf("error decoding resolve msg cid: %s", err) + return pubsub.ValidationReject + } + fmt.Println(rmsg) + fmt.Println("we are here! 
hello") + log.Infof("Received kvs resolution message of type: %v, from %v", rmsg.Type, pid.String()) + log.Warnf("trying to warn you") + //fmt.Println("Message id: ", msg.Cid) + // Check the CID and messages sent are correct for push messages + if rmsg.Type == Push { + fmt.Println("message to validate is of type push") + msgs := rmsg.Content + c, err := msgs.HashedCid() // + if err != nil { + log.Errorf("error computing msgs cid: %s", err) + return pubsub.ValidationIgnore + } + if rmsg.Cid != c { + log.Errorf("cid computed for crossMsgs not equal to the one requested: %s", err) + return pubsub.ValidationReject + } + } + + // it's a correct message! make sure we've only seen it once + if count := v.r.addMsgReceipt(rmsg.Type, rmsg.Cid, msg.GetFrom()); count > 0 { + if pid == v.r.self { + log.Warnf("local block has been seen %d times; ignoring", count) + } + + return pubsub.ValidationIgnore + } + + // Process the resolveMsg, record error, and return gossipsub validation status. + sub, err := v.r.processResolveMsg(ctx, rmsg) + if err != nil { + log.Errorf("error processing resolve message: %s", err) + return sub + } + + // TODO: Any additional check? + + // Pass validated request. + // msg.ValidatorData = rmsg + fmt.Println("end of validate") + + return pubsub.ValidationAccept +} + +func (cm *MsgData) HashedCid() (string, error) { + // to do + if len((*cm).Content) == 0 { + return "", errors.New("Message data is empty.") + } + sha256 := sha256.Sum256((*cm).Content) + + return hex.EncodeToString(sha256[:]), nil + //return hex.EncodeToString((*cm).content), nil + +} + +func (r *Resolver) HandleIncomingResolveMsg(ctx context.Context, sub *pubsub.Subscription) { + for { + _, err := sub.Next(ctx) + if err != nil { + log.Warn("error from message subscription: ", err) + if ctx.Err() != nil { + log.Warn("quitting HandleResolveMessages loop") + return + } + log.Error("error from resolve-msg subscription: ", err) + continue + } + + // Do nothing... 
everything happens in validate + // Including message handling. + } +} + +func (r *Resolver) processResolveMsg(ctx context.Context, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + switch rmsg.Type { + case Push: + return r.processPush(ctx, rmsg) + case Pull: + return r.processPull( rmsg) + case Response: + return r.processResponse(ctx, rmsg) + } + return pubsub.ValidationReject, xerrors.Errorf("Resolve message type is not valid") + +} + +func (r *Resolver) processPush(ctx context.Context, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Check if we are already storing the CrossMsgs CID locally. + fmt.Println("Processing push for message with cid: ", rmsg.Cid) + _, found, err := r.getLocal(ctx, rmsg.Cid) + if err != nil { + return pubsub.ValidationIgnore, xerrors.Errorf("Error getting msg locally: %w", err) + } + if found { + // Ignoring message, we already have these cross-msgs + return pubsub.ValidationIgnore, nil + } + // If not stored locally, store it in the datastore for future access. + if err := r.setLocal(ctx, rmsg.Cid, &rmsg.Content); err != nil { + return pubsub.ValidationIgnore, err + } + fmt.Println("Message added! yay") + + // TODO: Introduce checks here to ensure that push messages come from the right + // source? + return pubsub.ValidationAccept, nil +} + +func (r *Resolver) processPull(rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Inspect the state of the SCA to get crossMsgs behind the CID. + // st, store, err := submgr.GetSCAState(context.TODO(), r.netName) + // if err != nil { + // return pubsub.ValidationIgnore, err + // } + //msgs, found, err := st.GetCrossMsgs(store, rmsg.Cid) + // if err != nil { + // return pubsub.ValidationIgnore, err + // } + fmt.Println("Processing a pull request for message with cid: ",rmsg.Cid) + msg, found, err := r.getLocal(context.TODO(), rmsg.Cid) + if err != nil { + return pubsub.ValidationIgnore, err + } + if !found { + // Reject instead of ignore. 
Someone may be trying to spam us with + // random unvalid CIDs. + //return pubsub.ValidationIgnore, xerrors.Errorf("couldn't find data for msgMeta with cid: %s", rmsg.Cid) + return pubsub.ValidationAccept, nil + //return pubsub.ValidationReject, xerrors.Errorf("couldn't find crossmsgs for msgMeta with cid: %s", rmsg.Cid) + } + // Send response + if err := r.PushCheckpointMsgs(*msg,true); err != nil { + return pubsub.ValidationIgnore, err + } + // Publish a Response message to the source subnet if the CID is found. + return pubsub.ValidationAccept, nil +} + +// GetCrossMsgs returns the crossmsgs from a CID in the registry. +// func (st *SCAState) GetCrossMsgs(store adt.Store, c string) (*MsgData, bool, error) { +// msgMetas, err := adt.AsMap(store, st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) +// if err != nil { +// return nil, false, err +// } +// var out CrossMsgs +// found, err := msgMetas.Get(abi.CidKey(c), &out) +// if err != nil { +// return nil, false, xerrors.Errorf("failed to get crossMsgMeta from registry with cid %v: %w", c, err) +// } +// if !found { +// return nil, false, nil +// } +// return &out, true, nil +// } + +func (r *Resolver) processResponse(ctx context.Context, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Response messages are processed in the same way as push messages + // (at least for now). Is the validation what differs between them. 
+ if sub, err := r.processPush(ctx, rmsg); err != nil { + return sub, err + } + // If received successfully we can delete ongoingPull + r.pullSuccess(rmsg.Cid) + return pubsub.ValidationAccept, nil +} + +func (r *Resolver) getLocal(ctx context.Context, c string) (*MsgData, bool, error) { + b, err := r.ds.Get(ctx, datastore.NewKey(c)) + if err != nil { + if err == datastore.ErrNotFound { + return nil, false, nil + } + return nil, false, err + } + out := &MsgData{} + if err := out.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, false, err + } + return out, true, nil +} + +func (r *Resolver) setLocal(ctx context.Context, c string, msgs *MsgData) error { + w := new(bytes.Buffer) + if err := msgs.MarshalCBOR(w); err != nil { + return err + } + fmt.Println("We are currently adding message %v to KVS.", msgs) + return r.ds.Put(ctx, datastore.NewKey(c), w.Bytes()) +} + +func (r *Resolver) publishMsg(m *ResolveMsg) error { + b, err := EncodeResolveMsg(m) + if err != nil { + return xerrors.Errorf("error serializing resolveMsg: %v", err) + } + fmt.Println("publishing message ",m) + return r.pubsub.Publish("pikachu", b) +} + +//WaitCrossMsgsResolved waits until crossMsgs for meta have been fully resolved +func (r *Resolver) WaitCheckpointResolved(ctx context.Context, c string) chan error { + out := make(chan error) + resolved := false + go func() { + var err error + for !resolved { + select { + case <-ctx.Done(): + out <- xerrors.Errorf("context timeout") + return + default: + // Check if crossMsg fully resolved. + _, resolved, err = r.ResolveCheckpointMsgs(ctx, c) + if err != nil { + out <- err + } + // If not resolved wait two seconds to poll again and see if it has been resolved + // FIXME: This is not the best approach, but good enough for now. 
+ if !resolved { + time.Sleep(2 * time.Second) + } + } + } + close(out) + }() + fmt.Println("done with WaitCheckpointResolved") + return out +} + +func (r *Resolver) ResolveCheckpointMsgs(ctx context.Context, c string) ([]byte, bool, error) { + // FIXME: This function should keep track of the retries that have been done, + // and fallback to a 1:1 exchange if this fails. + cross, found, err := r.getLocal(ctx, c) + if err != nil { + return []byte{}, false, err + } + if found { + // Hurray! We resolved everything, ready to return. + return cross.Content, true, nil + } + // If not try to pull message + if r.shouldPull(c) { + return []byte{}, false, r.PullCheckpointMsgs(c) + } + + // If we shouldn't pull yet because we pulled recently + // do nothing for now, and notify that is wasn't resolved yet. + return []byte{}, false, nil + +} + +func (r *Resolver) PushCheckpointMsgs(msgs MsgData, isResponse bool) error { + c, err := msgs.HashedCid() + if err != nil { + return err + } + m := &ResolveMsg{ + Type: Push, + Cid: c, + Content: msgs, + } + if isResponse { + m.Type = Response + } + return r.publishMsg(m) +} + +// func (r *Resolver) PushMsgFromCheckpoint(ch *schema.Checkpoint, st *sca.SCAState, store adt.Store) error { +// // For each crossMsgMeta +// for _, meta := range ch.CrossMsgs() { +// // Get the crossMsgs behind Cid from SCA state and push it. 
+// c, err := meta.Cid() +// if err != nil { +// return err +// } +// msgs, found, err := st.GetCrossMsgs(store, c) +// if err != nil { +// return err +// } +// if !found { +// return xerrors.Errorf("couldn't found crossmsgs for msgMeta with cid: %s", c) +// } +// // Push cross-msgs to subnet +// if err = r.PushCrossMsgs(*msgs, false); err != nil { +// return err +// } +// } +// return nil +// } + +func (r *Resolver) PullCheckpointMsgs(ci string) error { + m := &ResolveMsg{ + Type: Pull, + Cid: ci, + } + return r.publishMsg(m) +} diff --git a/chain/checkpointing/kvs_test.go b/chain/checkpointing/kvs_test.go new file mode 100644 index 000000000..1aaae1086 --- /dev/null +++ b/chain/checkpointing/kvs_test.go @@ -0,0 +1,124 @@ +package checkpointing + +import ( + "context" + "testing" + "time" + + //"github.com/filecoin-project/go-address" + //"github.com/filecoin-project/go-state-types/abi" + //"github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + //ltypes "github.com/filecoin-project/lotus/chain/types" + //tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/require" +) + +func TestGetSet(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + //addr := tutil.NewIDAddr(t, 101) + // msg := ltypes.Message{ + // To: addr, + // From: addr, + // Value: abi.NewTokenAmount(1), + // Nonce: 2, + // GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + // GasFeeCap: ltypes.NewInt(0), + // GasPremium: ltypes.NewInt(0), + // Params: nil, + // } + //out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + out := &MsgData{Content: []byte{0,1}} + r := NewResolver(h.ID(), ds, ps) + cid, _ := out.HashedCid() + out1, found, err 
:= r.getLocal(ctx,cid ) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, out1) + require.NoError(t, err) + err = r.setLocal(ctx, cid, out) + require.NoError(t, err) + out2, found, err := r.getLocal(ctx, cid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, out, out2) +} + +func TestResolve(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + //addr := tutil.NewIDAddr(t, 101) + // msg := ltypes.Message{ + // To: addr, + // From: addr, + // Value: abi.NewTokenAmount(1), + // Nonce: 2, + // GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + // GasFeeCap: ltypes.NewInt(0), + // GasPremium: ltypes.NewInt(0), + // Params: nil, + // } + // out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + out := &MsgData{Content: []byte{0,1}} + r := NewResolver(h.ID(), ds, ps) + c, _ := out.HashedCid() + _, found, err := r.ResolveCheckpointMsgs(ctx, c) + require.NoError(t, err) + require.False(t, found) + err = r.setLocal(ctx, c, out) + require.NoError(t, err) + pulled, found, err := r.ResolveCheckpointMsgs(ctx, c) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, len(pulled), 2) + require.Equal(t, pulled, out.Content) + + // TODO: Test recursive resolve with Metas. 
+} + +func TestWaitResolve(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + // addr := tutil.NewIDAddr(t, 101) + // msg := ltypes.Message{ + // To: addr, + // From: addr, + // Value: abi.NewTokenAmount(1), + // Nonce: 2, + // GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + // GasFeeCap: ltypes.NewInt(0), + // GasPremium: ltypes.NewInt(0), + // Params: nil, + // } + // out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + out := &MsgData{Content: []byte{0,1}} + r := NewResolver(h.ID(), ds, ps) + c, _ := out.HashedCid() + + // Wait for resolution. + found := r.WaitCheckpointMsgsResolved(context.TODO(), c) + go func() { + // Wait one second, and store cross-msgs locally + time.Sleep(1 * time.Second) + err = r.setLocal(ctx, c, out) + require.NoError(t, err) + }() + + err = <-found + require.NoError(t, err) +} \ No newline at end of file diff --git a/chain/checkpointing/sub.go b/chain/checkpointing/sub.go index 66919c149..dd57c8e72 100644 --- a/chain/checkpointing/sub.go +++ b/chain/checkpointing/sub.go @@ -4,13 +4,20 @@ import ( "context" "encoding/binary" "encoding/hex" + + //"encoding/json" "fmt" "os" + "bytes" "sort" "strconv" + "strings" "sync" "time" + //"github.com/libp2p/go-libp2p" + //datastore "github.com/ipfs/go-datastore" + "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/BurntSushi/toml" "github.com/Zondax/multi-party-sig/pkg/math/curve" "github.com/Zondax/multi-party-sig/pkg/party" @@ -18,8 +25,10 @@ import ( "github.com/Zondax/multi-party-sig/pkg/taproot" "github.com/Zondax/multi-party-sig/protocols/frost" "github.com/Zondax/multi-party-sig/protocols/frost/keygen" + address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/blockstore" + 
"github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/consensus/actors/mpower" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/types" @@ -28,16 +37,42 @@ import ( "github.com/filecoin-project/lotus/node/modules/helpers" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" + //peer "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/host" pubsub "github.com/libp2p/go-libp2p-pubsub" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" "go.uber.org/fx" "golang.org/x/xerrors" + // act "github.com/filecoin-project/lotus/chain/consensus/actors" + // init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + // "github.com/filecoin-project/lotus/build" + // "github.com/filecoin-project/lotus/api" ) var log = logging.Logger("checkpointing") + +//update this value with the amount you want to send to the initial aggregated key (for testing purpose) +const initialValueInWallet = 50 +// for testnet I recommend using 0.002 +//const initialValueInWallet = 0.002 + +// change this to true to alternatively send all the amount from our wallet +var sendall = false + +// this variable is the number of blocks (in eudico) we want between each checkpoints +const checkpointFrequency = 15 + +//change to true if regtest is used +const Regtest = true + +// struct used to propagate detected changes. +type diffInfo struct { + newMiners []string + newPublicKey []byte + hash []byte + cp []byte +} + /* Main file for the checkpointing module. Handle all the core logic. 
*/ @@ -68,34 +103,47 @@ type CheckpointingSub struct { */ // Generated public key pubkey []byte + //Keys of the new set of participants + newKey []byte // Participants list identified with their libp2p cid participants []string + // Participants list identified with their libp2p cid + newParticipants []string + // boolean to keep track of when the new config has finished the DKG + newDKGComplete bool + // boolean to keep + keysUpdated bool // taproot config - config *keygen.TaprootConfig + taprootConfig *keygen.TaprootConfig // new config generated - newconfig *keygen.TaprootConfig + newTaprootConfig *keygen.TaprootConfig // Previous tx ptxid string // Tweaked value tweakedValue []byte // Checkpoint section in config.toml cpconfig *config.Checkpoint - // minio client - minioClient *minio.Client + // minio client -> using KVS now + //minioClient *minio.Client // Bitcoin latest checkpoint used when syncing latestConfigCheckpoint types.TipSetKey // Is Eudico synced (do we have all the blocks) synced bool // height verified! (the height of the latest checkpoint) height abi.ChainEpoch + // last cid pushed to bitcoin + lastCid string - - // add intitial taproot address here + // KVS + r *Resolver + ds dtypes.MetadataDS } /* - Initiate checkpoint module - It will load config and inititate CheckpointingSub struct + Initiate checkpoint module. + It will load config and initiate CheckpointingSub struct + using some pre-generated data. 
(TODO: change this and re-generate + the data locally each time) */ func NewCheckpointSub( mctx helpers.MetricsCtx, @@ -103,8 +151,7 @@ func NewCheckpointSub( host host.Host, pubsub *pubsub.PubSub, api impl.FullNodeAPI, - // add init taproot address and define it somewhere here -) (*CheckpointingSub, error) { + ds dtypes.MetadataDS) (*CheckpointingSub, error) { ctx := helpers.LifecycleCtx(mctx, lc) // Starting checkpoint listener @@ -128,7 +175,7 @@ func NewCheckpointSub( synced := false // Load taproot verification shares from EUDICO_PATH environnement if file exist - var config *keygen.TaprootConfig + var taprootConfig *keygen.TaprootConfig _, err = os.Stat(os.Getenv("EUDICO_PATH") + "/share.toml") if err == nil { // If we have a share.toml containing the distributed key we load them @@ -180,7 +227,7 @@ func NewCheckpointSub( verificationShares[party.ID(key)] = &p } - config = &keygen.TaprootConfig{ + taprootConfig = &keygen.TaprootConfig{ ID: party.ID(host.ID().String()), Threshold: configTOML.Threshold, PrivateShare: &privateShare, @@ -188,34 +235,33 @@ func NewCheckpointSub( VerificationShares: verificationShares, } - for id := range config.VerificationShares { + // this is where we append the original list of signers + // note: they are not added in the mocked power actor (they probably should? 
TODO) + for id := range taprootConfig.VerificationShares { minerSigners = append(minerSigners, string(id)) } - } - // Initialize minio client object - minioClient, err := minio.New(cpconfig.MinioHost, &minio.Options{ - Creds: credentials.NewStaticV4(cpconfig.MinioAccessKeyID, cpconfig.MinioSecretAccessKey, ""), - Secure: false, - }) - if err != nil { - return nil, err } + return &CheckpointingSub{ - pubsub: pubsub, - topic: nil, - sub: nil, - host: host, - api: &api, - events: e, - ptxid: "", - config: config, - participants: minerSigners, - newconfig: nil, - cpconfig: &cpconfig, - minioClient: minioClient, - synced: synced, + pubsub: pubsub, + topic: nil, + sub: nil, + host: host, + api: &api, + events: e, + pubkey: make([]byte, 0), + ptxid: "", + taprootConfig: taprootConfig, //either nil (if no shares) or the configuration pre-generated for Alice, Bob and Charlie + participants: minerSigners, + newDKGComplete: false, + keysUpdated: true, + newTaprootConfig: nil, + cpconfig: &cpconfig, + synced: synced, + // r: r, + ds: ds, }, nil } @@ -230,9 +276,14 @@ func (c *CheckpointingSub) listenCheckpointEvents(ctx context.Context) { } changeHandler := func(oldTs, newTs *types.TipSet, states events.StateChange, curH abi.ChainEpoch) (more bool, err error) { - log.Infow("State change detected for power actor") - - return true, nil + log.Infow("State change detected for mocked power actor") + diff, ok := states.(*diffInfo) + if !ok { + log.Error("Error casting states, not of type *diffInfo") + return true, err + } + //return true, nil + return c.triggerChange(ctx, diff) } revertHandler := func(ctx context.Context, ts *types.TipSet) error { @@ -243,6 +294,8 @@ func (c *CheckpointingSub) listenCheckpointEvents(ctx context.Context) { c.lk.Lock() defer c.lk.Unlock() + diff := &diffInfo{} + // verify we are synced // Maybe move it to checkFunc st, err := c.api.SyncState(ctx) @@ -260,6 +313,42 @@ func (c *CheckpointingSub) listenCheckpointEvents(ctx context.Context) { 
log.Infow("we are synced") // Yes then verify our checkpoint from Bitcoin and verify if we find in it in our Eudico chain + + //first we fetch the checkpoint from the KVS using the cid found from bitcoin + cid := c.lastCid + fmt.Println("try pull with cid: ", cid) + ctx1, _ := context.WithTimeout(ctx, 10 * time.Second) + out := c.r.WaitCheckpointResolved(ctx1, cid) + select { + case <-ctx1.Done(): + log.Errorf("context timeout") + case err := <-out: + if err != nil { + log.Errorf("error fully resolving messages: %s", err) + } + } + data, found, err := c.r.ResolveCheckpointMsgs(ctx, cid) + if err != nil { + log.Errorf("Error resolving messages: %v", err) + } + // sanity-check, it should always be found + if !found { + log.Errorf("messages haven't been resolved: %v", err) + } + fmt.Println("Data pulled from KVS: ", data) + if len(data) >0 { + //extract the cid from the data + cpCid := strings.Split(string(data), "\n")[0] + // Decode hex checkpoint to bytes + cpBytes, err := hex.DecodeString(cpCid) + if err != nil { + log.Errorf("could not decode checkpoint: %v", err) + } + c.latestConfigCheckpoint, err = types.TipSetKeyFromBytes(cpBytes) + if err != nil { + log.Errorf("could not get tipset key from checkpoint: %v", err) + } + } ts, err := c.api.ChainGetTipSet(ctx, c.latestConfigCheckpoint) if err != nil { log.Errorf("couldnt get tipset: %v", err) @@ -273,11 +362,6 @@ func (c *CheckpointingSub) listenCheckpointEvents(ctx context.Context) { return false, nil, nil } } - - /* - Now we compared old Power Actor State and new Power Actor State - */ - // Get actors at specified tipset newAct, err := c.api.StateGetActor(ctx, mpower.PowerActorAddr, newTs.Key()) if err != nil { @@ -300,84 +384,167 @@ func (c *CheckpointingSub) listenCheckpointEvents(ctx context.Context) { return false, nil, err } - // Activate checkpointing every 25 blocks log.Infow("Height:", "height", newTs.Height().String()) fmt.Println("Height:", newTs.Height()) - // NOTES: this will only work in delegated 
consensus - // Wait for more tipset to valid the height and be sure it is valid - // NOTES: should retrieve list of signing miners using Power actor state (see Miners) and not through config instanciation - if newTs.Height()%25 == 0 && (c.config != nil || c.newconfig != nil) { - log.Infow("Checkpoint time") + // fmt.Println("Old address: ", oldSt.PublicKey) + // fmt.Println("New address: ", newSt.PublicKey) - // Initiation and config should be happening at start - cp := oldTs.Key().Bytes() - // If we don't have a config we don't sign but update our config with key - // NOTE: `config` refers to config taproot as mentioned in the multi-party-sig lib - if c.config == nil { - log.Infow("We don't have any config") - pubkey := c.newconfig.PublicKey + // check if there is a new configuration (i.e. new miners) + // if yes, trigger DKG + change, err := c.matchNewConfig(ctx, oldTs, newTs, oldSt, newSt, diff) + if err != nil { + log.Errorw("Error checking for new configuration", "err", err) + return false, nil, err + } - pubkeyShort := genCheckpointPublicKeyTaproot(pubkey, cp) + // check if a new public key was created (i.e. the DKG completed) + change3, err := c.matchNewPublicKey(ctx, oldTs, newTs, oldSt, newSt, diff) + if err != nil { + log.Errorw("Error checking for new public key", "err", err) + return false, nil, err + } + //check if it is time for a new checkpoint + change2, err := c.matchCheckpoint(ctx, oldTs, newTs, oldSt, newSt, diff) + if err != nil { + log.Errorw("Error checking if it is time for a new checkpoint", "err", err) + return false, nil, err + } - c.config = c.newconfig - merkleRoot := hashMerkleRoot(pubkey, cp) - c.tweakedValue = hashTweakedValue(pubkey, merkleRoot) - c.pubkey = pubkeyShort - c.newconfig = nil - } else { - // Change name to MinerConfig (checkpoint in hex and miners list)? 
- var minersConfig string = hex.EncodeToString(cp) + "\n" - for _, partyId := range c.orderParticipantsList() { - minersConfig += partyId + "\n" - } + return change || change2 || change3 , diff, nil + } - // This create the file that will be stored in minio (or any storage) - hash, err := CreateMinersConfig([]byte(minersConfig)) - if err != nil { - log.Errorf("could not create miners config: %v", err) - return false, nil, err - } + // Listen to changes in Eudico + // `76587687658765876` <- This is the confidence threshold used to determine if the StateChangeHandler should be triggered. + // It is an absurdly high number so the metric used to determine if to trigger it or not is the number of tipsets that have passed in the heaviest chain (the 5 you see there) + // put 1 here for testing purpose (i,e, there are no forks) + err := c.events.StateChanged(checkFunc, changeHandler, revertHandler, 1, 76587687658765876, match) + if err != nil { + return + } +} - // Push config to minio - err = StoreMinersConfig(ctx, c.minioClient, c.cpconfig.MinioBucketName, hex.EncodeToString(hash)) - if err != nil { - log.Errorf("could not push miners config: %v", err) - return false, nil, err - } +func (c *CheckpointingSub) matchNewPublicKey(ctx context.Context, oldTs, newTs *types.TipSet, oldSt, newSt mpower.State, diff *diffInfo) (bool, error) { + if !bytes.Equal(oldSt.PublicKey, newSt.PublicKey) { + // update this variable to add later on the ability to remove miners + c.newDKGComplete = true + c.newKey = newSt.PublicKey + c.keysUpdated = false + diff.newPublicKey = newSt.PublicKey + fmt.Println("The new public key has correctly been updated") + return true, nil + } + return false, nil +} - err = c.CreateCheckpoint(ctx, cp, hash) - if err != nil { - log.Errorf("could not create checkpoint: %v", err) - return false, nil, err - } - } +func (c *CheckpointingSub) matchNewConfig(ctx context.Context, oldTs, newTs *types.TipSet, oldSt, newSt mpower.State, diff *diffInfo) (bool, error) { + 
/* + Now we compared old Power Actor State and new Power Actor State + */ + + // If no changes in configuration + if sameStringSlice(oldSt.Miners, newSt.Miners) { + return false, nil + } + // only the participants in the new config need to trigger the DKG + for _, participant := range newSt.Miners { + if participant == c.host.ID().String() { + diff.newMiners = newSt.Miners + c.newParticipants = newSt.Miners + return true, nil } + } + return false, nil + +} + +func (c *CheckpointingSub) matchCheckpoint(ctx context.Context, oldTs, newTs *types.TipSet, oldSt, newSt mpower.State, diff *diffInfo) (bool, error) { + // we are checking that the list of mocked actor is not empty before starting the checkpoint + if newTs.Height()%checkpointFrequency == 0 && len(oldSt.Miners) > 0 && (c.taprootConfig != nil || c.newTaprootConfig != nil) { + cp := oldTs.Key().Bytes() // this is the checkpoint + diff.cp = cp + + // If we don't have a taprootconfig we don't sign because it means we were not part + // of the previous DKG and hence we need to let the "previous" miners update the aggregated + // key on bitcoin before starting signing. 
+ // We update our config to be ready for next checkpointing + // This is the case for any "new" miner (i.e., not Alice, Bob and Charlie) + // Basically we skip the next + if c.taprootConfig == nil { + log.Infow("We don't have any config") + pubkey := c.newTaprootConfig.PublicKey // the new taproot config has been initialized + //during the DKG (in which the new node took part when they joined) + + pubkeyShort := genCheckpointPublicKeyTaproot(pubkey, cp) + + c.taprootConfig = c.newTaprootConfig + merkleRoot := hashMerkleRoot(pubkey, cp) + c.tweakedValue = hashTweakedValue(pubkey, merkleRoot) + c.pubkey = pubkeyShort + c.newTaprootConfig = nil + c.participants = newSt.Miners // we add ourselves to the list of participants + c.newDKGComplete = false + //c.newKey = + + } else { + // Miners config is the data that will be stored on a eudico-KVS + var minersConfig string = hex.EncodeToString(cp) + "\n" + for _, partyId := range newSt.Miners { // list of new miners + minersConfig += partyId + "\n" + } - // If Power Actors list has changed start DKG - // Changes detected so generate new key - if oldSt.MinerCount != newSt.MinerCount { - log.Infow("Generate new aggregated key") - err := c.GenerateNewKeys(ctx, newSt.Miners) + // This creates the file that will be stored in the KVS (or any storage) + hash, err := CreateMinersConfig([]byte(minersConfig)) if err != nil { - log.Errorf("error while generating new key: %v", err) - // If generating new key failed, checkpointing should not be possible + log.Errorf("could not create miners config: %v", err) + return false, err } + diff.hash = hash - return true, nil, nil // true mean generate keys + //Push config to the KVS + msgs := &MsgData{Content: []byte(minersConfig)} + //push config to kvs + cid_str, err := msgs.HashedCid() + err = c.r.setLocal(ctx, cid_str, msgs) + if err != nil { + log.Errorf("could not push miners config to kvs: %v", err) + return false, err + } + //Push data to everyone + c.r.PushCheckpointMsgs(*msgs,false) 
} - return false, nil, nil + return true, nil } + return true, nil +} - // Listen to changes in Eudico - // `76587687658765876` <- This is the confidence threshold used to determine if the StateChangeHandler should be triggered. - // It is an absurdly high number so the metric used to determine if to trigger it or not is the number of tipsets that have passed in the heaviest chain (the 5 you see there) - err := c.events.StateChanged(checkFunc, changeHandler, revertHandler, 5, 76587687658765876, match) - if err != nil { - return +func (c *CheckpointingSub) triggerChange(ctx context.Context, diff *diffInfo) (more bool, err error) { + //If there is a new configuration, trigger the DKG + if len(diff.newMiners) > 0 { + log.Infow("Generate new aggregated key") + err := c.GenerateNewKeys(ctx, diff.newMiners) + if err != nil { + log.Errorw("error while generating new key: %v", err) + // If generating new key failed, checkpointing should not be possible + return true, err + } + + log.Infow("Successful DKG") } + + // trigger the new checkpoint + if diff.cp != nil && diff.hash != nil { + // the checkpoint is created by the "previous" set of miners + // so that the new key is updated + err = c.CreateCheckpoint(ctx, diff.cp, diff.hash, c.participants) + if err != nil { + log.Errorw("could not create checkpoint: %v", err) + return true, err + } + } + return true, nil } func (c *CheckpointingSub) Start(ctx context.Context) error { @@ -402,13 +569,17 @@ func (c *CheckpointingSub) Start(ctx context.Context) error { } c.sub = sub + c.listenCheckpointEvents(ctx) + + return nil } func (c *CheckpointingSub) GenerateNewKeys(ctx context.Context, participants []string) error { - + fmt.Println("DKG participants: ", participants) + fmt.Println("Myself (DKG): ", c.host.ID().String()) idsStrings := participants sort.Strings(idsStrings) @@ -420,7 +591,7 @@ func (c *CheckpointingSub) GenerateNewKeys(ctx context.Context, participants []s threshold := (len(idsStrings) / 2) + 1 //starting a new 
ceremony with the subscription and topic that were - // already defined + // already defined //why not call the checkpointing sub directly? n := NewNetwork(c.sub, c.topic) @@ -428,22 +599,15 @@ func (c *CheckpointingSub) GenerateNewKeys(ctx context.Context, participants []s //f := frost.KeygenTaprootGennaro(id, ids, threshold) f := frost.KeygenTaproot(id, ids, threshold) - //{1,2,3} is session ID, it is hardcoded - // change it for a unique identifier - // we only need this identifier to be the same for every participants - // it could be for example the hash of the checkpointed block - // or hash of participants list - // problem with 1,2,3: people on different sessions could be on the same execution - // try nil --> it probably uses the hash of the participants list - // look at the library for DKG (taurus fork) - // for signing this is already updated - // for testing hardcoded is ok to ensure everyone is on the same session - // but for production this needs to be updated. - handler, err := protocol.NewMultiHandler(f, []byte{1, 2, 3}) + + + //handler, err := protocol.NewMultiHandler(f, []byte{1, 2, 3}) + sessionID := strings.Join(idsStrings, "") + handler, err := protocol.NewMultiHandler(f, []byte(sessionID)) if err != nil { return err } - LoopHandler(ctx, handler, n)//use the new network, could be re-written + LoopHandler(ctx, handler, n) //use the new network, could be re-written r, err := handler.Result() if err != nil { // if a participant is mibehaving the DKG entirely fail (no fallback) @@ -452,25 +616,69 @@ func (c *CheckpointingSub) GenerateNewKeys(ctx context.Context, participants []s log.Infow("result :", "result", r) var ok bool - c.newconfig, ok = r.(*keygen.TaprootConfig) + c.newTaprootConfig, ok = r.(*keygen.TaprootConfig) if !ok { return xerrors.Errorf("state change propagated is the wrong type") } + c.newDKGComplete = true + c.newKey = []byte(c.newTaprootConfig.PublicKey) + + // we need to update the taproot public key in the mocked actor + // 
this is done by sending a transaction with method 4 (which + // corresponds to the "add new public key method") + // for now only alice sends the transaction (will need to be changed TODO) + if c.host.ID().String() == "12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4" { + addp := &mpower.NewTaprootAddressParam{ + PublicKey: []byte(c.newTaprootConfig.PublicKey), // new public key that was just generated + } + + seraddp, err1 := actors.SerializeParams(addp) + if err1 != nil { + return err1 + } - c.participants = participants + a, err2 := address.NewIDAddress(65) + if err2 != nil { + return xerrors.Errorf("mocked actor address not working") + } + //TODO: change this, import the wallet automatically + // right now we are just copying Alice's address manually (short-term solution) + aliceaddr, err3 := address.NewFromString("t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba") + if err3 != nil { + return xerrors.Errorf("alice address not working") + } + + _, aerr := c.api.MpoolPushMessage(ctx, &types.Message{ + To: a, //this is the mocked actor address + From: aliceaddr, // this is alice address, will need to be changed at some point + Value: abi.NewTokenAmount(0), + Method: 4, + Params: seraddp, + }, nil) + + if aerr != nil { + return aerr + } + + fmt.Println("message sent") + } return nil } -func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte) error { +func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte, participants []string) error { + + fmt.Println("I'm a checkpointer") taprootAddress, err := pubkeyToTapprootAddress(c.pubkey) if err != nil { return err } - pubkey := c.config.PublicKey - if c.newconfig != nil { - pubkey = c.newconfig.PublicKey + pubkey := c.taprootConfig.PublicKey + // if a new public key was generated (i.e. 
new miners), we use this key in the checkpoint + if c.newDKGComplete { + //pubkey = c.newTaprootConfig.PublicKey // change this to update from the actor + pubkey = taproot.PublicKey(c.newKey) } pubkeyShort := genCheckpointPublicKeyTaproot(pubkey, cp) @@ -483,22 +691,29 @@ func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte // we will chose the "first" half of participants // in order to sign the transaction in the threshold signing. // In later improvement we will choose them randomly. - idsStrings := c.orderParticipantsList() + + // list from mocked power actor: + sort.Strings(participants) + idsStrings := participants + log.Infow("participants list :", "participants", idsStrings) log.Infow("precedent tx", "txid", c.ptxid) ids := c.formIDSlice(idsStrings) - + taprootScript := getTaprootScript(c.pubkey) + //we add our public key to our bitcoin wallet + success := addTaprootToWallet(c.cpconfig.BitcoinHost, taprootScript) + if !success { + return xerrors.Errorf("failed to add taproot address to wallet") + } if c.ptxid == "" { log.Infow("missing precedent txid") - taprootScript := getTaprootScript(c.pubkey) - success := addTaprootToWallet(c.cpconfig.BitcoinHost, taprootScript) - if !success { - return xerrors.Errorf("failed to add taproot address to wallet") - } // sleep an arbitrary long time to be sure it has been scanned - time.Sleep(6 * time.Second) + // removed this because now we are adding without rescanning (too long) + //time.Sleep(6 * time.Second) + //time.Sleep(20 * time.Second) + //we get the transaction id using our bitcoin client ptxid, err := walletGetTxidFromAddress(c.cpconfig.BitcoinHost, taprootAddress) if err != nil { return err @@ -508,19 +723,25 @@ func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte } index := 0 + //fmt.Println("Previous tx id: ", c.ptxid) value, scriptPubkeyBytes := getTxOut(c.cpconfig.BitcoinHost, c.ptxid, index) + // TODO: instead of calling getTxOUt we need to check for 
the latest transaction + // same as is done in the verification.sh script + if scriptPubkeyBytes[0] != 0x51 { log.Infow("wrong txout") index = 1 value, scriptPubkeyBytes = getTxOut(c.cpconfig.BitcoinHost, c.ptxid, index) } newValue := value - c.cpconfig.Fee - - payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"createrawtransaction\", \"params\": [[{\"txid\":\"" + c.ptxid + "\",\"vout\": " + strconv.Itoa(index) + ", \"sequence\": 4294967295}], [{\"" + newTaprootAddress + "\": \"" + fmt.Sprintf("%.2f", newValue) + "\"}, {\"data\": \"" + hex.EncodeToString(data) + "\"}]]}" - result := jsonRPC(c.cpconfig.BitcoinHost, payload) + //fmt.Println("Fee for next transaction is: ", c.cpconfig.Fee) + payload1 := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"createrawtransaction\", \"params\": [[{\"txid\":\"" + c.ptxid + "\",\"vout\": " + strconv.Itoa(index) + ", \"sequence\": 4294967295}], [{\"" + newTaprootAddress + "\": \"" + fmt.Sprintf("%.8f", newValue) + "\"}, {\"data\": \"" + hex.EncodeToString(data) + "\"}]]}" + fmt.Println("Data pushed to opreturn: ", hex.EncodeToString(data)) + result := jsonRPC(c.cpconfig.BitcoinHost, payload1) + //fmt.Println("Result from Raw tx: ", result) if result == nil { - return xerrors.Errorf("cant create new transaction") + return xerrors.Errorf("can not create new transaction") } rawTransaction := result["result"].(string) @@ -543,9 +764,11 @@ func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte /* * Orchestrate the signing message */ - + fmt.Println("I'm starting the checkpointing") log.Infow("starting signing") - f := frost.SignTaprootWithTweak(c.config, ids, hashedTx[:], c.tweakedValue[:]) + // Here all the participants sign the transaction + // in practice we only need "threshold" of them to sign + f := frost.SignTaprootWithTweak(c.taprootConfig, ids, hashedTx[:], c.tweakedValue[:]) n := NewNetwork(c.sub, c.topic) // hashedTx[:] is the session id // ensure everyone is on the same session 
id @@ -563,21 +786,18 @@ func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte // if signing is a success we register the new value merkleRoot := hashMerkleRoot(pubkey, cp) c.tweakedValue = hashTweakedValue(pubkey, merkleRoot) - c.pubkey = pubkeyShort - // If new config used - if c.newconfig != nil { - c.config = c.newconfig - c.newconfig = nil - } + c.pubkey = pubkeyShort //updates the public key to the new key c.ptxid = "" // Only first one broadcast the transaction ? // Actually all participants can broadcast the transcation. It will be the same everywhere. rawtx := prepareWitnessRawTransaction(rawTransaction, r.(taproot.Signature)) - - payload = "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendrawtransaction\", \"params\": [\"" + rawtx + "\"]}" + payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendrawtransaction\", \"params\": [\"" + rawtx + "\"]}" + //fmt.Println("Send raw transaction command:", payload) + //fmt.Println("Raw tx: ", payload1) result = jsonRPC(c.cpconfig.BitcoinHost, payload) + //fmt.Println("Transaction to be sent: ", result) if result["error"] != nil { return xerrors.Errorf("failed to broadcast transaction") } @@ -587,20 +807,39 @@ func (c *CheckpointingSub) CreateCheckpoint(ctx context.Context, cp, data []byte log.Infow("new Txid:", "newtxid", newtxid) c.ptxid = newtxid - return nil -} -func (c *CheckpointingSub) orderParticipantsList() []string { - var ids []string - for id := range c.config.VerificationShares { - ids = append(ids, string(id)) + // If we have new config (i.e. 
a DKG has completed and we participated in it) + // we replace the previous config with this config + // Note: if someone left the protocol, they will not do this so this is not great + if c.newTaprootConfig != nil { + c.taprootConfig = c.newTaprootConfig + c.newTaprootConfig = nil } - sort.Strings(ids) + // even miners who left the protocol will do this as their newDKGComplete + // return true for everyone after a DKG has completed (whether they took part or no) + if c.newDKGComplete { + c.keysUpdated = true + c.participants = c.newParticipants + c.newParticipants = []string{} + c.newDKGComplete = false + + } - return ids + return nil } +// func (c *CheckpointingSub) orderParticipantsList() []string { +// var ids []string +// for id := range c.taprootConfig.VerificationShares { // change for mocked actor +// ids = append(ids, string(id)) +// } + +// sort.Strings(ids) + +// return ids +// } + func (c *CheckpointingSub) formIDSlice(ids []string) party.IDSlice { var _ids []party.ID for _, p := range ids { @@ -635,62 +874,142 @@ func BuildCheckpointingSub(mctx helpers.MetricsCtx, lc fx.Lifecycle, c *Checkpoi // Get first checkpoint from eudico block 0 ts, err := c.api.ChainGetGenesis(ctx) if err != nil { - log.Errorf("couldnt get genesis tipset: %v", err) + log.Errorf("could not get genesis tipset: %v", err) return + } else { + log.Infow("Got genesis tipset") } - cidBytes := ts.Key().Bytes()// this is the checkpoint (i.e. hash of block) - publickey, err := hex.DecodeString(c.cpconfig.PublicKey) + + cidBytes := ts.Key().Bytes() + //fmt.Println("cidbytes: ", cidBytes) + //fmt.Println("public key before decoding: ", c.cpconfig.PublicKey) // this is the checkpoint (i.e. 
hash of block) + publickey, err := hex.DecodeString(c.cpconfig.PublicKey) //publickey pre-generated if err != nil { - log.Errorf("couldnt decode public key: %v", err) + log.Errorf("could not decode public key: %v", err) return + } else { + log.Infow("Decoded Public key") } - // Get the last checkpoint from the bitcoin node - btccp, err := GetLatestCheckpoint(c.cpconfig.BitcoinHost, publickey, cidBytes) + // initialize the kvs + c.r = NewResolver(c.host.ID(), c.ds, c.pubsub) + fmt.Println("My id: ",c.host.ID()) + err = c.r.HandleMsgs(ctx) if err != nil { - log.Errorf("couldnt decode public key: %v", err) + log.Errorf("error initializing cross-msg resolver: %s", err) return } - // Get the config in minio using the last checkpoint found through Bitcoin. - // NOTE: We should be able to get the config regarless of storage (minio, IPFS, KVS,....) - cp, err := GetMinersConfig(ctx, c.minioClient, c.cpconfig.MinioBucketName, btccp.cid) - if cp != "" { - // Decode hex checkpoint to bytes - cpBytes, err := hex.DecodeString(cp) - if err != nil { - log.Errorf("couldnt decode checkpoint: %v", err) - return + + //eiher send the funding transaction (if needed) or get the latest checkpoint + // of the transaction has already been sent + if c.taprootConfig != nil { + c.pubkey = genCheckpointPublicKeyTaproot(c.taprootConfig.PublicKey, cidBytes) + + // this should be changed such that the public key is updated when eudico is stopped + // (so that we can continue the checkpointing without restarting from scratch each time) + address, _ := pubkeyToTapprootAddress(c.pubkey) + fmt.Println("Address: ", address) + + // only Alice will send the funding transaction for testing purpose + if c.host.ID().String() == "12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4" { + //start by getting the balance in our wallet (only if sendall is true, i.e. 
we send all the amount) + var value float64 + if sendall { + payload1 := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"getbalances\", \"params\": []}" + result1 := jsonRPC(c.cpconfig.BitcoinHost, payload1) + fmt.Println("Getbalances result: ", result1) + intermediary1 := result1["result"].(map[string]interface{}) + intermediary2 := intermediary1["mine"].(map[string]interface{}) + value = intermediary2["trusted"].(float64) + fmt.Println("Initial value in walet: ", value) + } else { + value = initialValueInWallet + } + newValue := value - c.cpconfig.Fee + payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendtoaddress\", \"params\": [\"" + address + "\", \"" + fmt.Sprintf("%.8f", newValue) + "\" ]}" + //fmt.Println(payload) + result := jsonRPC(c.cpconfig.BitcoinHost, payload) + //fmt.Println(result) + if result["error"] != nil { + log.Errorf("could not send initial Bitcoin transaction to: %v", address) + } else { + log.Infow("successfully sent first bitcoin tx") + c.ptxid = result["result"].(string) + } + //put the data in kvs + var minersConfig string = hex.EncodeToString(cidBytes) + "\n" + // c.orderParticipantsList() orders the miners from the taproot config --> to change + //for _, partyId := range c.orderParticipantsList() { + for _, partyId := range c.participants { // list of new miners + minersConfig += partyId + "\n" + } + msgs := &MsgData{Content: []byte(minersConfig)} + time.Sleep(2 * time.Second) + c.r.PushCheckpointMsgs(*msgs,false) } - // Cache latest checkpoint value from Bitcoin for when we sync and compare wit Eudico key tipset values - c.latestConfigCheckpoint, err = types.TipSetKeyFromBytes(cpBytes) - if err != nil { - log.Errorf("couldnt get tipset key from checkpoint: %v", err) - return + for { + init, txid, err := CheckIfFirstTxHasBeenSent(c.cpconfig.BitcoinHost, publickey, cidBytes) + if init { + c.ptxid = txid + if err != nil { + log.Errorf("Error with check if first tx has been sent") + } + break + } } } + // Get the 
last checkpoint from the bitcoin node + + btccp, err := GetLatestCheckpoint(c.cpconfig.BitcoinHost, publickey, cidBytes) + + if err != nil { + log.Errorf("could not get last checkpoint from Bitcoin: %v", err) + return + } else { + log.Infow("Got last checkpoint from Bitcoin node") + fmt.Println(btccp) + } + + + fmt.Println("last cid from bitcoin: ", btccp.cid) + c.lastCid = btccp.cid + + // Pre-compute values from participants in the signing process - if c.config != nil { + if c.taprootConfig != nil { // save public key taproot - // NOTE: cidBytes is the tipset key value (aka checkpoint) from the genesis block. When Eudico is stopped it should remember what was the last tipset key value + // NOTE: cidBytes is the tipset key value (aka checkpoint) from the genesis block. + // When Eudico is stopped it should remember what was the last tipset key value // it signed and replace it with it. Config is not saved, neither when new DKG is done. - c.pubkey = genCheckpointPublicKeyTaproot(c.config.PublicKey, cidBytes) + c.pubkey = genCheckpointPublicKeyTaproot(c.taprootConfig.PublicKey, cidBytes) // Get the taproot address used in taproot.sh + // this should be changed such that the public key is updated when eudico is stopped + // (so that we can continue the checkpointing without restarting from scratch each time) address, _ := pubkeyToTapprootAddress(c.pubkey) fmt.Println(address) + // to do: write method to get the total amount in the wallet we are using + //value, scriptPubkeyBytes := getTxOut(c.cpconfig.BitcoinHost, c.ptxid, index) + + // if scriptPubkeyBytes[0] != 0x51 { + // log.Infow("wrong txout") + // index = 1 + // value, scriptPubkeyBytes = getTxOut(c.cpconfig.BitcoinHost, c.ptxid, index) + // } + // Save tweaked value - merkleRoot := hashMerkleRoot(c.config.PublicKey, cidBytes) - c.tweakedValue = hashTweakedValue(c.config.PublicKey, merkleRoot) + merkleRoot := hashMerkleRoot(c.taprootConfig.PublicKey, cidBytes) + c.tweakedValue = 
hashTweakedValue(c.taprootConfig.PublicKey, merkleRoot) } // Start the checkpoint module err = c.Start(ctx) if err != nil { - log.Errorf("couldn't start checkpointing module: %v", err) + log.Errorf("could not start checkpointing module: %v", err) } lc.Append(fx.Hook{ diff --git a/chain/checkpointing/util.go b/chain/checkpointing/util.go index c90523480..a08a7f12c 100644 --- a/chain/checkpointing/util.go +++ b/chain/checkpointing/util.go @@ -107,7 +107,15 @@ func pubkeyToTapprootAddress(pubkey []byte) (string, error) { // regtest human-readable part is "bcrt" according to no documentation ever... (see https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) // Using EncodeM becasue we want bech32m... which has a new checksum - taprootAddress, err := bech32.EncodeM("bcrt", conv) + if Regtest { + taprootAddress, err := bech32.EncodeM("bcrt", conv) + if err != nil { + return "", err + } + return taprootAddress, nil + } + // for testnet the human-readable part is "tb" + taprootAddress, err := bech32.EncodeM("tb", conv) if err != nil { return "", err } @@ -153,8 +161,13 @@ func genCheckpointPublicKeyTaproot(internal_pubkey []byte, checkpoint []byte) [] } func addTaprootToWallet(url, taprootScript string) bool { - payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"importaddress\", \"params\": [\"" + taprootScript + "\", \"\", true]}" + payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"importaddress\", \"params\": [\"" + taprootScript + "\", \"\", false]}" + if Regtest { + payload = "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"importaddress\", \"params\": [\"" + taprootScript + "\", \"\", true]}" + } + //time.Sleep(6 * time.Second) result := jsonRPC(url, payload) + if result["error"] == nil { return true } @@ -209,9 +222,14 @@ func parseUnspentTxOut(utxo []byte) (amount, script []byte) { func getTxOut(url, txid string, index int) (float64, []byte) { payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"gettxout\", 
\"params\": [\"" + txid + "\", " + strconv.Itoa(index) + "]}" result := jsonRPC(url, payload) + if result == nil { panic("Cannot retrieve previous transaction.") } + if result["result"] == nil { + panic("No transaction returned (maybe the output has already be spent") + + } taprootTxOut := result["result"].(map[string]interface{}) scriptPubkey := taprootTxOut["scriptPubKey"].(map[string]interface{}) scriptPubkeyBytes, _ := hex.DecodeString(scriptPubkey["hex"].(string)) @@ -223,9 +241,12 @@ func jsonRPC(url, payload string) map[string]interface{} { // ZONDAX TODO // This needs to be in a config file method := "POST" - - user := "satoshi" - password := "amiens" + user := "sarah" + password := "pikachutestnetB2" + if Regtest { + user = "satoshi" + password = "amiens" + } client := &http.Client{} @@ -255,3 +276,26 @@ func jsonRPC(url, payload string) map[string]interface{} { json.Unmarshal([]byte(body), &result) return result } + +func sameStringSlice(x, y []string) bool { + if len(x) != len(y) { + return false + } + // create a map of string -> int + diff := make(map[string]int, len(x)) + for _, _x := range x { + // 0 value for int is 0, so just increment a counter for the string + diff[_x]++ + } + for _, _y := range y { + // If the string _y is not in diff bail out early + if _, ok := diff[_y]; !ok { + return false + } + diff[_y] -= 1 + if diff[_y] == 0 { + delete(diff, _y) + } + } + return len(diff) == 0 +} diff --git a/chain/checkpointing/util_test.go b/chain/checkpointing/util_test.go new file mode 100644 index 000000000..669dc648b --- /dev/null +++ b/chain/checkpointing/util_test.go @@ -0,0 +1,60 @@ +package checkpointing + +import ( + "encoding/hex" + "fmt" + "testing" +) + +func TestTaprootSignatureHash(t *testing.T) { + tx, _ := 
hex.DecodeString("0200000001cbfbdd3778e1d2e2b22fc728f3b902ff6e4df7a40582367e20aec056e05fbd9d0000000000ffffffff0280da2d0900000000225120b9435744b668ab44e3074432bf1b167d4e655db03be13aa6db295055a220b26a0000000000000000056a0363696400000000") + utxo, _ := hex.DecodeString("50ed400900000000225120b9435744b668ab44e3074432bf1b167d4e655db03be13aa6db295055a220b26a") + + sig_hash, _ := TaprootSignatureHash(tx, utxo, 0x00) + + if hex.EncodeToString(sig_hash) != "6b0fe64d6f1af182fb8b0d9e1f8587fafb08162b60495dfb2a1799516bb80874" { + fmt.Println(hex.EncodeToString(sig_hash)) + t.Errorf("Invalid hash") + } +} + +func TestTaggedHash(t *testing.T) { + tag := taggedHash("TapSighash") + + if hex.EncodeToString(tag) != "dabc11914abcd8072900042a2681e52f8dba99ce82e224f97b5fdb7cd4b9c803" { + fmt.Println(hex.EncodeToString(tag)) + t.Errorf("Invalid Tag") + } +} + +func TestTaggedHashExtraData(t *testing.T) { + tag := taggedHash("TapSighash", []byte{0}) + + if hex.EncodeToString(tag) != "c2fd0de003889a09c4afcf676656a0d8a1fb706313ff7d509afb00c323c010cd" { + fmt.Println(hex.EncodeToString(tag)) + t.Errorf("Invalid Tag") + } +} + +//some testvectors from https://github.com/bitcoin/bips/blob/995f45211d1baac4ac34685cf09d804eb8edd078/bip-0341/wallet-test-vectors.json +func TestTweakPubkey(t *testing.T) { + internal_pubkey, _ := hex.DecodeString("d6889cb081036e0faefa3a35157ad71086b123b2b144b649798b494c300a961d") + tweak, _ := hex.DecodeString("b86e7be8f39bab32a6f2c0443abbc210f0edac0e2c53d501b36b64437d9c6c70") + + tweaked_pubkey := applyTweakToPublicKeyTaproot(internal_pubkey, tweak) + + if hex.EncodeToString(tweaked_pubkey) != "53a1f6e454df1aa2776a2814a721372d6258050de330b3c6d10ee8f4e0dda343" { + t.Errorf("Invalid tweaked pubkey") + } +} + +func TestMerkleHash(t *testing.T) { + pubkey, _ := hex.DecodeString("187791b6f712a8ea41c8ecdd0ee77fab3e85263b37e1ec18a3651926b3a6cf27") + merkle_root, _ := hex.DecodeString("5b75adecf53548f3ec6ad7d78383bf84cc57b55a3127c72b9a2481752dd88b21") + + test_tweak := 
hashTweakedValue(pubkey, merkle_root) + + if hex.EncodeToString(test_tweak) != "cbd8679ba636c1110ea247542cfbd964131a6be84f873f7f3b62a777528ed001" { + t.Errorf("Invalid tweaked pubkey") + } +} \ No newline at end of file diff --git a/chain/checkpointing/verification.go b/chain/checkpointing/verification.go index 62218ffb4..32a6f2dc1 100644 --- a/chain/checkpointing/verification.go +++ b/chain/checkpointing/verification.go @@ -2,6 +2,7 @@ package checkpointing import ( "errors" + "fmt" ) type BitcoinTx struct { @@ -21,44 +22,63 @@ func GetFirstCheckpointAddress(url, taprootAddress string) (Checkpoint, error) { // url is the url of the bitcoin node with the RPC port result := jsonRPC(url, payload) list := result["result"].([]interface{}) + + //iterate through list of transactions for _, item := range list { item_map := item.(map[string]interface{}) - // Check if address match taproot adress given if yes return it if item_map["address"] == taprootAddress { tx_id := item_map["txid"].(string) payload = "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"getrawtransaction\", \"params\": [\"" + tx_id + "\", true]}" result = jsonRPC(url, payload) reader := result["result"].(map[string]interface{}) - + //fmt.Println(result) + // vout is the list of outputs of the transaction vout := reader["vout"].([]interface{}) + //fmt.Println("first vout", vout) taprootOut := vout[0].(map[string]interface{})["scriptPubKey"].(map[string]interface{}) new_address := taprootOut["hex"].(string) - - cidOut := vout[1].(map[string]interface{})["scriptPubKey"].(map[string]interface{}) - cid := cidOut["hex"].(string) + var cid string + if len(vout) > 1 { + cidOut := vout[1].(map[string]interface{})["scriptPubKey"].(map[string]interface{}) + cid = cidOut["hex"].(string) + } else { + cid = "0000" + } + //fmt.Println("first cid", cid) return Checkpoint{txid: tx_id, address: new_address, cid: cid[4:]}, nil } } - return Checkpoint{}, errors.New("Did not find checkpoint") + return Checkpoint{}, 
errors.New("Did not find new checkpoint") } func GetNextCheckpointFixed(url, txid string) (Checkpoint, error) { + //List 500000000 transactions (only includes the ones from/to our wallet) + // * stands for no label (i.e. transactions without a specific label) payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"listtransactions\", \"params\": [\"*\", 500000000, 0, true]}" result := jsonRPC(url, payload) list := result["result"].([]interface{}) + // for each transaction in the list for _, item := range list { item_map := item.(map[string]interface{}) + // get the tx id tx_id := item_map["txid"].(string) + //get the associated raw tx payload = "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"getrawtransaction\", \"params\": [\"" + tx_id + "\", true]}" result = jsonRPC(url, payload) + //fmt.Println(result) reader := result["result"].(map[string]interface{}) + //fmt.Println("getnextcheckpointfixed print: ", reader) new_txid := reader["txid"].(string) + //read the ix id on the input vin := reader["vin"].([]interface{})[0].(map[string]interface{})["txid"] if vin == nil { continue } + //fmt.Println("vin string: ", vin.(string)) + //check that the input of the transaction is equal to the txid if vin.(string) == txid { + fmt.Println("found txid") vout := reader["vout"].([]interface{}) taprootOut := vout[0].(map[string]interface{})["scriptPubKey"].(map[string]interface{}) new_address := taprootOut["hex"].(string) @@ -75,6 +95,7 @@ func GetLatestCheckpoint(url string, first_pk []byte, first_cp []byte) (*Checkpo firstscript := getTaprootScript(first_pubkeyTaproot) taprootAddress, err := pubkeyToTapprootAddress(first_pubkeyTaproot) if err != nil { + log.Errorf("Error when getting the last checkpoint from bitcoin", err) return nil, err } @@ -84,18 +105,63 @@ func GetLatestCheckpoint(url string, first_pk []byte, first_cp []byte) (*Checkpo for transaction linked to it. 
*/ addTaprootToWallet(url, firstscript) + //fmt.Println(firstscript) checkpoint, done := GetFirstCheckpointAddress(url, taprootAddress) - // Aging we add taproot "address" (actually the script) to the wallet in the Bitcoin node + // Again we add taproot "address" (actually the script) to the wallet in the Bitcoin node addTaprootToWallet(url, checkpoint.address) var new_checkpoint Checkpoint + fmt.Println("Starting get last checkpoint loop") for { new_checkpoint, done = GetNextCheckpointFixed(url, checkpoint.txid) if done == nil { checkpoint = new_checkpoint addTaprootToWallet(url, checkpoint.address) + fmt.Println(checkpoint) } else { // Return once we have found the last one in bitcoin return &checkpoint, nil } } } + +func CheckIfFirstTxHasBeenSent(url string, first_pk []byte, first_cp []byte) (bool, string, error) { + // the following will only work if we use one bitcoin node for our demo + first_pubkeyTaproot := genCheckpointPublicKeyTaproot(first_pk, first_cp) + firstscript := getTaprootScript(first_pubkeyTaproot) + taprootAddress, err := pubkeyToTapprootAddress(first_pubkeyTaproot) + if err != nil { + log.Errorf("Error when getting the last checkpoint from bitcoin", err) + return false, "", err + } + + /* + Bitcoin node only allow to collect transaction from addresses that are registered in the wallet + In this step we import taproot script (and not the address) in the wallet node to then be able to ask + for transaction linked to it. 
+ */ + addTaprootToWallet(url, firstscript) + + //now we check the transactions associated with our taproot address + //first list the tx + payload := "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"listtransactions\", \"params\": [\"*\", 500000000, 0, true]}" + // url is the url of the bitcoin node with the RPC port + result := jsonRPC(url, payload) + list := result["result"].([]interface{}) + //iterate through list of transactions + for _, item := range list { + item_map := item.(map[string]interface{}) + // Check if address match taproot adress given + // if yes i means there exist some transaction associTED WITH THIS address + if item_map["address"] == taprootAddress { + log.Infow("Initial transaction has already been sent") + txid := item_map["txid"].(string) + return true, txid, nil + + //check if something was sent from the address (i.e. do we need to go to next checkpoint?) + // if item_map["category"].(string) == "sent" { + // } + } + } + + return false, "", nil +} diff --git a/chain/consensus/actors/actors.go b/chain/consensus/actors/actors.go index 6b30833d5..c49f1ee15 100644 --- a/chain/consensus/actors/actors.go +++ b/chain/consensus/actors/actors.go @@ -6,10 +6,15 @@ import ( ) var ( - SplitActorCodeID cid.Cid SubnetCoordActorCodeID cid.Cid SubnetActorCodeID cid.Cid + MpowerActorCodeID cid.Cid + RewardActorCodeID cid.Cid + + SplitActorCodeID cid.Cid + ReplaceActorCodeID cid.Cid + ) var builtinActors map[cid.Cid]*actorInfo @@ -23,10 +28,14 @@ func init() { builtinActors = make(map[cid.Cid]*actorInfo) for id, info := range map[*cid.Cid]*actorInfo{ //nolint:nomaprange - &SplitActorCodeID: {name: "example/0/split"}, &SubnetCoordActorCodeID: {name: "hierarchical/0/sca"}, &SubnetActorCodeID: {name: "hierarchical/0/subnet"}, &MpowerActorCodeID: {name: "deleg/0/mpower"}, + &RewardActorCodeID: {name: "hierarchical/0/reward"}, + + &SplitActorCodeID: {name: "example/0/split"}, + &ReplaceActorCodeID: {name: "example/0/replace"}, + } { c, err := 
builder.Sum([]byte(info.name)) if err != nil { diff --git a/chain/consensus/actors/atomic-replace/cbor_gen.go b/chain/consensus/actors/atomic-replace/cbor_gen.go new file mode 100644 index 000000000..f443d5703 --- /dev/null +++ b/chain/consensus/actors/atomic-replace/cbor_gen.go @@ -0,0 +1,300 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. + +package replace + +import ( + "fmt" + "io" + "math" + "sort" + + atomic "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufReplaceState = []byte{129} + +func (t *ReplaceState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufReplaceState); err != nil { + return err + } + + // t.Owners (atomic.LockedState) (struct) + if err := t.Owners.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ReplaceState) UnmarshalCBOR(r io.Reader) error { + *t = ReplaceState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Owners (atomic.LockedState) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Owners = new(atomic.LockedState) + if err := t.Owners.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Owners pointer: %w", err) + } + } + + } + return nil +} + +var lengthBufOwners = []byte{129} + +func (t *Owners) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := 
w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufOwners); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.M (map[string]cid.Cid) (map) + { + if len(t.M) > 4096 { + return xerrors.Errorf("cannot marshal t.M map too large") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, uint64(len(t.M))); err != nil { + return err + } + + keys := make([]string, 0, len(t.M)) + for k := range t.M { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := t.M[k] + + if len(k) > cbg.MaxLength { + return xerrors.Errorf("Value in field k was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(k))); err != nil { + return err + } + if _, err := io.WriteString(w, string(k)); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) + } + + } + } + return nil +} + +func (t *Owners) UnmarshalCBOR(r io.Reader) error { + *t = Owners{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.M (map[string]cid.Cid) (map) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("expected a map (major type 5)") + } + if extra > 4096 { + return fmt.Errorf("t.M: map too large") + } + + t.M = make(map[string]cid.Cid, extra) + + for i, l := 0, int(extra); i < l; i++ { + + var k string + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + k = string(sval) + } + + var v cid.Cid + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field v: %w", 
err) + } + + v = c + + } + + t.M[k] = v + + } + return nil +} + +var lengthBufReplaceParams = []byte{129} + +func (t *ReplaceParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufReplaceParams); err != nil { + return err + } + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ReplaceParams) UnmarshalCBOR(r io.Reader) error { + *t = ReplaceParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + return nil +} + +var lengthBufOwnParams = []byte{129} + +func (t *OwnParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufOwnParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Seed (string) (string) + if len(t.Seed) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Seed was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Seed))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Seed)); err != nil { + return err + } + return nil +} + +func (t *OwnParams) UnmarshalCBOR(r io.Reader) error { + *t = OwnParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return 
fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Seed (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Seed = string(sval) + } + return nil +} diff --git a/chain/consensus/actors/atomic-replace/gen/gen.go b/chain/consensus/actors/atomic-replace/gen/gen.go new file mode 100644 index 000000000..3250c01b9 --- /dev/null +++ b/chain/consensus/actors/atomic-replace/gen/gen.go @@ -0,0 +1,17 @@ +package main + +import ( + replace "github.com/filecoin-project/lotus/chain/consensus/actors/atomic-replace" + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "replace", + replace.ReplaceState{}, + replace.Owners{}, + replace.ReplaceParams{}, + replace.OwnParams{}, + ); err != nil { + panic(err) + } +} diff --git a/chain/consensus/actors/atomic-replace/replace.go b/chain/consensus/actors/atomic-replace/replace.go new file mode 100644 index 000000000..3662ab8de --- /dev/null +++ b/chain/consensus/actors/atomic-replace/replace.go @@ -0,0 +1,265 @@ +package replace + +// Sample actor that replaces the cid from one address to the other. +// This actor is used as an example of how to use the actor execution +// protocol. + +import ( + cid "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + actor "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" +) + +//go:generate go run ./gen/gen.go + +// example "Replace" actor that atomically replaces the cid from one owner +// to the other. 
+ +var _ runtime.VMActor = ReplaceActor{} +var _ atomic.LockableActor = ReplaceActor{} + +// ReplaceState determines the actor state. +// FIXME: We are using a non-efficient locking strategy for now +// where the whole map is locked for an atomic execution. +// We could use a more fine-grained approach, but this actor +// has illustrative purposes for now. Consider improving it in +// future iterations. +type ReplaceState struct { + Owners *atomic.LockedState +} + +type Owners struct { + M map[string]cid.Cid +} + +// Merge implements the merge strategy to follow for our lockable state +// in the actor. +func (o *Owners) Merge(other atomic.LockableState) error { + tt, ok := other.(*Owners) + if !ok { + return xerrors.Errorf("type of LockableState not Owners") + } + + for k, v := range tt.M { + _, ok := o.M[k] + if ok { + return xerrors.Errorf("merge conflict. key for owner already set") + } + o.M[k] = v + } + return nil + +} + +func ConstructState() (*ReplaceState, error) { + owners, err := atomic.WrapLockableState(&Owners{M: map[string]cid.Cid{}}) + if err != nil { + return nil, err + } + return &ReplaceState{Owners: owners}, nil +} + +const ( + MethodReplace = 6 + MethodOwn = 7 +) + +type ReplaceActor struct{} + +func (a ReplaceActor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + atomic.MethodLock: a.Lock, + atomic.MethodMerge: a.Merge, + atomic.MethodAbort: a.Abort, + atomic.MethodUnlock: a.Unlock, + MethodReplace: a.Replace, + MethodOwn: a.Own, + } +} + +func (a ReplaceActor) Code() cid.Cid { + return actor.ReplaceActorCodeID +} + +func (a ReplaceActor) IsSingleton() bool { + return false +} + +func (a ReplaceActor) State() cbor.Er { + return new(ReplaceState) +} + +func (a ReplaceActor) Constructor(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + rt.ValidateImmediateCallerType(builtin.InitActorCodeID) + st, err := ConstructState() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error 
computing initial state") + rt.StateCreate(st) + return nil +} + +type OwnParams struct { + Seed string +} + +func (a ReplaceActor) Own(rt runtime.Runtime, params *OwnParams) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + + var st ReplaceState + rt.StateTransaction(&st, func() { + ValidateLockedState(rt, &st) + own, err := st.UnwrapOwners() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error unwrapping lockable state") + _, ok := own.M[rt.Caller().String()] + if ok { + rt.Abortf(exitcode.ErrIllegalState, "address already owning something") + } + own.M[rt.Caller().String()], err = abi.CidBuilder.Sum([]byte(params.Seed)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error computing cid") + st.storeOwners(rt, own) + }) + + return nil +} + +type ReplaceParams struct { + Addr address.Address +} + +func (a ReplaceActor) Replace(rt runtime.Runtime, params *ReplaceParams) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + + var st ReplaceState + rt.StateTransaction(&st, func() { + ValidateLockedState(rt, &st) + own, err := st.UnwrapOwners() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error unwrapping lockable state") + _, ok1 := own.M[rt.Caller().String()] + _, ok2 := own.M[params.Addr.String()] + if !ok1 || !ok2 { + rt.Abortf(exitcode.ErrIllegalState, "one (or both) parties don't have an asset to replace") + } + // Replace + own.M[rt.Caller().String()], own.M[params.Addr.String()] = + own.M[params.Addr.String()], own.M[rt.Caller().String()] + st.storeOwners(rt, own) + }) + + return nil +} + +/////// +// Atomic function definitions. 
+////// +func (a ReplaceActor) Lock(rt runtime.Runtime, params *atomic.LockParams) *atomic.LockedOutput { + // Anyone can lock the state + rt.ValidateImmediateCallerAcceptAny() + + var st ReplaceState + rt.StateTransaction(&st, func() { + switch params.Method { + case MethodReplace: + builtin.RequireNoErr(rt, st.Owners.LockState(), exitcode.ErrIllegalArgument, "error locking state") + default: + rt.Abortf(exitcode.ErrIllegalArgument, "provided method doesn't support atomic execution. No need to lock") + } + }) + + c, err := st.Owners.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error computing Cid for locked state") + return &atomic.LockedOutput{Cid: c} +} + +func (st *ReplaceState) unlock(rt runtime.Runtime) { + builtin.RequireNoErr(rt, st.Owners.UnlockState(), exitcode.ErrIllegalArgument, "error unlocking state") +} + +func (a ReplaceActor) Merge(rt runtime.Runtime, params *atomic.MergeParams) *abi.EmptyValue { + // Only system actor can trigger this function. + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + var st ReplaceState + rt.StateTransaction(&st, func() { + merge := &Owners{} + err := atomic.UnwrapMergeParams(params, merge) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error unwrapping output from mergeParams") + st.merge(rt, merge) + }) + + return nil +} + +func (a ReplaceActor) Unlock(rt runtime.Runtime, params *atomic.UnlockParams) *abi.EmptyValue { + // Only system actor can trigger this function. 
+ rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + var st ReplaceState + rt.StateTransaction(&st, func() { + output := &Owners{} + err := atomic.UnwrapUnlockParams(params, output) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error unwrapping output from unlockParams") + switch params.Params.Method { + case MethodReplace: + st.merge(rt, output) + st.unlock(rt) + default: + rt.Abortf(exitcode.ErrIllegalArgument, "this method has nothing to merge") + } + }) + + return nil +} + +func (st *ReplaceState) merge(rt runtime.Runtime, state atomic.LockableState) { + var owners Owners + err := atomic.UnwrapLockableState(st.Owners, &owners) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error unwrapping owners") + builtin.RequireNoErr(rt, owners.Merge(state), exitcode.ErrIllegalState, "error merging output") + st.storeOwners(rt, &owners) +} + +func (a ReplaceActor) Abort(rt runtime.Runtime, params *atomic.LockParams) *abi.EmptyValue { + // FIXME: We should check here that the only one allowed to abort an execuetion is + // the rt.Caller() that locked the state? Or is the system.actor because is triggered + // through a top-down transaction? + rt.ValidateImmediateCallerAcceptAny() + + var st ReplaceState + rt.StateTransaction(&st, func() { + switch params.Method { + case MethodReplace: + st.unlock(rt) + default: + rt.Abortf(exitcode.ErrIllegalArgument, "this method has nothing to unlock") + } + }) + + return nil +} + +func ValidateLockedState(rt runtime.Runtime, st *ReplaceState) { + builtin.RequireNoErr(rt, + atomic.ValidateIfLocked([]*atomic.LockedState{st.Owners}...), + exitcode.ErrIllegalState, "state locked") +} + +// UnwrapOwners is a convenient function to handle the locked state from the actor. 
+func (st *ReplaceState) UnwrapOwners() (*Owners, error) { + var own Owners + if err := atomic.UnwrapLockableState(st.Owners, &own); err != nil { + return nil, err + } + return &own, nil +} + +func (st *ReplaceState) storeOwners(rt runtime.Runtime, owners *Owners) { + err := st.Owners.SetState(owners) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error wrapping lockable state") +} diff --git a/chain/consensus/actors/atomic-replace/replace_test.go b/chain/consensus/actors/atomic-replace/replace_test.go new file mode 100644 index 000000000..c089db995 --- /dev/null +++ b/chain/consensus/actors/atomic-replace/replace_test.go @@ -0,0 +1,252 @@ +package replace_test + +import ( + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + replace "github.com/filecoin-project/lotus/chain/consensus/actors/atomic-replace" + atomic "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var cidUndef, _ = abi.CidBuilder.Sum([]byte("test")) + +func TestExports(t *testing.T) { + mock.CheckActorExports(t, replace.ReplaceActor{}) +} + +func TestConstruction(t *testing.T) { + t.Run("simple construction", func(t *testing.T) { + actor := newHarness(t) + rt := getRuntime(t) + actor.constructAndVerify(t, rt) + }) + +} + +func TestOwn(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + caller := tutil.NewIDAddr(t, 1000) + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test"}) + rt.Verify() + + st := getState(rt) + owners, err := st.UnwrapOwners() + require.NoError(t, 
err) + _, ok := owners.M[caller.String()] + require.True(t, ok) + + rt.ExpectValidateCallerAny() + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test"}) + }) + + caller = tutil.NewIDAddr(t, 1001) + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test2"}) + rt.Verify() + +} + +func TestReplace(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + caller := tutil.NewIDAddr(t, 1000) + target := tutil.NewIDAddr(t, 1001) + + rt.SetCaller(target, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test1"}) + + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test2"}) + + st := getState(rt) + owners, err := st.UnwrapOwners() + require.NoError(t, err) + prev1 := owners.M[caller.String()] + prev2 := owners.M[target.String()] + + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Replace, &replace.ReplaceParams{Addr: target}) + rt.Verify() + + st = getState(rt) + owners, err = st.UnwrapOwners() + require.NoError(t, err) + own1, ok := owners.M[caller.String()] + require.True(t, ok) + require.Equal(t, own1, prev2) + own2, ok := owners.M[target.String()] + require.True(t, ok) + require.Equal(t, own2, prev1) + +} + +func TestLockAbort(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + caller := tutil.NewIDAddr(t, 1000) + target := tutil.NewIDAddr(t, 1001) + + rt.SetCaller(target, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test1"}) + + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test2"}) + + st := getState(rt) + owners, err := st.UnwrapOwners() + 
require.NoError(t, err) + prev1 := owners.M[caller.String()] + prev2 := owners.M[target.String()] + + lockparams, err := atomic.WrapLockParams(replace.MethodReplace, &replace.ReplaceParams{Addr: target}) + require.NoError(t, err) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Lock, lockparams) + rt.Verify() + + // It'll fail because state is locked. + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Replace, &replace.ReplaceParams{Addr: target}) + }) + + // Abort + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Abort, lockparams) + + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Replace, &replace.ReplaceParams{Addr: target}) + rt.Verify() + + st = getState(rt) + owners, err = st.UnwrapOwners() + require.NoError(t, err) + own1, ok := owners.M[caller.String()] + require.True(t, ok) + require.Equal(t, own1, prev2) + own2, ok := owners.M[target.String()] + require.True(t, ok) + require.Equal(t, own2, prev1) +} + +func TestUnlock(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + target := tutil.NewIDAddr(t, 1001) + + rt.SetCaller(target, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test1"}) + + lockparams, err := atomic.WrapLockParams(replace.MethodReplace, &replace.ReplaceParams{Addr: target}) + require.NoError(t, err) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Lock, lockparams) + rt.Verify() + + rt.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + ls := &replace.Owners{M: map[string]cid.Cid{"test": cidUndef}} + require.NoError(t, err) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + params, err := atomic.WrapUnlockParams(lockparams, ls) + require.NoError(t, err) + rt.Call(h.ReplaceActor.Unlock, params) + st := getState(rt) + owners, err := st.UnwrapOwners() + require.NoError(t, err) + own1, ok := owners.M["test"] + require.True(t, ok) + 
require.Equal(t, own1, cidUndef) + _, ok = owners.M[target.String()] + require.True(t, ok) +} + +func TestMerge(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + target := tutil.NewIDAddr(t, 1001) + + rt.SetCaller(target, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Own, &replace.OwnParams{Seed: "test1"}) + + lockparams, err := atomic.WrapLockParams(replace.MethodReplace, &replace.ReplaceParams{Addr: target}) + require.NoError(t, err) + rt.ExpectValidateCallerAny() + rt.Call(h.ReplaceActor.Lock, lockparams) + rt.Verify() + + rt.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + ls1 := &replace.Owners{M: map[string]cid.Cid{"test": cidUndef}} + ls2 := &replace.Owners{M: map[string]cid.Cid{"test2": cidUndef}} + require.NoError(t, err) + params, err := atomic.WrapMergeParams(ls1) + require.NoError(t, err) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.Call(h.ReplaceActor.Merge, params) + params, err = atomic.WrapMergeParams(ls2) + require.NoError(t, err) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.Call(h.ReplaceActor.Merge, params) + st := getState(rt) + owners, err := st.UnwrapOwners() + require.NoError(t, err) + own1, ok := owners.M["test"] + require.True(t, ok) + require.Equal(t, own1, cidUndef) + own2, ok := owners.M["test2"] + require.True(t, ok) + require.Equal(t, own2, cidUndef) +} + +type shActorHarness struct { + replace.ReplaceActor + t *testing.T +} + +func newHarness(t *testing.T) *shActorHarness { + return &shActorHarness{ + ReplaceActor: replace.ReplaceActor{}, + t: t, + } +} + +func (h *shActorHarness) constructAndVerify(t *testing.T, rt *mock.Runtime) { + rt.ExpectValidateCallerType(builtin.InitActorCodeID) + ret := rt.Call(h.ReplaceActor.Constructor, nil) + assert.Nil(h.t, ret) + rt.Verify() +} + +func getRuntime(t *testing.T) *mock.Runtime { + replaceActorAddr := tutil.NewIDAddr(t, 100) + builder := 
mock.NewBuilder(replaceActorAddr).WithCaller(builtin.InitActorAddr, builtin.InitActorCodeID) + return builder.Build(t) +} + +func getState(rt *mock.Runtime) *replace.ReplaceState { + var st replace.ReplaceState + rt.GetState(&st) + return &st +} diff --git a/chain/consensus/actors/init/actor_init.go b/chain/consensus/actors/init/actor_init.go index d8cb1d9f5..294439c48 100644 --- a/chain/consensus/actors/init/actor_init.go +++ b/chain/consensus/actors/init/actor_init.go @@ -7,15 +7,15 @@ import ( "github.com/filecoin-project/go-state-types/exitcode" actor "github.com/filecoin-project/lotus/chain/consensus/actors" init0 "github.com/filecoin-project/specs-actors/actors/builtin/init" - init6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/init" + init7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/init" cid "github.com/ipfs/go-cid" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" ) -// copied init6 actor but allows the SplitActor to be constructed +// copied init7 actor but allows the SplitActor to be constructed // The init actor uniquely has the power to create new actors. // It maintains a table resolving pubkey and temporary actor addresses to the canonical ID-addresses. 
@@ -36,7 +36,7 @@ func (a InitActor) IsSingleton() bool { return true } -func (a InitActor) State() cbor.Er { return new(init6.State) } +func (a InitActor) State() cbor.Er { return new(init7.State) } var _ runtime.VMActor = InitActor{} @@ -47,7 +47,7 @@ type ConstructorParams = init0.ConstructorParams func (a InitActor) Constructor(rt runtime.Runtime, params *ConstructorParams) *abi.EmptyValue { rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) - st, err := init6.ConstructState(adt.AsStore(rt), params.NetworkName) + st, err := init7.ConstructState(adt.AsStore(rt), params.NetworkName) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to construct state") rt.StateCreate(st) return nil @@ -81,7 +81,7 @@ func (a InitActor) Exec(rt runtime.Runtime, params *ExecParams) *ExecReturn { // Allocate an ID for this actor. // Store mapping of pubkey or actor address to actor ID - var st init6.State + var st init7.State var idAddr addr.Address rt.StateTransaction(&st, func() { var err error @@ -110,8 +110,9 @@ func canExec(callerCodeID cid.Cid, execCodeID cid.Cid) bool { case builtin.PaymentChannelActorCodeID, builtin.MultisigActorCodeID, actor.SplitActorCodeID, - actor.SubnetActorCodeID, - actor.MpowerActorCodeID: + actor.MpowerActorCodeID, + actor.ReplaceActorCodeID, + actor.SubnetActorCodeID: return true default: return false diff --git a/chain/consensus/actors/mpower/cbor_gen.go b/chain/consensus/actors/mpower/cbor_gen.go index 4d223b5e7..10b3bc74e 100644 --- a/chain/consensus/actors/mpower/cbor_gen.go +++ b/chain/consensus/actors/mpower/cbor_gen.go @@ -3,14 +3,20 @@ package mpower import ( "fmt" "io" + "math" + "sort" + cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" ) var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort -var lengthBufState = []byte{130} +var lengthBufState = []byte{131} func (t *State) MarshalCBOR(w io.Writer) error { if t == nil { @@ -47,6 +53,18 @@ func (t 
*State) MarshalCBOR(w io.Writer) error { return err } } + // t.PublicKey ([]uint8) (slice) + if len(t.PublicKey) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.PublicKey was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PublicKey))); err != nil { + return err + } + + if _, err := w.Write(t.PublicKey[:]); err != nil { + return err + } return nil } @@ -77,7 +95,7 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 2 { + if extra != 3 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -134,8 +152,29 @@ func (t *State) UnmarshalCBOR(r io.Reader) error { t.Miners[i] = m } + // t.PublicKey ([]uint8) (slice) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.PublicKey: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.PublicKey = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.PublicKey[:]); err != nil { + return err + } return nil + } var lengthBufAddMinerParams = []byte{129} @@ -216,3 +255,73 @@ func (t *AddMinerParams) UnmarshalCBOR(r io.Reader) error { return nil } + +var lengthBufNewTaprootAddressParam = []byte{129} + +func (t *NewTaprootAddressParam) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufNewTaprootAddressParam); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.PublicKey ([]uint8) (slice) + if len(t.PublicKey) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.PublicKey was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.PublicKey))); err != nil { + return err + } + + if _, err := w.Write(t.PublicKey[:]); err != nil { + 
return err + } + return nil +} + +func (t *NewTaprootAddressParam) UnmarshalCBOR(r io.Reader) error { + *t = NewTaprootAddressParam{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.PublicKey ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.PublicKey: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.PublicKey = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.PublicKey[:]); err != nil { + return err + } + return nil +} diff --git a/chain/consensus/actors/mpower/gen/gen.go b/chain/consensus/actors/mpower/gen/gen.go new file mode 100644 index 000000000..ca04ecf90 --- /dev/null +++ b/chain/consensus/actors/mpower/gen/gen.go @@ -0,0 +1,16 @@ +package main + +import ( + mpower "github.com/filecoin-project/lotus/chain/consensus/actors/mpower" + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "mpower", + mpower.State{}, + mpower.AddMinerParams{}, + mpower.NewTaprootAddressParam{}, + ); err != nil { + panic(err) + } +} diff --git a/chain/consensus/actors/mpower/mpower_test.go b/chain/consensus/actors/mpower/mpower_test.go new file mode 100644 index 000000000..914b2527d --- /dev/null +++ b/chain/consensus/actors/mpower/mpower_test.go @@ -0,0 +1,86 @@ +package mpower_test + +import ( + "testing" + + //address "github.com/filecoin-project/go-address" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + + mpower "github.com/filecoin-project/lotus/chain/consensus/actors/mpower" + 
"github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/support/mock" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestExports(t *testing.T) { + mock.CheckActorExports(t, mpower.Actor{}) +} + +func TestConstruction(t *testing.T) { + t.Run("simple construction", func(t *testing.T) { + actor := newHarness(t) + rt := getRuntime(t) + actor.constructAndVerify(t, rt) + }) + +} + +func TestAddMiner(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + caller := tutil.NewIDAddr(t, 1000) + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerAny() + //rt.Call(h.Actor.AddMiners, &mpower.AddMinerParams{Miners: []address.Address{caller}}) + rt.Call(h.Actor.AddMiners, &mpower.AddMinerParams{Miners: []string{"caller"}}) + rt.Verify() + + st := getState(rt) + require.Equal(t, len(st.Miners), 1) +} + +// func TestAddPkey(t *testing.T) { +// h := newHarness(t) +// rt := getRuntime(t) +// h.constructAndVerify(t, rt) +// caller := tutil.NewIDAddr(t, 1000) +// rt.SetCaller(caller, builtin.AccountActorCodeID) +// rt.ExpectValidateCallerAny() +// rt.Call(h.Actor.UpdateTaprootAddress, &mpower.NewTaprootAddressParam{PublicKey: []byte("test")}) +// rt.Verify() + +// st := getState(rt) +// require.Equal(t, st.PublicKey, []byte("test")) +// } + +type shActorHarness struct { + mpower.Actor + t *testing.T +} + +func newHarness(t *testing.T) *shActorHarness { + return &shActorHarness{ + Actor: mpower.Actor{}, + t: t, + } +} + +func (h *shActorHarness) constructAndVerify(t *testing.T, rt *mock.Runtime) { + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + ret := rt.Call(h.Actor.Constructor, nil) + assert.Nil(h.t, ret) + rt.Verify() +} +func getRuntime(t *testing.T) *mock.Runtime { + mpowerActorAddr := tutil.NewIDAddr(t, 65) + builder := mock.NewBuilder(mpowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + 
return builder.Build(t) +} + +func getState(rt *mock.Runtime) *mpower.State { + var st mpower.State + rt.GetState(&st) + return &st +} diff --git a/chain/consensus/actors/mpower/power_actor.go b/chain/consensus/actors/mpower/power_actor.go index d4d4bce7e..3deb63bfd 100644 --- a/chain/consensus/actors/mpower/power_actor.go +++ b/chain/consensus/actors/mpower/power_actor.go @@ -1,6 +1,7 @@ package mpower import ( + "fmt" address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/cbor" @@ -9,16 +10,19 @@ import ( "github.com/ipfs/go-cid" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + + //"github.com/Zondax/multi-party-sig/pkg/taproot" + ) type Runtime = runtime.Runtime type Actor struct{} -// Power Actor address is t065 (arbitrarly choosen) +// Mocked Power Actor address is t065 (arbitrarly choosen) var PowerActorAddr = func() address.Address { a, err := address.NewIDAddress(65) if err != nil { @@ -30,7 +34,9 @@ var PowerActorAddr = func() address.Address { func (a Actor) Exports() []interface{} { return []interface{}{ builtin.MethodConstructor: a.Constructor, // Initialiazed the actor; always required - 2: a.AddMiner, // Add a miner to the list (specificaly crafted for checkpointing) + 2: a.AddMiners, // Add a miner to the list (specificaly crafted for checkpointing) + 3: a.RemoveMiners, // Remove miners from the list + 4: a.UpdateTaprootAddress, // Update the taproot address } } @@ -67,15 +73,67 @@ type AddMinerParams struct { Miners []string } -// Adds or removes claimed power for the calling actor. +// Adds claimed power for the calling actor. 
+// May only be invoked by a miner actor. +func (a Actor) AddMiners(rt Runtime, params *AddMinerParams) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + var st State + rt.StateTransaction(&st, func() { + // Miners list is replaced with the one passed as parameters + st.Miners = append(st.Miners,params.Miners...) + st.Miners = unique(st.Miners) + st.MinerCount = int64(len(st.Miners)) + }) + return nil +} + +// Removes claimed power for the calling actor. // May only be invoked by a miner actor. -func (a Actor) AddMiner(rt Runtime, params *AddMinerParams) *abi.EmptyValue { +func (a Actor) RemoveMiners(rt Runtime, params *AddMinerParams) *abi.EmptyValue { rt.ValidateImmediateCallerAcceptAny() var st State rt.StateTransaction(&st, func() { // Miners list is replaced with the one passed as parameters - st.MinerCount += 1 - st.Miners = params.Miners + + for _, minerToRemove := range params.Miners{ + for i, oldMiner := range st.Miners{ + if minerToRemove == oldMiner{ + st.Miners = append(st.Miners[:i], st.Miners[i+1:]...) 
+ break + } + } + + } + fmt.Println("New list of miners after removal: ",st.Miners) + st.MinerCount = int64(len(st.Miners)) }) return nil } + + +type NewTaprootAddressParam struct { + PublicKey []byte +} + +func (a Actor) UpdateTaprootAddress(rt Runtime, addr *NewTaprootAddressParam) *abi.EmptyValue { + rt.ValidateImmediateCallerAcceptAny() + var st State + rt.StateTransaction(&st, func() { + // public key is replaced with the one passed as parameters + st.PublicKey = addr.PublicKey + fmt.Println("address updated",st.PublicKey) + }) + return nil +} + +func unique(strSlice []string) []string { + keys := make(map[string]bool) + list := []string{} + for _, entry := range strSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + list = append(list, entry) + } + } + return list +} diff --git a/chain/consensus/actors/mpower/power_state.go b/chain/consensus/actors/mpower/power_state.go index 2e0024497..0f68dc1b5 100644 --- a/chain/consensus/actors/mpower/power_state.go +++ b/chain/consensus/actors/mpower/power_state.go @@ -1,7 +1,8 @@ package mpower import ( - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + //"github.com/Zondax/multi-party-sig/pkg/taproot" ) // Mpower actor is only used to determine if a new miner joined or not when running the checkpointing module @@ -9,6 +10,7 @@ import ( type State struct { MinerCount int64 Miners []string + PublicKey []byte //taproot address } func ConstructState(store adt.Store) (*State, error) { @@ -16,5 +18,6 @@ func ConstructState(store adt.Store) (*State, error) { MinerCount: 0, // should have participants with pre generated key Miners: make([]string, 0), + PublicKey: make([]byte, 0), }, nil } diff --git a/chain/consensus/actors/registry/registry.go b/chain/consensus/actors/registry/registry.go index b4839bacf..6c48bc3a8 100644 --- a/chain/consensus/actors/registry/registry.go +++ b/chain/consensus/actors/registry/registry.go @@ -2,26 
+2,36 @@ package registry import ( "github.com/filecoin-project/lotus/chain/actors" + replace "github.com/filecoin-project/lotus/chain/consensus/actors/atomic-replace" initactor "github.com/filecoin-project/lotus/chain/consensus/actors/init" + "github.com/filecoin-project/lotus/chain/consensus/actors/mpower" + + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/actors/split" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" "github.com/filecoin-project/lotus/chain/vm" - exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" ) func NewActorRegistry() *vm.ActorRegistry { inv := vm.NewActorRegistry() // TODO: drop unneeded - inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...) + inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...) inv.Register(nil, initactor.InitActor{}) // use our custom init actor - inv.Register(nil, split.SplitActor{}) + // Hierarchical consensus + inv.Register(nil, reward.Actor{}) inv.Register(nil, subnet.SubnetActor{}) inv.Register(nil, sca.SubnetCoordActor{}) inv.Register(nil, mpower.Actor{}) + // Custom actors + inv.Register(nil, split.SplitActor{}) + inv.Register(nil, replace.ReplaceActor{}) + return inv } diff --git a/chain/consensus/actors/reward/cbor_gen.go b/chain/consensus/actors/reward/cbor_gen.go new file mode 100644 index 000000000..b943af656 --- /dev/null +++ b/chain/consensus/actors/reward/cbor_gen.go @@ -0,0 +1,316 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package reward + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufFundingParams = []byte{130} + +func (t *FundingParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufFundingParams); err != nil { + return err + } + + // t.Addr (address.Address) (struct) + if err := t.Addr.MarshalCBOR(w); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *FundingParams) UnmarshalCBOR(r io.Reader) error { + *t = FundingParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Addr (address.Address) (struct) + + { + + if err := t.Addr.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Addr: %w", err) + } + + } + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } + + } + return nil +} + +var lengthBufState = []byte{139} + +func (t *State) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.CumsumBaseline (big.Int) (struct) + if err := t.CumsumBaseline.MarshalCBOR(w); err != nil { + return err + } + + // t.CumsumRealized (big.Int) (struct) + if err := t.CumsumRealized.MarshalCBOR(w); err != 
nil { + return err + } + + // t.EffectiveNetworkTime (abi.ChainEpoch) (int64) + if t.EffectiveNetworkTime >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.EffectiveNetworkTime)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.EffectiveNetworkTime-1)); err != nil { + return err + } + } + + // t.EffectiveBaselinePower (big.Int) (struct) + if err := t.EffectiveBaselinePower.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochReward (big.Int) (struct) + if err := t.ThisEpochReward.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochRewardSmoothed (smoothing.FilterEstimate) (struct) + if err := t.ThisEpochRewardSmoothed.MarshalCBOR(w); err != nil { + return err + } + + // t.ThisEpochBaselinePower (big.Int) (struct) + if err := t.ThisEpochBaselinePower.MarshalCBOR(w); err != nil { + return err + } + + // t.Epoch (abi.ChainEpoch) (int64) + if t.Epoch >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Epoch)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Epoch-1)); err != nil { + return err + } + } + + // t.TotalStoragePowerReward (big.Int) (struct) + if err := t.TotalStoragePowerReward.MarshalCBOR(w); err != nil { + return err + } + + // t.SimpleTotal (big.Int) (struct) + if err := t.SimpleTotal.MarshalCBOR(w); err != nil { + return err + } + + // t.BaselineTotal (big.Int) (struct) + if err := t.BaselineTotal.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *State) UnmarshalCBOR(r io.Reader) error { + *t = State{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 11 { + return fmt.Errorf("cbor input 
had wrong number of fields") + } + + // t.CumsumBaseline (big.Int) (struct) + + { + + if err := t.CumsumBaseline.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CumsumBaseline: %w", err) + } + + } + // t.CumsumRealized (big.Int) (struct) + + { + + if err := t.CumsumRealized.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CumsumRealized: %w", err) + } + + } + // t.EffectiveNetworkTime (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.EffectiveNetworkTime = abi.ChainEpoch(extraI) + } + // t.EffectiveBaselinePower (big.Int) (struct) + + { + + if err := t.EffectiveBaselinePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.EffectiveBaselinePower: %w", err) + } + + } + // t.ThisEpochReward (big.Int) (struct) + + { + + if err := t.ThisEpochReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochReward: %w", err) + } + + } + // t.ThisEpochRewardSmoothed (smoothing.FilterEstimate) (struct) + + { + + if err := t.ThisEpochRewardSmoothed.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochRewardSmoothed: %w", err) + } + + } + // t.ThisEpochBaselinePower (big.Int) (struct) + + { + + if err := t.ThisEpochBaselinePower.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.ThisEpochBaselinePower: %w", err) + } + + } + // t.Epoch (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case 
cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Epoch = abi.ChainEpoch(extraI) + } + // t.TotalStoragePowerReward (big.Int) (struct) + + { + + if err := t.TotalStoragePowerReward.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.TotalStoragePowerReward: %w", err) + } + + } + // t.SimpleTotal (big.Int) (struct) + + { + + if err := t.SimpleTotal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.SimpleTotal: %w", err) + } + + } + // t.BaselineTotal (big.Int) (struct) + + { + + if err := t.BaselineTotal.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.BaselineTotal: %w", err) + } + + } + return nil +} diff --git a/chain/consensus/actors/reward/gen/gen.go b/chain/consensus/actors/reward/gen/gen.go new file mode 100644 index 000000000..ab98228d0 --- /dev/null +++ b/chain/consensus/actors/reward/gen/gen.go @@ -0,0 +1,17 @@ +package main + +import ( + actor "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "reward", + // actor.ThisEpochRewardReturn{}, + actor.FundingParams{}, + actor.State{}, + ); err != nil { + panic(err) + } +} diff --git a/chain/consensus/actors/reward/reward_actor.go b/chain/consensus/actors/reward/reward_actor.go new file mode 100644 index 000000000..6678de4c3 --- /dev/null +++ b/chain/consensus/actors/reward/reward_actor.go @@ -0,0 +1,236 @@ +package reward + +//go:generate go run ./gen/gen.go + +import ( + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + 
"github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/go-state-types/exitcode" + rtt "github.com/filecoin-project/go-state-types/rt" + actor "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" + reward0 "github.com/filecoin-project/specs-actors/actors/builtin/reward" + reward6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/reward" + "github.com/ipfs/go-cid" + + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" +) + +var RewardActorAddr = func() address.Address { + a, err := address.NewIDAddress(2) + if err != nil { + panic(err) + } + return a +}() + +// PenaltyMultiplier is the factor miner penaltys are scaled up by +const PenaltyMultiplier = 3 + +type Actor struct{} + +var Methods = struct { + Constructor abi.MethodNum + AwardBlockReward abi.MethodNum + ThisEpochReward abi.MethodNum + UpdateNetworkKPI abi.MethodNum + ExternalFunding abi.MethodNum +}{builtin0.MethodConstructor, 2, 3, 4, 5} + +func (a Actor) Exports() []interface{} { + return []interface{}{ + builtin.MethodConstructor: a.Constructor, + 2: a.AwardBlockReward, + 3: a.ThisEpochReward, + 4: a.UpdateNetworkKPI, + 5: a.ExternalFunding, + } +} + +func (a Actor) Code() cid.Cid { + return actor.RewardActorCodeID +} + +func (a Actor) IsSingleton() bool { + return true +} + +func (a Actor) State() cbor.Er { + return new(State) +} + +var _ runtime.VMActor = Actor{} + +func (a Actor) Constructor(rt runtime.Runtime, currRealizedPower *abi.StoragePower) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + if currRealizedPower == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "argument should not be nil") + return nil // linter does not understand abort exiting + } + st := ConstructState(*currRealizedPower) + rt.StateCreate(st) + return 
nil +} + +//type AwardBlockRewardParams struct { +// Miner address.Address +// Penalty abi.TokenAmount // penalty for including bad messages in a block, >= 0 +// GasReward abi.TokenAmount // gas reward from all gas fees in a block, >= 0 +// WinCount int64 // number of reward units won, > 0 +//} +type AwardBlockRewardParams = reward0.AwardBlockRewardParams + +type FundingParams struct { + Addr address.Address + Value abi.TokenAmount +} + +// ExternalFunding sends to an address funding coming from another subnet. +func (a Actor) ExternalFunding(rt runtime.Runtime, params *FundingParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(hierarchical.SubnetCoordActorAddr) + addr, ok := rt.ResolveAddress(params.Addr) + if !ok { + rt.Abortf(exitcode.ErrNotFound, "failed to resolve given owner address") + } + value := params.Value + // TODO: We should maybe remove this check and allow sending funds even if the actor doesn't have balance, + // or give virtually "infinite" balance to subnets. This transaction failing due to a lack of funds + // could be catastrophic, and users may lose their funds. + // TODO 2: The reward actor in subnets should give no reward to miners, so much of this actor will be + // simplified, giving "infinite" balance to this actor won't mess up with FIL's circulating supply because + // the only way to move funds from here is by externally funding. + if value.GreaterThan(rt.CurrentBalance()) { + rt.Log(rtt.WARN, "reward actor balance %d below totalReward expected %d, paying out rest of balance", rt.CurrentBalance(), value) + value = rt.CurrentBalance() + } + + code := rt.Send(addr, builtin.MethodSend, nil, value, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send external funds to address with value: %v, code: %v", value, code) + } + + return nil +} + +// Awards a reward to a block producer. +// This method is called only by the system actor, implicitly, as the last message in the evaluation of a block. 
+// The system actor thus computes the parameters and attached value. +// +// The reward includes two components: +// - the epoch block reward, computed and paid from the reward actor's balance, +// - the block gas reward, expected to be transferred to the reward actor with this invocation. +// +// The reward is reduced before the residual is credited to the block producer, by: +// - a penalty amount, provided as a parameter, which is burnt, +func (a Actor) AwardBlockReward(rt runtime.Runtime, params *AwardBlockRewardParams) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + priorBalance := rt.CurrentBalance() + if params.Penalty.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative penalty %v", params.Penalty) + } + if params.GasReward.LessThan(big.Zero()) { + rt.Abortf(exitcode.ErrIllegalArgument, "negative gas reward %v", params.GasReward) + } + if priorBalance.LessThan(params.GasReward) { + rt.Abortf(exitcode.ErrIllegalState, "actor current balance %v insufficient to pay gas reward %v", + priorBalance, params.GasReward) + } + if params.WinCount <= 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "invalid win count %d", params.WinCount) + } + + minerAddr, ok := rt.ResolveAddress(params.Miner) + if !ok { + rt.Abortf(exitcode.ErrNotFound, "failed to resolve given owner address") + } + // The miner penalty is scaled up by a factor of PenaltyMultiplier + penalty := big.Mul(big.NewInt(PenaltyMultiplier), params.Penalty) + totalReward := big.Zero() + var st State + rt.StateTransaction(&st, func() { + blockReward := big.Mul(st.ThisEpochReward, big.NewInt(params.WinCount)) + blockReward = big.Div(blockReward, big.NewInt(builtin.ExpectedLeadersPerEpoch)) + totalReward = big.Add(blockReward, params.GasReward) + currBalance := rt.CurrentBalance() + if totalReward.GreaterThan(currBalance) { + rt.Log(rtt.WARN, "reward actor balance %d below totalReward expected %d, paying out rest of balance", currBalance, totalReward) + totalReward = 
currBalance + + blockReward = big.Sub(totalReward, params.GasReward) + // Since we have already asserted the balance is greater than gas reward blockReward is >= 0 + builtin.RequireState(rt, blockReward.GreaterThanEqual(big.Zero()), "programming error, block reward %v below zero", blockReward) + } + st.TotalStoragePowerReward = big.Add(st.TotalStoragePowerReward, blockReward) + }) + + builtin.RequireState(rt, totalReward.LessThanEqual(priorBalance), "reward %v exceeds balance %v", totalReward, priorBalance) + + // if this fails, we can assume the miner is responsible and avoid failing here. + rewardParams := builtin.ApplyRewardParams{ + Reward: totalReward, + Penalty: penalty, + } + code := rt.Send(minerAddr, builtin.MethodsMiner.ApplyRewards, &rewardParams, totalReward, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send ApplyRewards call to the miner actor with funds: %v, code: %v", totalReward, code) + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, totalReward, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Log(rtt.ERROR, "failed to send unsent reward to the burnt funds actor, code: %v", code) + } + } + + return nil +} + +// Changed since v0: +// - removed ThisEpochReward (unsmoothed) +//type ThisEpochRewardReturn struct { +// ThisEpochRewardSmoothed smoothing.FilterEstimate +// ThisEpochBaselinePower abi.StoragePower +//} +type ThisEpochRewardReturn = reward6.ThisEpochRewardReturn + +// The award value used for the current epoch, updated at the end of an epoch +// through cron tick. In the case previous epochs were null blocks this +// is the reward value as calculated at the last non-null epoch. 
+func (a Actor) ThisEpochReward(rt runtime.Runtime, _ *abi.EmptyValue) *ThisEpochRewardReturn { + rt.ValidateImmediateCallerAcceptAny() + + var st State + rt.StateReadonly(&st) + return &ThisEpochRewardReturn{ + ThisEpochRewardSmoothed: st.ThisEpochRewardSmoothed, + ThisEpochBaselinePower: st.ThisEpochBaselinePower, + } +} + +// Called at the end of each epoch by the power actor (in turn by its cron hook). +// This is only invoked for non-empty tipsets, but catches up any number of null +// epochs to compute the next epoch reward. +func (a Actor) UpdateNetworkKPI(rt runtime.Runtime, currRealizedPower *abi.StoragePower) *abi.EmptyValue { + rt.ValidateImmediateCallerIs(builtin.StoragePowerActorAddr) + if currRealizedPower == nil { + rt.Abortf(exitcode.ErrIllegalArgument, "argument should not be nil") + } + + var st State + rt.StateTransaction(&st, func() { + prev := st.Epoch + // if there were null runs catch up the computation until + // st.Epoch == rt.CurrEpoch() + for st.Epoch < rt.CurrEpoch() { + // Update to next epoch to process null rounds + st.updateToNextEpoch(*currRealizedPower) + } + + st.updateToNextEpochWithReward(*currRealizedPower) + // only update smoothed estimates after updating reward and epoch + st.updateSmoothedEstimates(st.Epoch - prev) + }) + return nil +} diff --git a/chain/consensus/actors/reward/reward_logic.go b/chain/consensus/actors/reward/reward_logic.go new file mode 100644 index 000000000..571b0f51c --- /dev/null +++ b/chain/consensus/actors/reward/reward_logic.go @@ -0,0 +1,119 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/specs-actors/v7/actors/util/math" +) + +// Baseline function = BaselineInitialValue * (BaselineExponent) ^(t), t in epochs +// Note: we compute exponential iteratively using recurrence e(n) = e * e(n-1). 
+// Caller of baseline power function is responsible for keeping track of intermediate, +// state e(n-1), the baseline power function just does the next multiplication + +// Floor(e^(ln[1 + 100%] / epochsInYear) * 2^128 +// Q.128 formatted number such that f(epoch) = baseExponent^epoch grows 100% in one year of epochs +// Calculation here: https://www.wolframalpha.com/input/?i=IntegerPart%5BExp%5BLog%5B1%2B100%25%5D%2F%28%28365+days%29%2F%2830+seconds%29%29%5D*2%5E128%5D +var BaselineExponent = big.MustFromString("340282591298641078465964189926313473653") // Q.128 + +// 2.5057116798121726 EiB +var BaselineInitialValue = big.NewInt(2_888_888_880_000_000_000) // Q.0 + +// Initialize baseline power for epoch -1 so that baseline power at epoch 0 is +// BaselineInitialValue. +func InitBaselinePower() abi.StoragePower { + baselineInitialValue256 := big.Lsh(BaselineInitialValue, 2*math.Precision128) // Q.0 => Q.256 + baselineAtMinusOne := big.Div(baselineInitialValue256, BaselineExponent) // Q.256 / Q.128 => Q.128 + return big.Rsh(baselineAtMinusOne, math.Precision128) // Q.128 => Q.0 +} + +// Compute BaselinePower(t) from BaselinePower(t-1) with an additional multiplication +// of the base exponent. +func BaselinePowerFromPrev(prevEpochBaselinePower abi.StoragePower) abi.StoragePower { + thisEpochBaselinePower := big.Mul(prevEpochBaselinePower, BaselineExponent) // Q.0 * Q.128 => Q.128 + return big.Rsh(thisEpochBaselinePower, math.Precision128) // Q.128 => Q.0 +} + +// These numbers are estimates of the onchain constants. They are good for initializing state in +// devnets and testing but will not match the on chain values exactly which depend on storage onboarding +// and upgrade epoch history. 
They are in units of attoFIL, 10^-18 FIL +var DefaultSimpleTotal = big.Mul(big.NewInt(330e6), big.NewInt(1e18)) // 330M +var DefaultBaselineTotal = big.Mul(big.NewInt(770e6), big.NewInt(1e18)) // 770M + +// Computes RewardTheta which is is precise fractional value of effectiveNetworkTime. +// The effectiveNetworkTime is defined by CumsumBaselinePower(theta) == CumsumRealizedPower +// As baseline power is defined over integers and the RewardTheta is required to be fractional, +// we perform linear interpolation between CumsumBaseline(⌊thetaβŒ‹) and CumsumBaseline(⌈thetaβŒ‰). +// The effectiveNetworkTime argument is ceiling of theta. +// The result is a fractional effectiveNetworkTime (theta) in Q.128 format. +func ComputeRTheta(effectiveNetworkTime abi.ChainEpoch, baselinePowerAtEffectiveNetworkTime, cumsumRealized, cumsumBaseline big.Int) big.Int { + var rewardTheta big.Int + if effectiveNetworkTime != 0 { + rewardTheta = big.NewInt(int64(effectiveNetworkTime)) // Q.0 + rewardTheta = big.Lsh(rewardTheta, math.Precision128) // Q.0 => Q.128 + diff := big.Sub(cumsumBaseline, cumsumRealized) + diff = big.Lsh(diff, math.Precision128) // Q.0 => Q.128 + diff = big.Div(diff, baselinePowerAtEffectiveNetworkTime) // Q.128 / Q.0 => Q.128 + rewardTheta = big.Sub(rewardTheta, diff) // Q.128 + } else { + // special case for initialization + rewardTheta = big.Zero() + } + return rewardTheta +} + +var ( + // lambda = ln(2) / (6 * epochsInYear) + // for Q.128: int(lambda * 2^128) + // Calculation here: https://www.wolframalpha.com/input/?i=IntegerPart%5BLog%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29+*+2%5E128%5D + Lambda = big.MustFromString("37396271439864487274534522888786") + // expLamSubOne = e^lambda - 1 + // for Q.128: int(expLamSubOne * 2^128) + // Calculation here: https://www.wolframalpha.com/input/?i=IntegerPart%5B%5BExp%5BLog%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D+-+1%5D+*+2%5E128%5D + ExpLamSubOne = 
big.MustFromString("37396273494747879394193016954629") +) + +// Computes a reward for all expected leaders when effective network time changes from prevTheta to currTheta +// Inputs are in Q.128 format +func computeReward(epoch abi.ChainEpoch, prevTheta, currTheta, simpleTotal, baselineTotal big.Int) abi.TokenAmount { + simpleReward := big.Mul(simpleTotal, ExpLamSubOne) //Q.0 * Q.128 => Q.128 + epochLam := big.Mul(big.NewInt(int64(epoch)), Lambda) // Q.0 * Q.128 => Q.128 + + simpleReward = big.Mul(simpleReward, big.NewFromGo(math.ExpNeg(epochLam.Int))) // Q.128 * Q.128 => Q.256 + simpleReward = big.Rsh(simpleReward, math.Precision128) // Q.256 >> 128 => Q.128 + + baselineReward := big.Sub(computeBaselineSupply(currTheta, baselineTotal), computeBaselineSupply(prevTheta, baselineTotal)) // Q.128 + + reward := big.Add(simpleReward, baselineReward) // Q.128 + + return big.Rsh(reward, math.Precision128) // Q.128 => Q.0 +} + +// Computes baseline supply based on theta in Q.128 format. +// Return is in Q.128 format +func computeBaselineSupply(theta, baselineTotal big.Int) big.Int { + thetaLam := big.Mul(theta, Lambda) // Q.128 * Q.128 => Q.256 + thetaLam = big.Rsh(thetaLam, math.Precision128) // Q.256 >> 128 => Q.128 + + eTL := big.NewFromGo(math.ExpNeg(thetaLam.Int)) // Q.128 + + one := big.NewInt(1) + one = big.Lsh(one, math.Precision128) // Q.0 => Q.128 + oneSub := big.Sub(one, eTL) // Q.128 + + return big.Mul(baselineTotal, oneSub) // Q.0 * Q.128 => Q.128 +} + +// SlowConvenientBaselineForEpoch computes baseline power for use in epoch t +// by calculating the value of ThisEpochBaselinePower that shows up in block at t - 1 +// It multiplies ~t times so it should not be used in actor code directly. It is exported as +// convenience for consuming node. 
+func SlowConvenientBaselineForEpoch(targetEpoch abi.ChainEpoch) abi.StoragePower { + baseline := InitBaselinePower() + baseline = BaselinePowerFromPrev(baseline) // value in genesis block (for epoch 1) + for i := abi.ChainEpoch(1); i < targetEpoch; i++ { + baseline = BaselinePowerFromPrev(baseline) // value in block i (for epoch i+1) + } + return baseline +} diff --git a/chain/consensus/actors/reward/reward_logic_test.go b/chain/consensus/actors/reward/reward_logic_test.go new file mode 100644 index 000000000..66b656507 --- /dev/null +++ b/chain/consensus/actors/reward/reward_logic_test.go @@ -0,0 +1,154 @@ +package reward + +import ( + "bytes" + "fmt" + gbig "math/big" + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/stretchr/testify/assert" + "github.com/xorcare/golden" + + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/math" +) + +func q128ToF(x big.Int) float64 { + q128 := new(gbig.Int).SetInt64(1) + q128 = q128.Lsh(q128, math.Precision128) + res, _ := new(gbig.Rat).SetFrac(x.Int, q128).Float64() + return res +} + +func TestComputeRTeta(t *testing.T) { + baselinePowerAt := func(epoch abi.ChainEpoch) abi.StoragePower { + return big.Mul(big.NewInt(int64(epoch+1)), big.NewInt(2048)) + } + + assert.Equal(t, 0.5, q128ToF(ComputeRTheta(1, baselinePowerAt(1), big.NewInt(2048+2*2048*0.5), big.NewInt(2048+2*2048)))) + assert.Equal(t, 0.25, q128ToF(ComputeRTheta(1, baselinePowerAt(1), big.NewInt(2048+2*2048*0.25), big.NewInt(2048+2*2048)))) + + cumsum15 := big.NewInt(0) + for i := abi.ChainEpoch(0); i < 16; i++ { + cumsum15 = big.Add(cumsum15, baselinePowerAt(i)) + } + assert.Equal(t, 15.25, q128ToF(ComputeRTheta(16, + baselinePowerAt(16), + big.Add(cumsum15, big.Div(baselinePowerAt(16), big.NewInt(4))), + big.Add(cumsum15, baselinePowerAt(16))))) +} + +func TestBaselineReward(t *testing.T) { + step := 
gbig.NewInt(5000) + step = step.Lsh(step, math.Precision128) + step = step.Sub(step, gbig.NewInt(77777777777)) // offset from full integers + + delta := gbig.NewInt(1) + delta = delta.Lsh(delta, math.Precision128) + delta = delta.Sub(delta, gbig.NewInt(33333333333)) // offset from full integers + + prevTheta := new(gbig.Int) + theta := new(gbig.Int).Set(delta) + + b := &bytes.Buffer{} + b.WriteString("t0, t1, y\n") + simple := computeReward(0, big.Zero(), big.Zero(), DefaultSimpleTotal, DefaultBaselineTotal) + + for i := 0; i < 512; i++ { + reward := computeReward(0, big.NewFromGo(prevTheta), big.NewFromGo(theta), DefaultSimpleTotal, DefaultBaselineTotal) + reward = big.Sub(reward, simple) + fmt.Fprintf(b, "%s,%s,%s\n", prevTheta, theta, reward.Int) + prevTheta = prevTheta.Add(prevTheta, step) + theta = theta.Add(theta, step) + } + + golden.Assert(t, b.Bytes()) +} + +func TestSimpleReward(t *testing.T) { + b := &bytes.Buffer{} + b.WriteString("x, y\n") + for i := int64(0); i < 512; i++ { + x := i * 5000 + reward := computeReward(abi.ChainEpoch(x), big.Zero(), big.Zero(), DefaultSimpleTotal, DefaultBaselineTotal) + fmt.Fprintf(b, "%d,%s\n", x, reward.Int) + } + + golden.Assert(t, b.Bytes()) +} + +func TestBaselineRewardGrowth(t *testing.T) { + + baselineInYears := func(start abi.StoragePower, x abi.ChainEpoch) abi.StoragePower { + baseline := start + for i := abi.ChainEpoch(0); i < x*builtin.EpochsInYear; i++ { + baseline = BaselinePowerFromPrev(baseline) + } + return baseline + } + + // Baseline reward should have 100% growth rate + // This implies that for every year x, the baseline function should be: + // StartVal * 2^x. + // + // Error values for 1 years of growth were determined empirically with latest + // baseline power construction to set bounds in this test in order to + // 1. throw a test error if function changes and percent error goes up + // 2. 
serve as documentation of current error bounds + type growthTestCase struct { + StartVal abi.StoragePower + ErrBound float64 + } + cases := []growthTestCase{ + // 1 byte + { + abi.NewStoragePower(1), + 1, + }, + // GiB + { + abi.NewStoragePower(1 << 30), + 1e-3, + }, + // TiB + { + abi.NewStoragePower(1 << 40), + 1e-6, + }, + // PiB + { + abi.NewStoragePower(1 << 50), + 1e-8, + }, + // EiB + { + BaselineInitialValue, + 1e-8, + }, + // ZiB + { + big.Lsh(big.NewInt(1), 70), + 1e-8, + }, + // non power of 2 ~ 1 EiB + { + abi.NewStoragePower(513633559722596517), + 1e-8, + }, + } + for _, testCase := range cases { + years := int64(1) + end := baselineInYears(testCase.StartVal, abi.ChainEpoch(1)) + + multiplier := big.Exp(big.NewInt(2), big.NewInt(years)) // keeping this generalized in case we want to test more years + expected := big.Mul(testCase.StartVal, multiplier) + diff := big.Sub(expected, end) + + perrFrac := gbig.NewRat(1, 1).SetFrac(diff.Int, expected.Int) + perr, _ := perrFrac.Float64() + + assert.Less(t, perr, testCase.ErrBound) + } +} diff --git a/chain/consensus/actors/reward/reward_state.go b/chain/consensus/actors/reward/reward_state.go new file mode 100644 index 000000000..b395a44d4 --- /dev/null +++ b/chain/consensus/actors/reward/reward_state.go @@ -0,0 +1,118 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/specs-actors/v6/actors/util/smoothing" +) + +// A quantity of space * time (in byte-epochs) representing power committed to the network for some duration. 
+type Spacetime = big.Int + +// 36.266260308195979333 FIL +// https://www.wolframalpha.com/input/?i=IntegerPart%5B330%2C000%2C000+*+%281+-+Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D%29+*+10%5E18%5D +const InitialRewardPositionEstimateStr = "36266260308195979333" + +var InitialRewardPositionEstimate = big.MustFromString(InitialRewardPositionEstimateStr) + +// -1.0982489*10^-7 FIL per epoch. Change of simple minted tokens between epochs 0 and 1 +// https://www.wolframalpha.com/input/?i=IntegerPart%5B%28Exp%5B-Log%5B2%5D+%2F+%286+*+%281+year+%2F+30+seconds%29%29%5D+-+1%29+*+10%5E18%5D +var InitialRewardVelocityEstimate = abi.NewTokenAmount(-109897758509) + +// Changed since v0: +// - ThisEpochRewardSmoothed is not a pointer +type State struct { + // CumsumBaseline is a target CumsumRealized needs to reach for EffectiveNetworkTime to increase + // CumsumBaseline and CumsumRealized are expressed in byte-epochs. + CumsumBaseline Spacetime + + // CumsumRealized is cumulative sum of network power capped by BaselinePower(epoch) + CumsumRealized Spacetime + + // EffectiveNetworkTime is ceiling of real effective network time `theta` based on + // CumsumBaselinePower(theta) == CumsumRealizedPower + // Theta captures the notion of how much the network has progressed in its baseline + // and in advancing network time. + EffectiveNetworkTime abi.ChainEpoch + + // EffectiveBaselinePower is the baseline power at the EffectiveNetworkTime epoch + EffectiveBaselinePower abi.StoragePower + + // The reward to be paid in per WinCount to block producers. + // The actual reward total paid out depends on the number of winners in any round. + // This value is recomputed every non-null epoch and used in the next non-null epoch. 
+ ThisEpochReward abi.TokenAmount + // Smoothed ThisEpochReward + ThisEpochRewardSmoothed smoothing.FilterEstimate + + // The baseline power the network is targeting at st.Epoch + ThisEpochBaselinePower abi.StoragePower + + // Epoch tracks for which epoch the Reward was computed + Epoch abi.ChainEpoch + + // TotalStoragePowerReward tracks the total FIL awarded to block miners + TotalStoragePowerReward abi.TokenAmount + + // Simple and Baseline totals are constants used for computing rewards. + // They are on chain because of a historical fix resetting baseline value + // in a way that depended on the history leading immediately up to the + // migration fixing the value. These values can be moved from state back + // into a code constant in a subsequent upgrade. + SimpleTotal abi.TokenAmount + BaselineTotal abi.TokenAmount +} + +func ConstructState(currRealizedPower abi.StoragePower) *State { + st := &State{ + CumsumBaseline: big.Zero(), + CumsumRealized: big.Zero(), + EffectiveNetworkTime: 0, + EffectiveBaselinePower: BaselineInitialValue, + + ThisEpochReward: big.Zero(), + ThisEpochBaselinePower: InitBaselinePower(), + Epoch: -1, + + ThisEpochRewardSmoothed: smoothing.NewEstimate(InitialRewardPositionEstimate, InitialRewardVelocityEstimate), + TotalStoragePowerReward: big.Zero(), + + SimpleTotal: DefaultSimpleTotal, + BaselineTotal: DefaultBaselineTotal, + } + + st.updateToNextEpochWithReward(currRealizedPower) + + return st +} + +// Takes in current realized power and updates internal state +// Used for update of internal state during null rounds +func (st *State) updateToNextEpoch(currRealizedPower abi.StoragePower) { + st.Epoch++ + st.ThisEpochBaselinePower = BaselinePowerFromPrev(st.ThisEpochBaselinePower) + cappedRealizedPower := big.Min(st.ThisEpochBaselinePower, currRealizedPower) + st.CumsumRealized = big.Add(st.CumsumRealized, cappedRealizedPower) + + for st.CumsumRealized.GreaterThan(st.CumsumBaseline) { + st.EffectiveNetworkTime++ + 
st.EffectiveBaselinePower = BaselinePowerFromPrev(st.EffectiveBaselinePower) + st.CumsumBaseline = big.Add(st.CumsumBaseline, st.EffectiveBaselinePower) + } +} + +// Takes in a current realized power for a reward epoch and computes +// and updates reward state to track reward for the next epoch +func (st *State) updateToNextEpochWithReward(currRealizedPower abi.StoragePower) { + prevRewardTheta := ComputeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, st.CumsumRealized, st.CumsumBaseline) + st.updateToNextEpoch(currRealizedPower) + currRewardTheta := ComputeRTheta(st.EffectiveNetworkTime, st.EffectiveBaselinePower, st.CumsumRealized, st.CumsumBaseline) + + st.ThisEpochReward = computeReward(st.Epoch, prevRewardTheta, currRewardTheta, st.SimpleTotal, st.BaselineTotal) +} + +func (st *State) updateSmoothedEstimates(delta abi.ChainEpoch) { + filterReward := smoothing.LoadFilter(st.ThisEpochRewardSmoothed, smoothing.DefaultAlpha, smoothing.DefaultBeta) + st.ThisEpochRewardSmoothed = filterReward.NextEstimate(st.ThisEpochReward, delta) +} diff --git a/chain/consensus/actors/reward/reward_test.go b/chain/consensus/actors/reward/reward_test.go new file mode 100644 index 000000000..ddb7c78cb --- /dev/null +++ b/chain/consensus/actors/reward/reward_test.go @@ -0,0 +1,328 @@ +package reward_test + +import ( + "testing" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + actors "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil 
"github.com/filecoin-project/specs-actors/v7/support/testing" +) + +func TestExports(t *testing.T) { + mock.CheckActorExports(t, reward.Actor{}) +} + +const EpochZeroReward = "36266264293777134739" + +func TestConstructor(t *testing.T) { + actor := rewardHarness{reward.Actor{}, t} + + t.Run("construct with 0 power", func(t *testing.T) { + rt := mock.NewBuilder(builtin.RewardActorAddr). + WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID). + Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + st := getState(rt) + assert.Equal(t, abi.ChainEpoch(0), st.Epoch) + assert.Equal(t, abi.NewStoragePower(0), st.CumsumRealized) + assert.Equal(t, big.MustFromString(EpochZeroReward), st.ThisEpochReward) + epochZeroBaseline := big.Sub(reward.BaselineInitialValue, big.NewInt(1)) // account for rounding error of one byte during construction + assert.Equal(t, epochZeroBaseline, st.ThisEpochBaselinePower) + assert.Equal(t, reward.BaselineInitialValue, st.EffectiveBaselinePower) + }) + t.Run("construct with less power than baseline", func(t *testing.T) { + rt := mock.NewBuilder(builtin.RewardActorAddr). + WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID). + Build(t) + startRealizedPower := big.Lsh(abi.NewStoragePower(1), 39) + actor.constructAndVerify(rt, &startRealizedPower) + st := getState(rt) + assert.Equal(t, abi.ChainEpoch(0), st.Epoch) + assert.Equal(t, startRealizedPower, st.CumsumRealized) + + assert.NotEqual(t, big.Zero(), st.ThisEpochReward) + }) + t.Run("construct with more power than baseline", func(t *testing.T) { + rt := mock.NewBuilder(builtin.RewardActorAddr). + WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID). + Build(t) + startRealizedPower := reward.BaselineInitialValue + actor.constructAndVerify(rt, &startRealizedPower) + st := getState(rt) + rwrd := st.ThisEpochReward + + // start with 2x power + rt = mock.NewBuilder(builtin.RewardActorAddr). 
+ WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID). + Build(t) + startRealizedPower = big.Mul(reward.BaselineInitialValue, big.NewInt(2)) + actor.constructAndVerify(rt, &startRealizedPower) + newSt := getState(rt) + // Reward value is the same; realized power impact on reward is capped at baseline + assert.Equal(t, rwrd, newSt.ThisEpochReward) + }) + +} + +func TestExternalFunding(t *testing.T) { + actor := rewardHarness{reward.Actor{}, t} + addr := tutil.NewIDAddr(t, 1000) + builder := mock.NewBuilder(builtin.RewardActorAddr). + WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + + rt.SetBalance(abi.NewTokenAmount(1e18)) + value := abi.NewTokenAmount(1) + params := &reward.FundingParams{Addr: addr, Value: value} + rt.SetCaller(hierarchical.SubnetCoordActorAddr, actors.SubnetCoordActorCodeID) + rt.ExpectValidateCallerAddr(hierarchical.SubnetCoordActorAddr) + rt.ExpectSend(addr, builtin.MethodSend, nil, value, nil, 0) + rt.Call(actor.ExternalFunding, params) + rt.Verify() +} + +func TestAwardBlockReward(t *testing.T) { + actor := rewardHarness{reward.Actor{}, t} + winner := tutil.NewIDAddr(t, 1000) + builder := mock.NewBuilder(builtin.RewardActorAddr). 
+ WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + + t.Run("rejects gas reward exceeding balance", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + + rt.SetBalance(abi.NewTokenAmount(9)) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + gasReward := big.NewInt(10) + actor.awardBlockReward(rt, winner, big.Zero(), gasReward, 1, big.Zero()) + }) + }) + + t.Run("rejects negative penalty or reward", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + + rt.SetBalance(abi.NewTokenAmount(1e18)) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + penalty := big.NewInt(-1) + actor.awardBlockReward(rt, winner, penalty, big.Zero(), 1, big.Zero()) + }) + rt.Reset() + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + gasReward := big.NewInt(-1) + actor.awardBlockReward(rt, winner, big.Zero(), gasReward, 1, big.Zero()) + }) + }) + + t.Run("rejects zero wincount", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + + rt.SetBalance(abi.NewTokenAmount(1e18)) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + actor.awardBlockReward(rt, winner, big.Zero(), big.Zero(), 0, big.Zero()) + }) + rt.Reset() + }) + + t.Run("pays reward and tracks penalty", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(0) + actor.constructAndVerify(rt, &startRealizedPower) + + rt.SetBalance(big.Mul(big.NewInt(1e9), abi.NewTokenAmount(1e18))) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + penalty := big.NewInt(100) + gasReward := big.NewInt(200) + expectedReward := 
big.Sum(big.Div(big.MustFromString(EpochZeroReward), big.NewInt(5)), gasReward) + actor.awardBlockReward(rt, winner, penalty, gasReward, 1, expectedReward) + rt.Reset() + }) + + t.Run("pays out current balance when reward exceeds total balance", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(1) + actor.constructAndVerify(rt, &startRealizedPower) + + // Total reward is a huge number, upon writing ~1e18, so 300 should be way less + smallReward := abi.NewTokenAmount(300) + penalty := abi.NewTokenAmount(100) + rt.SetBalance(smallReward) + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + + minerPenalty := big.Mul(big.NewInt(reward.PenaltyMultiplier), penalty) + expectedParams := builtin.ApplyRewardParams{Reward: smallReward, Penalty: minerPenalty} + rt.ExpectSend(winner, builtin.MethodsMiner.ApplyRewards, &expectedParams, smallReward, nil, 0) + rt.Call(actor.AwardBlockReward, &reward.AwardBlockRewardParams{ + Miner: winner, + Penalty: penalty, + GasReward: big.Zero(), + WinCount: 1, + }) + rt.Verify() + }) + + t.Run("TotalStoragePowerReward tracks correctly", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(1) + actor.constructAndVerify(rt, &startRealizedPower) + miner := tutil.NewIDAddr(t, 1000) + + st := getState(rt) + assert.Equal(t, big.Zero(), st.TotalStoragePowerReward) + st.ThisEpochReward = abi.NewTokenAmount(5000) + rt.ReplaceState(st) + // enough balance to pay 3 full rewards and one partial + totalPayout := abi.NewTokenAmount(3500) + rt.SetBalance(totalPayout) + + // award normalized by expected leaders is 1000 + actor.awardBlockReward(rt, miner, big.Zero(), big.Zero(), 1, big.NewInt(1000)) + actor.awardBlockReward(rt, miner, big.Zero(), big.Zero(), 1, big.NewInt(1000)) + actor.awardBlockReward(rt, miner, big.Zero(), big.Zero(), 1, big.NewInt(1000)) + actor.awardBlockReward(rt, miner, big.Zero(), big.Zero(), 1, big.NewInt(500)) // partial payout when balance below reward 
+ + newState := getState(rt) + assert.Equal(t, totalPayout, newState.TotalStoragePowerReward) + + }) + + t.Run("funds are sent to the burnt funds actor if sending locked funds to miner fails", func(t *testing.T) { + rt := builder.Build(t) + startRealizedPower := abi.NewStoragePower(1) + actor.constructAndVerify(rt, &startRealizedPower) + miner := tutil.NewIDAddr(t, 1000) + st := getState(rt) + assert.Equal(t, big.Zero(), st.TotalStoragePowerReward) + st.ThisEpochReward = abi.NewTokenAmount(5000) + rt.ReplaceState(st) + // enough balance to pay 3 full rewards and one partial + totalPayout := abi.NewTokenAmount(3500) + rt.SetBalance(totalPayout) + + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + expectedReward := big.NewInt(1000) + penalty := big.Zero() + expectedParams := builtin.ApplyRewardParams{Reward: expectedReward, Penalty: penalty} + rt.ExpectSend(miner, builtin.MethodsMiner.ApplyRewards, &expectedParams, expectedReward, nil, exitcode.ErrForbidden) + rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, expectedReward, nil, exitcode.Ok) + + rt.Call(actor.AwardBlockReward, &reward.AwardBlockRewardParams{ + Miner: miner, + Penalty: big.Zero(), + GasReward: big.Zero(), + WinCount: 1, + }) + + rt.Verify() + }) +} + +func TestThisEpochReward(t *testing.T) { + t.Run("successfully fetch reward for this epoch", func(t *testing.T) { + actor := rewardHarness{reward.Actor{}, t} + builder := mock.NewBuilder(builtin.RewardActorAddr). 
+ WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + power := abi.NewStoragePower(1 << 50) + actor.constructAndVerify(rt, &power) + + resp := actor.thisEpochReward(rt) + st := getState(rt) + + require.EqualValues(t, st.ThisEpochBaselinePower, resp.ThisEpochBaselinePower) + require.EqualValues(t, st.ThisEpochRewardSmoothed, resp.ThisEpochRewardSmoothed) + }) +} + +func TestSuccessiveKPIUpdates(t *testing.T) { + actor := rewardHarness{reward.Actor{}, t} + builder := mock.NewBuilder(builtin.RewardActorAddr). + WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + power := abi.NewStoragePower(1 << 50) + actor.constructAndVerify(rt, &power) + + rt.SetEpoch(abi.ChainEpoch(1)) + actor.updateNetworkKPI(rt, &power) + + rt.SetEpoch(abi.ChainEpoch(2)) + actor.updateNetworkKPI(rt, &power) + + rt.SetEpoch(abi.ChainEpoch(3)) + actor.updateNetworkKPI(rt, &power) + +} + +type rewardHarness struct { + reward.Actor + t testing.TB +} + +func (h *rewardHarness) constructAndVerify(rt *mock.Runtime, currRawPower *abi.StoragePower) { + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + ret := rt.Call(h.Constructor, currRawPower) + assert.Nil(h.t, ret) + rt.Verify() + +} + +func (h *rewardHarness) updateNetworkKPI(rt *mock.Runtime, currRawPower *abi.StoragePower) { + rt.SetCaller(builtin.StoragePowerActorAddr, builtin.StoragePowerActorCodeID) + rt.ExpectValidateCallerAddr(builtin.StoragePowerActorAddr) + ret := rt.Call(h.UpdateNetworkKPI, currRawPower) + assert.Nil(h.t, ret) + rt.Verify() +} + +func (h *rewardHarness) awardBlockReward(rt *mock.Runtime, miner address.Address, penalty, gasReward abi.TokenAmount, winCount int64, expectedPayment abi.TokenAmount) { + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + // expect penalty multiplier + minerPenalty := big.Mul(big.NewInt(reward.PenaltyMultiplier), penalty) + expectedParams := builtin.ApplyRewardParams{Reward: expectedPayment, Penalty: minerPenalty} + 
rt.ExpectSend(miner, builtin.MethodsMiner.ApplyRewards, &expectedParams, expectedPayment, nil, 0) + + rt.Call(h.AwardBlockReward, &reward.AwardBlockRewardParams{ + Miner: miner, + Penalty: penalty, + GasReward: gasReward, + WinCount: winCount, + }) + rt.Verify() +} + +func (h *rewardHarness) thisEpochReward(rt *mock.Runtime) *reward.ThisEpochRewardReturn { + rt.ExpectValidateCallerAny() + + ret := rt.Call(h.ThisEpochReward, nil) + rt.Verify() + + resp, ok := ret.(*reward.ThisEpochRewardReturn) + require.True(h.t, ok) + return resp +} + +func getState(rt *mock.Runtime) *reward.State { + var st reward.State + rt.GetState(&st) + return &st +} diff --git a/chain/consensus/actors/reward/testdata/TestBaselineReward.golden b/chain/consensus/actors/reward/testdata/TestBaselineReward.golden new file mode 100644 index 000000000..1a114ad84 --- /dev/null +++ b/chain/consensus/actors/reward/testdata/TestBaselineReward.golden @@ -0,0 +1,513 @@ +t0, t1, y +0,340282366920938463463374607398434878123,84621274052457285111 +1701411834604692317316873037158763279502223,1701752116971613255780336411766161714380346,84574788381047331439 +3402823669209384634633746074317526559004446,3403163951576305573097209448924924993882569,84528328245977625901 +5104235503814076951950619111476289838506669,5104575786180997890414082486083688273384792,84481893633220089447 +6805647338418769269267492148635053118008892,6805987620785690207730955523242451552887015,84435484528754349181 +8507059173023461586584365185793816397511115,8507399455390382525047828560401214832389238,84389100918567734129 +10208471007628153903901238222952579677013338,10208811289995074842364701597559978111891461,84342742788655271010 +11909882842232846221218111260111342956515561,11910223124599767159681574634718741391393684,84296410125019680001 +13611294676837538538534984297270106236017784,13611634959204459476998447671877504670895907,84250102913671370520 
+15312706511442230855851857334428869515520007,15313046793809151794315320709036267950398130,84203821140628436995 +17014118346046923173168730371587632795022230,17014458628413844111632193746195031229900353,84157564791916654642 +18715530180651615490485603408746396074524453,18715870463018536428949066783353794509402576,84111333853569475252 +20416942015256307807802476445905159354026676,20417282297623228746265939820512557788904799,84065128311628022968 +22118353849861000125119349483063922633528899,22118694132227921063582812857671321068407022,84018948152141090071 +23819765684465692442436222520222685913031122,23820105966832613380899685894830084347909245,83972793361165132773 +25521177519070384759753095557381449192533345,25521517801437305698216558931988847627411468,83926663924764266998 +27222589353675077077069968594540212472035568,27222929636041998015533431969147610906913691,83880559829010264183 +28924001188279769394386841631698975751537791,28924341470646690332850305006306374186415914,83834481059982547065 +30625413022884461711703714668857739031040014,30625753305251382650167178043465137465918137,83788427603768185484 +32326824857489154029020587706016502310542237,32327165139856074967484051080623900745420360,83742399446461892180 +34028236692093846346337460743175265590044460,34028576974460767284800924117782664024922583,83696396574166018591 +35729648526698538663654333780334028869546683,35729988809065459602117797154941427304424806,83650418972990550662 +37431060361303230980971206817492792149048906,37431400643670151919434670192100190583927029,83604466629053104650 +39132472195907923298288079854651555428551129,39132812478274844236751543229258953863429252,83558539528478922929 +40833884030512615615604952891810318708053352,40834224312879536554068416266417717142931475,83512637657400869803 +42535295865117307932921825928969081987555575,42535636147484228871385289303576480422433698,83466761001959427321 
+44236707699722000250238698966127845267057798,44237047982088921188702162340735243701935921,83420909548302691088 +45938119534326692567555572003286608546560021,45938459816693613506019035377894006981438144,83375083282586366086 +47639531368931384884872445040445371826062244,47639871651298305823335908415052770260940367,83329282190973762493 +49340943203536077202189318077604135105564467,49341283485902998140652781452211533540442590,83283506259635791504 +51042355038140769519506191114762898385066690,51042695320507690457969654489370296819944813,83237755474750961157 +52743766872745461836823064151921661664568913,52744107155112382775286527526529060099447036,83192029822505372158 +54445178707350154154139937189080424944071136,54445518989717075092603400563687823378949259,83146329289092713713 +56146590541954846471456810226239188223573359,56146930824321767409920273600846586658451482,83100653860714259356 +57848002376559538788773683263397951503075582,57848342658926459727237146638005349937953705,83055003523578862785 +59549414211164231106090556300556714782577805,59549754493531152044554019675164113217455928,83009378263902953696 +61250826045768923423407429337715478062080028,61251166328135844361870892712322876496958151,82963778067910533624 +62952237880373615740724302374874241341582251,62952578162740536679187765749481639776460374,82918202921833171780 +64653649714978308058041175412033004621084474,64653989997345228996504638786640403055962597,82872652811910000897 +66355061549583000375358048449191767900586697,66355401831949921313821511823799166335464820,82827127724387713073 +68056473384187692692674921486350531180088920,68056813666554613631138384860957929614967043,82781627645520555620 +69757885218792385009991794523509294459591143,69758225501159305948455257898116692894469266,82736152561570326910 +71459297053397077327308667560668057739093366,71459637335763998265772130935275456173971489,82690702458806372233 
+73160708888001769644625540597826821018595589,73161049170368690583089003972434219453473712,82645277323505579646 +74862120722606461961942413634985584298097812,74862461004973382900405877009592982732975935,82599877141952375831 +76563532557211154279259286672144347577600035,76563872839578075217722750046751746012478158,82554501900438721954 +78264944391815846596576159709303110857102258,78265284674182767535039623083910509291980381,82509151585264109528 +79966356226420538913893032746461874136604481,79966696508787459852356496121069272571482604,82463826182735556271 +81667768061025231231209905783620637416106704,81668108343392152169673369158228035850984827,82418525679167601977 +83369179895629923548526778820779400695608927,83369520177996844486990242195386799130487050,82373250060882304382 +85070591730234615865843651857938163975111150,85070932012601536804307115232545562409989273,82327999314209235033 +86772003564839308183160524895096927254613373,86772343847206229121623988269704325689491496,82282773425485475161 +88473415399444000500477397932255690534115596,88473755681810921438940861306863088968993719,82237572381055611557 +90174827234048692817794270969414453813617819,90175167516415613756257734344021852248495942,82192396167271732445 +91876239068653385135111144006573217093120042,91876579351020306073574607381180615527998165,82147244770493423368 +93577650903258077452428017043731980372622265,93577991185624998390891480418339378807500388,82102118177087763062 +95279062737862769769744890080890743652124488,95279403020229690708208353455498142087002611,82057016373429319346 +96980474572467462087061763118049506931626711,96980814854834383025525226492656905366504834,82011939345900145001 +98681886407072154404378636155208270211128934,98682226689439075342842099529815668646007057,81966887080889773666 +100383298241676846721695509192367033490631157,100383638524043767660158972566974431925509280,81921859564795215724 
+102084710076281539039012382229525796770133380,102085050358648459977475845604133195205011503,81876856784020954195 +103786121910886231356329255266684560049635603,103786462193253152294792718641291958484513726,81831878724978940630 +105487533745490923673646128303843323329137826,105487874027857844612109591678450721764015949,81786925374088591012 +107188945580095615990963001341002086608640049,107189285862462536929426464715609485043518172,81741996717776781652 +108890357414700308308279874378160849888142272,108890697697067229246743337752768248323020395,81697092742477845094 +110591769249305000625596747415319613167644495,110592109531671921564060210789927011602522618,81652213434633566014 +112293181083909692942913620452478376447146718,112293521366276613881377083827085774882024841,81607358780693177131 +113994592918514385260230493489637139726648941,113994933200881306198693956864244538161527064,81562528767113355113 +115696004753119077577547366526795903006151164,115696345035485998516010829901403301441029287,81517723380358216491 +117397416587723769894864239563954666285653387,117397756870090690833327702938562064720531510,81472942606899313566 +119098828422328462212181112601113429565155610,119099168704695383150644575975720828000033733,81428186433215630329 +120800240256933154529497985638272192844657833,120800580539300075467961449012879591279535956,81383454845793578379 +122501652091537846846814858675430956124160056,122501992373904767785278322050038354559038179,81338747831126992840 +124203063926142539164131731712589719403662279,124203404208509460102595195087197117838540402,81294065375717128282 +125904475760747231481448604749748482683164502,125904816043114152419912068124355881118042625,81249407466072654651 +127605887595351923798765477786907245962666725,127606227877718844737228941161514644397544848,81204774088709653188 +129307299429956616116082350824066009242168948,129307639712323537054545814198673407677047071,81160165230151612364 
+131008711264561308433399223861224772521671171,131009051546928229371862687235832170956549294,81115580876929423806 +132710123099166000750716096898383535801173394,132710463381532921689179560272990934236051517,81071021015581378236 +134411534933770693068032969935542299080675617,134411875216137614006496433310149697515553740,81026485632653161398 +136112946768375385385349842972701062360177840,136113287050742306323813306347308460795055963,80981974714697850005 +137814358602980077702666716009859825639680063,137814698885346998641130179384467224074558186,80937488248275907672 +139515770437584770019983589047018588919182286,139516110719951690958447052421625987354060409,80893026219955180860 +141217182272189462337300462084177352198684509,141217522554556383275763925458784750633562632,80848588616310894823 +142918594106794154654617335121336115478186732,142918934389161075593080798495943513913064855,80804175423925649550 +144620005941398846971934208158494878757688955,144620346223765767910397671533102277192567078,80759786629389415718 +146321417776003539289251081195653642037191178,146321758058370460227714544570261040472069301,80715422219299530638 +148022829610608231606567954232812405316693401,148023169892975152545031417607419803751571524,80671082180260694216 +149724241445212923923884827269971168596195624,149724581727579844862348290644578567031073747,80626766498884964900 +151425653279817616241201700307129931875697847,151425993562184537179665163681737330310575970,80582475161791755644 +153127065114422308558518573344288695155200070,153127405396789229496982036718896093590078193,80538208155607829862 +154828476949027000875835446381447458434702293,154828817231393921814298909756054856869580416,80493965466967297397 +156529888783631693193152319418606221714204516,156530229065998614131615782793213620149082639,80449747082511610479 +158231300618236385510469192455764984993706739,158231640900603306448932655830372383428584862,80405552988889559697 
+159932712452841077827786065492923748273208962,159933052735207998766249528867531146708087085,80361383172757269963 +161634124287445770145102938530082511552711185,161634464569812691083566401904689909987589308,80317237620778196486 +163335536122050462462419811567241274832213408,163335876404417383400883274941848673267091531,80273116319623120744 +165036947956655154779736684604400038111715631,165037288239022075718200147979007436546593754,80229019255970146460 +166738359791259847097053557641558801391217854,166738700073626768035517021016166199826095977,80184946416504695579 +168439771625864539414370430678717564670720077,168440111908231460352833894053324963105598200,80140897787919504247 +170141183460469231731687303715876327950222300,170141523742836152670150767090483726385100423,80096873356914618796 +171842595295073924049004176753035091229724523,171842935577440844987467640127642489664602646,80052873110197391726 +173544007129678616366321049790193854509226746,173544347412045537304784513164801252944104869,80008897034482477690 +175245418964283308683637922827352617788728969,175245759246650229622101386201960016223607092,79964945116491829488 +176946830798888001000954795864511381068231192,176947171081254921939418259239118779503109315,79921017342954694050 +178648242633492693318271668901670144347733415,178648582915859614256735132276277542782611538,79877113700607608438 +180349654468097385635588541938828907627235638,180349994750464306574052005313436306062113761,79833234176194395831 +182051066302702077952905414975987670906737861,182051406585068998891368878350595069341615984,79789378756466161534 +183752478137306770270222288013146434186240084,183752818419673691208685751387753832621118207,79745547428181288968 +185453889971911462587539161050305197465742307,185454230254278383526002624424912595900620430,79701740178105435677 +187155301806516154904856034087463960745244530,187155642088883075843319497462071359180122653,79657956993011529330 
+188856713641120847222172907124622724024746753,188857053923487768160636370499230122459624876,79614197859679763728 +190558125475725539539489780161781487304248976,190558465758092460477953243536388885739127099,79570462764897594812 +192259537310330231856806653198940250583751199,192259877592697152795270116573547649018629322,79526751695459736676 +193960949144934924174123526236099013863253422,193961289427301845112586989610706412298131545,79483064638168157575 +195662360979539616491440399273257777142755645,195662701261906537429903862647865175577633768,79439401579832075944 +197363772814144308808757272310416540422257868,197364113096511229747220735685023938857135991,79395762507267956415 +199065184648749001126074145347575303701760091,199065524931115922064537608722182702136638214,79352147407299505834 +200766596483353693443391018384734066981262314,200766936765720614381854481759341465416140437,79308556266757669284 +202468008317958385760707891421892830260764537,202468348600325306699171354796500228695642660,79264989072480626109 +204169420152563078078024764459051593540266760,204169760434929999016488227833658991975144883,79221445811313785940 +205870831987167770395341637496210356819768983,205871172269534691333805100870817755254647106,79177926470109784723 +207572243821772462712658510533369120099271206,207572584104139383651121973907976518534149329,79134431035728480748 +209273655656377155029975383570527883378773429,209273995938744075968438846945135281813651552,79090959495036950685 +210975067490981847347292256607686646658275652,210975407773348768285755719982294045093153775,79047511834909485612 +212676479325586539664609129644845409937777875,212676819607953460603072593019452808372655998,79004088042227587061 +214377891160191231981926002682004173217280098,214378231442558152920389466056611571652158221,78960688103879963048 +216079302994795924299242875719162936496782321,216079643277162845237706339093770334931660444,78917312006762524122 
+217780714829400616616559748756321699776284544,217781055111767537555023212130929098211162667,78873959737778379402 +219482126664005308933876621793480463055786767,219482466946372229872340085168087861490664890,78830631283837832627 +221183538498610001251193494830639226335288990,221183878780976922189656958205246624770167113,78787326631858378200 +222884950333214693568510367867797989614791213,222885290615581614506973831242405388049669336,78744045768764697242 +224586362167819385885827240904956752894293436,224586702450186306824290704279564151329171559,78700788681488653641 +226287774002424078203144113942115516173795659,226288114284790999141607577316722914608673782,78657555356969290108 +227989185837028770520460986979274279453297882,227989526119395691458924450353881677888176005,78614345782152824231 +229690597671633462837777860016433042732800105,229690937954000383776241323391040441167678228,78571159943992644537 +231392009506238155155094733053591806012302328,231392349788605076093558196428199204447180451,78527997829449306550 +233093421340842847472411606090750569291804551,233093761623209768410875069465357967726682674,78484859425490528854 +234794833175447539789728479127909332571306774,234795173457814460728191942502516731006184897,78441744719091189160 +236496245010052232107045352165068095850808997,236496585292419153045508815539675494285687120,78398653697233320373 +238197656844656924424362225202226859130311220,238197997127023845362825688576834257565189343,78355586346906106657 +239899068679261616741679098239385622409813443,239899408961628537680142561613993020844691566,78312542655105879515 +241600480513866309058995971276544385689315666,241600820796233229997459434651151784124193789,78269522608836113853 +243301892348471001376312844313703148968817889,243302232630837922314776307688310547403696012,78226526195107424063 +245003304183075693693629717350861912248320112,245003644465442614632093180725469310683198235,78183553400937560098 
+246704716017680386010946590388020675527822335,246705056300047306949410053762628073962700458,78140604213351403553 +248406127852285078328263463425179438807324558,248406468134651999266726926799786837242202681,78097678619380963748 +250107539686889770645580336462338202086826781,250107879969256691584043799836945600521704904,78054776606065373809 +251808951521494462962897209499496965366329004,251809291803861383901360672874104363801207127,78011898160450886759 +253510363356099155280214082536655728645831227,253510703638466076218677545911263127080709350,77969043269590871604 +255211775190703847597530955573814491925333450,255212115473070768535994418948421890360211573,77926211920545809426 +256913187025308539914847828610973255204835673,256913527307675460853311291985580653639713796,77883404100383289474 +258614598859913232232164701648132018484337896,258614939142280153170628165022739416919216019,77840619796178005261 +260316010694517924549481574685290781763840119,260316350976884845487945038059898180198718242,77797858995011750659 +262017422529122616866798447722449545043342342,262017762811489537805261911097056943478220465,77755121683973416002 +263718834363727309184115320759608308322844565,263719174646094230122578784134215706757722688,77712407850158984183 +265420246198332001501432193796767071602346788,265420586480698922439895657171374470037224911,77669717480671526762 +267121658032936693818749066833925834881849011,267121998315303614757212530208533233316727134,77627050562621200072 +268823069867541386136065939871084598161351234,268823410149908307074529403245691996596229357,77584407083125241322 +270524481702146078453382812908243361440853457,270524821984512999391846276282850759875731580,77541787029307964712 +272225893536750770770699685945402124720355680,272226233819117691709163149320009523155233803,77499190388300757545 +273927305371355463088016558982560887999857903,273927645653722384026480022357168286434736026,77456617147242076341 
+275628717205960155405333432019719651279360126,275629057488327076343796895394327049714238249,77414067293277442949 +277330129040564847722650305056878414558862349,277330469322931768661113768431485812993740472,77371540813559440674 +279031540875169540039967178094037177838364572,279031881157536460978430641468644576273242695,77329037695247710392 +280732952709774232357284051131195941117866795,280733292992141153295747514505803339552744918,77286557925508946675 +282434364544378924674600924168354704397369018,282434704826745845613064387542962102832247141,77244101491516893916 +284135776378983616991917797205513467676871241,284136116661350537930381260580120866111749364,77201668380452342454 +285837188213588309309234670242672230956373464,285837528495955230247698133617279629391251587,77159258579503124710 +287538600048193001626551543279830994235875687,287538940330559922565015006654438392670753810,77116872075864111309 +289240011882797693943868416316989757515377910,289240352165164614882331879691597155950256033,77074508856737207221 +290941423717402386261185289354148520794880133,290941763999769307199648752728755919229758256,77032168909331347895 +292642835552007078578502162391307284074382356,292643175834373999516965625765914682509260479,76989852220862495396 +294344247386611770895819035428466047353884579,294344587668978691834282498803073445788762702,76947558778553634544 +296045659221216463213135908465624810633386802,296045999503583384151599371840232209068264925,76905288569634769059 +297747071055821155530452781502783573912889025,297747411338188076468916244877390972347767148,76863041581342917702 +299448482890425847847769654539942337192391248,299448823172792768786233117914549735627269371,76820817800922110425 +301149894725030540165086527577101100471893471,301150235007397461103549990951708498906771594,76778617215623384516 +302851306559635232482403400614259863751395694,302851646842002153420866863988867262186273817,76736439812704780753 
+304552718394239924799720273651418627030897917,304553058676606845738183737026026025465776040,76694285579431339553 +306254130228844617117037146688577390310400140,306254470511211538055500610063184788745278263,76652154503075097129 +307955542063449309434354019725736153589902363,307955882345816230372817483100343552024780486,76610046570915081648 +309656953898054001751670892762894916869404586,309657294180420922690134356137502315304282709,76567961770237309387 +311358365732658694068987765800053680148906809,311358706015025615007451229174661078583784932,76525900088334780897 +313059777567263386386304638837212443428409032,313060117849630307324768102211819841863287155,76483861512507477165 +314761189401868078703621511874371206707911255,314761529684234999642084975248978605142789378,76441846030062355780 +316462601236472771020938384911529969987413478,316462941518839691959401848286137368422291601,76399853628313347098 +318164013071077463338255257948688733266915701,318164353353444384276718721323296131701793824,76357884294581350416 +319865424905682155655572130985847496546417924,319865765188049076594035594360454894981296047,76315938016194230140 +321566836740286847972889004023006259825920147,321567177022653768911352467397613658260798270,76274014780486811961 +323268248574891540290205877060165023105422370,323268588857258461228669340434772421540300493,76232114574800879029 +324969660409496232607522750097323786384924593,324970000691863153545986213471931184819802716,76190237386485168132 +326671072244100924924839623134482549664426816,326671412526467845863303086509089948099304939,76148383202895365878 +328372484078705617242156496171641312943929039,328372824361072538180619959546248711378807162,76106552011394104874 +330073895913310309559473369208800076223431262,330074236195677230497936832583407474658309385,76064743799350959910 +331775307747915001876790242245958839502933485,331775648030281922815253705620566237937811608,76022958554142444150 
+333476719582519694194107115283117602782435708,333477059864886615132570578657725001217313831,75981196263152005315 +335178131417124386511423988320276366061937931,335178471699491307449887451694883764496816054,75939456913770021879 +336879543251729078828740861357435129341440154,336879883534095999767204324732042527776318277,75897740493393799256 +338580955086333771146057734394593892620942377,338581295368700692084521197769201291055820500,75856046989427565998 +340282366920938463463374607431752655900444600,340282707203305384401838070806360054335322723,75814376389282469993 +341983778755543155780691480468911419179946823,341984119037910076719154943843518817614824946,75772728680376574661 +343685190590147848098008353506070182459449046,343685530872514769036471816880677580894327169,75731103850134855157 +345386602424752540415325226543228945738951269,345386942707119461353788689917836344173829392,75689501885989194573 +347088014259357232732642099580387709018453492,347088354541724153671105562954995107453331615,75647922775378380143 +348789426093961925049958972617546472297955715,348789766376328845988422435992153870732833838,75606366505748099453 +350490837928566617367275845654705235577457938,350491178210933538305739309029312634012336061,75564833064550936648 +352192249763171309684592718691863998856960161,352192590045538230623056182066471397291838284,75523322439246368641 +353893661597776002001909591729022762136462384,353894001880142922940373055103630160571340507,75481834617300761334 +355595073432380694319226464766181525415964607,355595413714747615257689928140788923850842730,75440369586187365825 +357296485266985386636543337803340288695466830,357296825549352307575006801177947687130344953,75398927333386314632 +358997897101590078953860210840499051974969053,358998237383956999892323674215106450409847176,75357507846384617909 +360699308936194771271177083877657815254471276,360699649218561692209640547252265213689349399,75316111112676159670 
+362400720770799463588493956914816578533973499,362401061053166384526957420289423976968851622,75274737119761694014 +364102132605404155905810829951975341813475722,364102472887771076844274293326582740248353845,75233385855148841346 +365803544440008848223127702989134105092977945,365803884722375769161591166363741503527856068,75192057306352084610 +367504956274613540540444576026292868372480168,367505296556980461478908039400900266807358291,75150751460892765518 +369206368109218232857761449063451631651982391,369206708391585153796224912438059030086860514,75109468306299080781 +370907779943822925175078322100610394931484614,370908120226189846113541785475217793366362737,75068207830106078345 +372609191778427617492395195137769158210986837,372609532060794538430858658512376556645864960,75026970019855653623 +374310603613032309809712068174927921490489060,374310943895399230748175531549535319925367183,74985754863096545742 +376012015447637002127028941212086684769991283,376012355730003923065492404586694083204869406,74944562347384333773 +377713427282241694444345814249245448049493506,377713767564608615382809277623852846484371629,74903392460281432984 +379414839116846386761662687286404211328995729,379415179399213307700126150661011609763873852,74862245189357091075 +381116250951451079078979560323562974608497952,381116591233818000017443023698170373043376075,74821120522187384431 +382817662786055771396296433360721737888000175,382818003068422692334759896735329136322878298,74780018446355214370 +384519074620660463713613306397880501167502398,384519414903027384652076769772487899602380521,74738938949450303391 +386220486455265156030930179435039264447004621,386220826737632076969393642809646662881882744,74697882019069191428 +387921898289869848348247052472198027726506844,387922238572236769286710515846805426161384967,74656847642815232108 +389623310124474540665563925509356791006009067,389623650406841461604027388883964189440887190,74615835808298589003 
+391324721959079232982880798546515554285511290,391325062241446153921344261921122952720389413,74574846503136231892 +393026133793683925300197671583674317565013513,393026474076050846238661134958281715999891636,74533879714951933024 +394727545628288617617514544620833080844515736,394727885910655538555978007995440479279393859,74492935431376263375 +396428957462893309934831417657991844124017959,396429297745260230873294881032599242558896082,74452013640046588920 +398130369297498002252148290695150607403520182,398130709579864923190611754069758005838398305,74411114328607066894 +399831781132102694569465163732309370683022405,399832121414469615507928627106916769117900528,74370237484708642069 +401533192966707386886782036769468133962524628,401533533249074307825245500144075532397402751,74329383096009043016 +403234604801312079204098909806626897242026851,403234945083679000142562373181234295676904974,74288551150172778387 +404936016635916771521415782843785660521529074,404936356918283692459879246218393058956407197,74247741634871133184 +406637428470521463838732655880944423801031297,406637768752888384777196119255551822235909420,74206954537782165041 +408338840305126156156049528918103187080533520,408339180587493077094512992292710585515411643,74166189846590700502 +410040252139730848473366401955261950360035743,410040592422097769411829865329869348794913866,74125447548988331299 +411741663974335540790683274992420713639537966,411742004256702461729146738367028112074416089,74084727632673410643 +413443075808940233108000148029579476919040189,413443416091307154046463611404186875353918312,74044030085351049503 +415144487643544925425317021066738240198542412,415144827925911846363780484441345638633420535,74003354894733112898 +416845899478149617742633894103897003478044635,416846239760516538681097357478504401912922758,73962702048538216183 +418547311312754310059950767141055766757546858,418547651595121230998414230515663165192424981,73922071534491721346 
+420248723147359002377267640178214530037049081,420249063429725923315731103552821928471927204,73881463340325733294 +421950134981963694694584513215373293316551304,421950475264330615633047976589980691751429427,73840877453779096158 +423651546816568387011901386252532056596053527,423651887098935307950364849627139455030931650,73800313862597389584 +425352958651173079329218259289690819875555750,425353298933540000267681722664298218310433873,73759772554532925036 +427054370485777771646535132326849583155057973,427054710768144692584998595701456981589936096,73719253517344742097 +428755782320382463963852005364008346434560196,428756122602749384902315468738615744869438319,73678756738798604773 +430457194154987156281168878401167109714062419,430457534437354077219632341775774508148940542,73638282206666997800 +432158605989591848598485751438325872993564642,432158946271958769536949214812933271428442765,73597829908729122951 +433860017824196540915802624475484636273066865,433860358106563461854266087850092034707944988,73557399832770895347 +435561429658801233233119497512643399552569088,435561769941168154171582960887250797987447211,73516991966584939767 +437262841493405925550436370549802162832071311,437263181775772846488899833924409561266949434,73476606297970586965 +438964253328010617867753243586960926111573534,438964593610377538806216706961568324546451657,73436242814733869984 +440665665162615310185070116624119689391075757,440666005444982231123533579998727087825953880,73395901504687520475 +442367076997220002502386989661278452670577980,442367417279586923440850453035885851105456103,73355582355650965019 +444068488831824694819703862698437215950080203,444068829114191615758167326073044614384958326,73315285355450321444 +445769900666429387137020735735595979229582426,445770240948796308075484199110203377664460549,73275010491918395156 +447471312501034079454337608772754742509084649,447471652783401000392801072147362140943962772,73234757752894675460 
+449172724335638771771654481809913505788586872,449173064618005692710117945184520904223464995,73194527126225331890 +450874136170243464088971354847072269068089095,450874476452610385027434818221679667502967218,73154318599763210541 +452575548004848156406288227884231032347591318,452575888287215077344751691258838430782469441,73114132161367830399 +454276959839452848723605100921389795627093541,454277300121819769662068564295997194061971664,73073967798905379675 +455978371674057541040921973958548558906595764,455978711956424461979385437333155957341473887,73033825500248712143 +457679783508662233358238846995707322186097987,457680123791029154296702310370314720620976110,72993705253277343480 +459381195343266925675555720032866085465600210,459381535625633846614019183407473483900478333,72953607045877447602 +461082607177871617992872593070024848745102433,461082947460238538931336056444632247179980556,72913530865941853008 +462784019012476310310189466107183612024604656,462784359294843231248652929481791010459482779,72873476701370039127 +464485430847081002627506339144342375304106879,464485771129447923565969802518949773738985002,72833444540068132661 +466186842681685694944823212181501138583609102,466187182964052615883286675556108537018487225,72793434369948903935 +467888254516290387262140085218659901863111325,467888594798657308200603548593267300297989448,72753446178931763248 +469589666350895079579456958255818665142613548,469590006633262000517920421630426063577491671,72713479954942757222 +471291078185499771896773831292977428422115771,471291418467866692835237294667584826856993894,72673535685914565161 +472992490020104464214090704330136191701617994,472992830302471385152554167704743590136496117,72633613359786495407 +474693901854709156531407577367294954981120217,474694242137076077469871040741902353415998340,72593712964504481693 +476395313689313848848724450404453718260622440,476395653971680769787187913779061116695500563,72553834488021079509 
+478096725523918541166041323441612481540124663,478097065806285462104504786816219879975002786,72513977918295462464 +479798137358523233483358196478771244819626886,479798477640890154421821659853378643254505009,72474143243293418647 +481499549193127925800675069515930008099129109,481499889475494846739138532890537406534007232,72434330450987346997 +483200961027732618117991942553088771378631332,483201301310099539056455405927696169813509455,72394539529356253668 +484902372862337310435308815590247534658133555,484902713144704231373772278964854933093011678,72354770466385748403 +486603784696942002752625688627406297937635778,486604124979308923691089152002013696372513901,72315023250068040905 +488305196531546695069942561664565061217138001,488305536813913616008406025039172459652016124,72275297868401937209 +490006608366151387387259434701723824496640224,490006948648518308325722898076331222931518347,72235594309392836063 +491708020200756079704576307738882587776142447,491708360483123000643039771113489986211020570,72195912561052725301 +493409432035360772021893180776041351055644670,493409772317727692960356644150648749490522793,72156252611400178229 +495110843869965464339210053813200114335146893,495111184152332385277673517187807512770025016,72116614448460350002 +496812255704570156656526926850358877614649116,496812595986937077594990390224966276049527239,72076998060264974014 +498513667539174848973843799887517640894151339,498514007821541769912307263262125039329029462,72037403434852358278 +500215079373779541291160672924676404173653562,500215419656146462229624136299283802608531685,71997830560267381819 +501916491208384233608477545961835167453155785,501916831490751154546941009336442565888033908,71958279424561491064 +503617903042988925925794418998993930732658008,503618243325355846864257882373601329167536131,71918750015792696231 +505319314877593618243111292036152694012160231,505319655159960539181574755410760092447038354,71879242322025567726 
+507020726712198310560428165073311457291662454,507021066994565231498891628447918855726540577,71839756331331232539 +508722138546803002877745038110470220571164677,508722478829169923816208501485077619006042800,71800292031787370643 +510423550381407695195061911147628983850666900,510423890663774616133525374522236382285545023,71760849411478211389 +512124962216012387512378784184787747130169123,512125302498379308450842247559395145565047246,71721428458494529918 +513826374050617079829695657221946510409671346,513826714332984000768159120596553908844549469,71682029160933643554 +515527785885221772147012530259105273689173569,515528126167588693085475993633712672124051692,71642651506899408219 +517229197719826464464329403296264036968675792,517229538002193385402792866670871435403553915,71603295484502214836 +518930609554431156781646276333422800248178015,518930949836798077720109739708030198683056138,71563961081858985740 +520632021389035849098963149370581563527680238,520632361671402770037426612745188961962558361,71524648287093171094 +522333433223640541416280022407740326807182461,522333773506007462354743485782347725242060584,71485357088334745295 +524034845058245233733596895444899090086684684,524035185340612154672060358819506488521562807,71446087473720203399 +525736256892849926050913768482057853366186907,525736597175216846989377231856665251801065030,71406839431392557531 +527437668727454618368230641519216616645689130,527438009009821539306694104893824015080567253,71367612949501333313 +529139080562059310685547514556375379925191353,529139420844426231624010977930982778360069476,71328408016202566276 +530840492396664003002864387593534143204693576,530840832679030923941327850968141541639571699,71289224619658798295 +532541904231268695320181260630692906484195799,532542244513635616258644724005300304919073922,71250062748039074004 +534243316065873387637498133667851669763698022,534243656348240308575961597042459068198576145,71210922389518937233 
+535944727900478079954815006705010433043200245,535945068182845000893278470079617831478078368,71171803532280427431 +537646139735082772272131879742169196322702468,537646480017449693210595343116776594757580591,71132706164512076103 +539347551569687464589448752779327959602204691,539347891852054385527912216153935358037082814,71093630274408903238 +541048963404292156906765625816486722881706914,541049303686659077845229089191094121316585037,71054575850172413751 +542750375238896849224082498853645486161209137,542750715521263770162545962228252884596087260,71015542880010593913 +544451787073501541541399371890804249440711360,544452127355868462479862835265411647875589483,70976531352137907800 +546153198908106233858716244927963012720213583,546153539190473154797179708302570411155091706,70937541254775293726 +547854610742710926176033117965121775999715806,547854951025077847114496581339729174434593929,70898572576150160690 +549556022577315618493349991002280539279218029,549556362859682539431813454376887937714096152,70859625304496384821 +551257434411920310810666864039439302558720252,551257774694287231749130327414046700993598375,70820699428054305829 +552958846246525003127983737076598065838222475,552959186528891924066447200451205464273100598,70781794935070723445 +554660258081129695445300610113756829117724698,554660598363496616383764073488364227552602821,70742911813798893884 +556361669915734387762617483150915592397226921,556362010198101308701080946525522990832105044,70704050052498526288 +558063081750339080079934356188074355676729144,558063422032706001018397819562681754111607267,70665209639435779188 +559764493584943772397251229225233118956231367,559764833867310693335714692599840517391109490,70626390562883256957 +561465905419548464714568102262391882235733590,561466245701915385653031565636999280670611713,70587592811120006272 +563167317254153157031884975299550645515235813,563167657536520077970348438674158043950113936,70548816372431512573 
+564868729088757849349201848336709408794738036,564869069371124770287665311711316807229616159,70510061235109696526 +566570140923362541666518721373868172074240259,566570481205729462604982184748475570509118382,70471327387452910491 +568271552757967233983835594411026935353742482,568271893040334154922299057785634333788620605,70432614817765934983 +569972964592571926301152467448185698633244705,569973304874938847239615930822793097068122828,70393923514359975147 +571674376427176618618469340485344461912746928,571674716709543539556932803859951860347625051,70355253465552657223 +573375788261781310935786213522503225192249151,573376128544148231874249676897110623627127274,70316604659668025023 +575077200096386003253103086559661988471751374,575077540378752924191566549934269386906629497,70277977085036536405 +576778611930990695570419959596820751751253597,576778952213357616508883422971428150186131720,70239370729995059746 +578480023765595387887736832633979515030755820,578480364047962308826200296008586913465633943,70200785582886870426 +580181435600200080205053705671138278310258043,580181775882567001143517169045745676745136166,70162221632061647304 +581882847434804772522370578708297041589760266,581883187717171693460834042082904440024638389,70123678865875469200 +583584259269409464839687451745455804869262489,583584599551776385778150915120063203304140612,70085157272690811384 +585285671104014157157004324782614568148764712,585286011386381078095467788157221966583642835,70046656840876542057 +586987082938618849474321197819773331428266935,586987423220985770412784661194380729863145058,70008177558807918842 +588688494773223541791638070856932094707769158,588688835055590462730101534231539493142647281,69969719414866585275 +590389906607828234108954943894090857987271381,590390246890195155047418407268698256422149504,69931282397440567292 +592091318442432926426271816931249621266773604,592091658724799847364735280305857019701651727,69892866494924269730 
+593792730277037618743588689968408384546275827,593793070559404539682052153343015782981153950,69854471695718472815 +595494142111642311060905563005567147825778050,595494482394009231999369026380174546260656173,69816097988230328666 +597195553946247003378222436042725911105280273,597195894228613924316685899417333309540158396,69777745360873357794 +598896965780851695695539309079884674384782496,598897306063218616634002772454492072819660619,69739413802067445600 +600598377615456388012856182117043437664284719,600598717897823308951319645491650836099162842,69701103300238838880 +602299789450061080330173055154202200943786942,602300129732428001268636518528809599378665065,69662813843820142333 +604001201284665772647489928191360964223289165,604001541567032693585953391565968362658167288,69624545421250315066 +605702613119270464964806801228519727502791388,605702953401637385903270264603127125937669511,69586298020974667102 +607404024953875157282123674265678490782293611,607404365236242078220587137640285889217171734,69548071631444855895 +609105436788479849599440547302837254061795834,609105777070846770537904010677444652496673957,69509866241118882841 +610806848623084541916757420339996017341298057,610807188905451462855220883714603415776176180,69471681838461089791 +612508260457689234234074293377154780620800280,612508600740056155172537756751762179055678403,69433518411942155573 +614209672292293926551391166414313543900302503,614210012574660847489854629788920942335180626,69395375950039092504 +615911084126898618868708039451472307179804726,615911424409265539807171502826079705614682849,69357254441235242918 +617612495961503311186024912488631070459306949,617612836243870232124488375863238468894185072,69319153874020275685 +619313907796108003503341785525789833738809172,619314248078474924441805248900397232173687295,69281074236890182733 +621015319630712695820658658562948597018311395,621015659913079616759122121937555995453189518,69243015518347275581 
+622716731465317388137975531600107360297813618,622717071747684309076438994974714758732691741,69204977706900181862 +624418143299922080455292404637266123577315841,624418483582289001393755868011873522012193964,69166960791063841856 +626119555134526772772609277674424886856818064,626119895416893693711072741049032285291696187,69128964759359505020 +627820966969131465089926150711583650136320287,627821307251498386028389614086191048571198410,69090989600314726524 +629522378803736157407243023748742413415822510,629522719086103078345706487123349811850700633,69053035302463363788 +631223790638340849724559896785901176695324733,631224130920707770663023360160508575130202856,69015101854345573017 +632925202472945542041876769823059939974826956,632925542755312462980340233197667338409705079,68977189244507805742 +634626614307550234359193642860218703254329179,634626954589917155297657106234826101689207302,68939297461502805364 +636328026142154926676510515897377466533831402,636328366424521847614973979271984864968709525,68901426493889603693 +638029437976759618993827388934536229813333625,638029778259126539932290852309143628248211748,68863576330233517499 +639730849811364311311144261971694993092835848,639731190093731232249607725346302391527713971,68825746959106145054 +641432261645969003628461135008853756372338071,641432601928335924566924598383461154807216194,68787938369085362686 +643133673480573695945778008046012519651840294,643134013762940616884241471420619918086718417,68750150548755321327 +644835085315178388263094881083171282931342517,644835425597545309201558344457778681366220640,68712383486706443069 +646536497149783080580411754120330046210844740,646536837432150001518875217494937444645722863,68674637171535417716 +648237908984387772897728627157488809490346963,648238249266754693836192090532096207925225086,68636911591845199341 +649939320818992465215045500194647572769849186,649939661101359386153508963569254971204727309,68599206736245002849 
+651640732653597157532362373231806336049351409,651641072935964078470825836606413734484229532,68561522593350300532 +653342144488201849849679246268965099328853632,653342484770568770788142709643572497763731755,68523859151782818637 +655043556322806542166996119306123862608355855,655043896605173463105459582680731261043233978,68486216400170533926 +656744968157411234484312992343282625887858078,656745308439778155422776455717890024322736201,68448594327147670244 +658446379992015926801629865380441389167360301,658446720274382847740093328755048787602238424,68410992921354695088 +660147791826620619118946738417600152446862524,660148132108987540057410201792207550881740647,68373412171438316178 +661849203661225311436263611454758915726364747,661849543943592232374727074829366314161242870,68335852066051478024 +663550615495830003753580484491917679005866970,663550955778196924692043947866525077440745093,68298312593853358506 +665252027330434696070897357529076442285369193,665252367612801617009360820903683840720247316,68260793743509365447 +666953439165039388388214230566235205564871416,666953779447406309326677693940842603999749539,68223295503691133191 +668654850999644080705531103603393968844373639,668655191282011001643994566978001367279251762,68185817863076519181 +670356262834248773022847976640552732123875862,670356603116615693961311440015160130558753985,68148360810349600542 +672057674668853465340164849677711495403378085,672058014951220386278628313052318893838256208,68110924334200670667 +673759086503458157657481722714870258682880308,673759426785825078595945186089477657117758431,68073508423326235795 +675460498338062849974798595752029021962382531,675460838620429770913262059126636420397260654,68036113066429011606 +677161910172667542292115468789187785241884754,677162250455034463230578932163795183676762877,67998738252217919803 +678863322007272234609432341826346548521386977,678863662289639155547895805200953946956265100,67961383969408084708 
+680564733841876926926749214863505311800889200,680565074124243847865212678238112710235767323,67924050206720829853 +682266145676481619244066087900664075080391423,682266485958848540182529551275271473515269546,67886736952883674573 +683967557511086311561382960937822838359893646,683967897793453232499846424312430236794771769,67849444196630330605 +685668969345691003878699833974981601639395869,685669309628057924817163297349589000074273992,67812171926700698683 +687370381180295696196016707012140364918898092,687370721462662617134480170386747763353776215,67774920131840865142 +689071793014900388513333580049299128198400315,689072133297267309451797043423906526633278438,67737688800803098516 +690773204849505080830650453086457891477902538,690773545131872001769113916461065289912780661,67700477922345846148 +692474616684109773147967326123616654757404761,692474956966476694086430789498224053192282884,67663287485233730786 +694176028518714465465284199160775418036906984,694176368801081386403747662535382816471785107,67626117478237547201 +695877440353319157782601072197934181316409207,695877780635686078721064535572541579751287330,67588967890134258790 +697578852187923850099917945235092944595911430,697579192470290771038381408609700343030789553,67551838709706994188 +699280264022528542417234818272251707875413653,699280604304895463355698281646859106310291776,67514729925745043886 +700981675857133234734551691309410471154915876,700982016139500155673015154684017869589793999,67477641527043856838 +702683087691737927051868564346569234434418099,702683427974104847990332027721176632869296222,67440573502405037085 +704384499526342619369185437383727997713920322,704384839808709540307648900758335396148798445,67403525840636340371 +706085911360947311686502310420886760993422545,706086251643314232624965773795494159428300668,67366498530551670762 +707787323195552004003819183458045524272924768,707787663477918924942282646832652922707802891,67329491560971077273 
+709488735030156696321136056495204287552426991,709489075312523617259599519869811685987305114,67292504920720750488 +711190146864761388638452929532363050831929214,711190487147128309576916392906970449266807337,67255538598633019186 +712891558699366080955769802569521814111431437,712891898981733001894233265944129212546309560,67218592583546346974 +714592970533970773273086675606680577390933660,714593310816337694211550138981287975825811783,67181666864305328913 +716294382368575465590403548643839340670435883,716294722650942386528867012018446739105314006,67144761429760688149 +717995794203180157907720421680998103949938106,717996134485547078846183885055605502384816229,67107876268769272550 +719697206037784850225037294718156867229440329,719697546320151771163500758092764265664318452,67071011370194051338 +721398617872389542542354167755315630508942552,721398958154756463480817631129923028943820675,67034166722904111729 +723100029706994234859671040792474393788444775,723100369989361155798134504167081792223322898,66997342315774655570 +724801441541598927176987913829633157067946998,724801781823965848115451377204240555502825121,66960538137686995982 +726502853376203619494304786866791920347449221,726503193658570540432768250241399318782327344,66923754177528554001 +728204265210808311811621659903950683626951444,728204605493175232750085123278558082061829567,66886990424192855226 +729905677045413004128938532941109446906453667,729906017327779925067401996315716845341331790,66850246866579526460 +731607088880017696446255405978268210185955890,731607429162384617384718869352875608620834013,66813523493594292362 +733308500714622388763572279015426973465458113,733308840996989309702035742390034371900336236,66776820294148972099 +735009912549227081080889152052585736744960336,735010252831594002019352615427193135179838459,66740137257161475994 +736711324383831773398206025089744500024462559,736711664666198694336669488464351898459340682,66703474371555802181 
+738412736218436465715522898126903263303964782,738413076500803386653986361501510661738842905,66666831626262033262 +740114148053041158032839771164062026583467005,740114488335408078971303234538669425018345128,66630209010216332966 +741815559887645850350156644201220789862969228,741815900170012771288620107575828188297847351,66593606512360942804 +743516971722250542667473517238379553142471451,743517312004617463605936980612986951577349574,66557024121644178733 +745218383556855234984790390275538316421973674,745218723839222155923253853650145714856851797,66520461827020427819 +746919795391459927302107263312697079701475897,746920135673826848240570726687304478136354020,66483919617450144901 +748621207226064619619424136349855842980978120,748621547508431540557887599724463241415856243,66447397481899849260 +750322619060669311936741009387014606260480343,750322959343036232875204472761622004695358466,66410895409342121284 +752024030895274004254057882424173369539982566,752024371177640925192521345798780767974860689,66374413388755599143 +753725442729878696571374755461332132819484789,753725783012245617509838218835939531254362912,66337951409124975454 +755426854564483388888691628498490896098987012,755427194846850309827155091873098294533865135,66301509459440993964 +757128266399088081206008501535649659378489235,757128606681455002144471964910257057813367358,66265087528700446220 +758829678233692773523325374572808422657991458,758830018516059694461788837947415821092869581,66228685605906168248 +760531090068297465840642247609967185937493681,760531430350664386779105710984574584372371804,66192303680067037232 +762232501902902158157959120647125949216995904,762232842185269079096422584021733347651874027,66155941740197968197 +763933913737506850475275993684284712496498127,763934254019873771413739457058892110931376250,66119599775319910693 +765635325572111542792592866721443475776000350,765635665854478463731056330096050874210878473,66083277774459845475 
+767336737406716235109909739758602239055502573,767337077689083156048373203133209637490380696,66046975726650781195 +769038149241320927427226612795761002335004796,769038489523687848365690076170368400769882919,66010693620931751089 +770739561075925619744543485832919765614507019,770739901358292540683006949207527164049385142,65974431446347809666 +772440972910530312061860358870078528894009242,772441313192897233000323822244685927328887365,65938189191950029403 +774142384745135004379177231907237292173511465,774142725027501925317640695281844690608389588,65901966846795497437 +775843796579739696696494104944396055453013688,775844136862106617634957568319003453887891811,65865764399947312262 +777545208414344389013810977981554818732515911,777545548696711309952274441356162217167394034,65829581840474580424 +779246620248949081331127851018713582012018134,779246960531316002269591314393320980446896257,65793419157452413227 +780948032083553773648444724055872345291520357,780948372365920694586908187430479743726398480,65757276339961923427 +782649443918158465965761597093031108571022580,782649784200525386904225060467638507005900703,65721153377090221938 +784350855752763158283078470130189871850524803,784351196035130079221541933504797270285402926,65685050257930414541 +786052267587367850600395343167348635130027026,786052607869734771538858806541956033564905149,65648966971581598583 +787753679421972542917712216204507398409529249,787754019704339463856175679579114796844407372,65612903507148859693 +789455091256577235235029089241666161689031472,789455431538944156173492552616273560123909595,65576859853743268487 +791156503091181927552345962278824924968533695,791156843373548848490809425653432323403411818,65540836000481877283 +792857914925786619869662835315983688248035918,792858255208153540808126298690591086682914041,65504831936487716816 +794559326760391312186979708353142451527538141,794559667042758233125443171727749849962416264,65468847650889792949 
+796260738594996004504296581390301214807040364,796261078877362925442760044764908613241918487,65432883132823083396 +797962150429600696821613454427459978086542587,797962490711967617760076917802067376521420710,65396938371428534440 +799663562264205389138930327464618741366044810,799663902546572310077393790839226139800922933,65361013355853057653 +801364974098810081456247200501777504645547033,801365314381177002394710663876384903080425156,65325108075249526618 +803066385933414773773564073538936267925049256,803066726215781694712027536913543666359927379,65289222518776773657 +804767797768019466090880946576095031204551479,804768138050386387029344409950702429639429602,65253356675599586557 +806469209602624158408197819613253794484053702,806469549884991079346661282987861192918931825,65217510534888705294 +808170621437228850725514692650412557763555925,808170961719595771663978156025019956198434048,65181684085820818771 +809872033271833543042831565687571321043058148,809872373554200463981295029062178719477936271,65145877317578561543 +811573445106438235360148438724730084322560371,811573785388805156298611902099337482757438494,65110090219350510555 +813274856941042927677465311761888847602062594,813275197223409848615928775136496246036940717,65074322780331181875 +814976268775647619994782184799047610881564817,814976609058014540933245648173655009316442940,65038574989721027434 +816677680610252312312099057836206374161067040,816678020892619233250562521210813772595945163,65002846836726431763 +818379092444857004629415930873365137440569263,818379432727223925567879394247972535875447386,64967138310559708735 +820080504279461696946732803910523900720071486,820080844561828617885196267285131299154949609,64931449400439098307 +821781916114066389264049676947682663999573709,821782256396433310202513140322290062434451832,64895780095588763267 +823483327948671081581366549984841427279075932,823483668231038002519830013359448825713954055,64860130385238785977 
+825184739783275773898683423022000190558578155,825185080065642694837146886396607588993456278,64824500258625165123 +826886151617880466216000296059158953838080378,826886491900247387154463759433766352272958501,64788889704989812466 +828587563452485158533317169096317717117582601,828587903734852079471780632470925115552460724,64753298713580549590 +830288975287089850850634042133476480397084824,830289315569456771789097505508083878831962947,64717727273651104660 +831990387121694543167950915170635243676587047,831990727404061464106414378545242642111465170,64682175374461109174 +833691798956299235485267788207794006956089270,833692139238666156423731251582401405390967393,64646643005276094722 +835393210790903927802584661244952770235591493,835393551073270848741048124619560168670469616,64611130155367489744 +837094622625508620119901534282111533515093716,837094962907875541058364997656718931949971839,64575636814012616290 +838796034460113312437218407319270296794595939,838796374742480233375681870693877695229474062,64540162970494686784 +840497446294718004754535280356429060074098162,840497786577084925692998743731036458508976285,64504708614102800788 +842198858129322697071852153393587823353600385,842199198411689618010315616768195221788478508,64469273734131941766 +843900269963927389389169026430746586633102608,843900610246294310327632489805353985067980731,64433858319882973853 +845601681798532081706485899467905349912604831,845602022080899002644949362842512748347482954,64398462360662638626 +847303093633136774023802772505064113192107054,847303433915503694962266235879671511626985177,64363085845783551874 +849004505467741466341119645542222876471609277,849004845750108387279583108916830274906487400,64327728764564200368 +850705917302346158658436518579381639751111500,850706257584713079596899981953989038185989623,64292391106328938643 +852407329136950850975753391616540403030613723,852407669419317771914216854991147801465491846,64257072860407985768 
+854108740971555543293070264653699166310115946,854109081253922464231533728028306564744994069,64221774016137422127 +855810152806160235610387137690857929589618169,855810493088527156548850601065465328024496292,64186494562859186199 +857511564640764927927704010728016692869120392,857511904923131848866167474102624091303998515,64151234489921071342 +859212976475369620245020883765175456148622615,859213316757736541183484347139782854583500738,64115993786676722571 +860914388309974312562337756802334219428124838,860914728592341233500801220176941617863002961,64080772442485633350 +862615800144579004879654629839492982707627061,862616140426945925818118093214100381142505184,64045570446713142375 +864317211979183697196971502876651745987129284,864317552261550618135434966251259144422007407,64010387788730430365 +866018623813788389514288375913810509266631507,866018964096155310452751839288417907701509630,63975224457914516852 +867720035648393081831605248950969272546133730,867720375930760002770068712325576670981011853,63940080443648256973 +869421447482997774148922121988128035825635953,869421787765364695087385585362735434260514076,63904955735320338264 diff --git a/chain/consensus/actors/reward/testdata/TestSimpleReward.golden b/chain/consensus/actors/reward/testdata/TestSimpleReward.golden new file mode 100644 index 000000000..6117a3142 --- /dev/null +++ b/chain/consensus/actors/reward/testdata/TestSimpleReward.golden @@ -0,0 +1,513 @@ +x, y +0,36266264293777134739 +5000,36246341860983438171 +10000,36226430372336764970 +15000,36206529821825080595 +20000,36186640203439653146 +25000,36166761511175051545 +30000,36146893739029143725 +35000,36127036881003094819 +40000,36107190931101365346 +45000,36087355883331709402 +50000,36067531731705172852 +55000,36047718470236091517 +60000,36027916092942089375 +65000,36008124593844076745 +70000,35988343966966248490 +75000,35968574206336082207 +80000,35948815305984336427 +85000,35929067259945048810 +90000,35909330062255534348 +95000,35889603706956383558 
+100000,35869888188091460688 +105000,35850183499707901919 +110000,35830489635856113563 +115000,35810806590589770270 +120000,35791134357965813232 +125000,35771472932044448387 +130000,35751822306889144629 +135000,35732182476566632013 +140000,35712553435146899962 +145000,35692935176703195481 +150000,35673327695312021363 +155000,35653730985053134403 +160000,35634145040009543611 +165000,35614569854267508423 +170000,35595005421916536916 +175000,35575451737049384026 +180000,35555908793762049762 +185000,35536376586153777422 +190000,35516855108327051817 +195000,35497344354387597482 +200000,35477844318444376903 +205000,35458354994609588738 +210000,35438876376998666034 +215000,35419408459730274454 +220000,35399951236926310503 +225000,35380504702711899749 +230000,35361068851215395052 +235000,35341643676568374790 +240000,35322229172905641086 +245000,35302825334365218041 +250000,35283432155088349958 +255000,35264049629219499580 +260000,35244677750906346316 +265000,35225316514299784477 +270000,35205965913553921511 +275000,35186625942826076233 +280000,35167296596276777067 +285000,35147977868069760277 +290000,35128669752371968212 +295000,35109372243353547537 +300000,35090085335187847477 +305000,35070809022051418057 +310000,35051543298124008345 +315000,35032288157588564692 +320000,35013043594631228977 +325000,34993809603441336853 +330000,34974586178211415988 +335000,34955373313137184317 +340000,34936171002417548287 +345000,34916979240254601104 +350000,34897798020853620985 +355000,34878627338423069407 +360000,34859467187174589358 +365000,34840317561323003590 +370000,34821178455086312872 +375000,34802049862685694246 +380000,34782931778345499277 +385000,34763824196293252314 +390000,34744727110759648747 +395000,34725640515978553261 +400000,34706564406186998099 +405000,34687498775625181322 +410000,34668443618536465065 +415000,34649398929167373804 +420000,34630364701767592618 +425000,34611340930589965451 +430000,34592327609890493376 +435000,34573324733928332864 
+440000,34554332296965794049 +445000,34535350293268338995 +450000,34516378717104579964 +455000,34497417562746277688 +460000,34478466824468339636 +465000,34459526496548818288 +470000,34440596573268909408 +475000,34421677048912950313 +480000,34402767917768418153 +485000,34383869174125928181 +490000,34364980812279232031 +495000,34346102826525215998 +500000,34327235211163899311 +505000,34308377960498432415 +510000,34289531068835095251 +515000,34270694530483295536 +520000,34251868339755567043 +525000,34233052490967567889 +530000,34214246978438078812 +535000,34195451796489001460 +540000,34176666939445356677 +545000,34157892401635282787 +550000,34139128177390033882 +555000,34120374261043978112 +560000,34101630646934595971 +565000,34082897329402478592 +570000,34064174302791326035 +575000,34045461561447945578 +580000,34026759099722250015 +585000,34008066911967255944 +590000,33989384992539082067 +595000,33970713335796947482 +600000,33952051936103169985 +605000,33933400787823164359 +610000,33914759885325440683 +615000,33896129222981602625 +620000,33877508795166345743 +625000,33858898596257455790 +630000,33840298620635807012 +635000,33821708862685360457 +640000,33803129316793162272 +645000,33784559977349342015 +650000,33766000838747110957 +655000,33747451895382760391 +660000,33728913141655659939 +665000,33710384571968255862 +670000,33691866180726069369 +675000,33673357962337694928 +680000,33654859911214798578 +685000,33636372021772116242 +690000,33617894288427452039 +695000,33599426705601676601 +700000,33580969267718725386 +705000,33562521969205596996 +710000,33544084804492351495 +715000,33525657768012108722 +720000,33507240854201046620 +725000,33488834057498399544 +730000,33470437372346456594 +735000,33452050793190559927 +740000,33433674314479103086 +745000,33415307930663529321 +750000,33396951636198329914 +755000,33378605425541042506 +760000,33360269293152249423 +765000,33341943233495576000 +770000,33323627241037688917 +775000,33305321310248294519 
+780000,33287025435600137154 +785000,33268739611568997498 +790000,33250463832633690894 +795000,33232198093276065677 +800000,33213942387981001514 +805000,33195696711236407735 +810000,33177461057533221671 +815000,33159235421365406991 +820000,33141019797229952037 +825000,33122814179626868163 +830000,33104618563059188076 +835000,33086432942032964176 +840000,33068257311057266896 +845000,33050091664644183044 +850000,33031935997308814148 +855000,33013790303569274797 +860000,32995654577946690988 +865000,32977528814965198472 +870000,32959413009151941097 +875000,32941307155037069161 +880000,32923211247153737756 +885000,32905125280038105120 +890000,32887049248229330986 +895000,32868983146269574933 +900000,32850926968703994738 +905000,32832880710080744731 +910000,32814844364950974146 +915000,32796817927868825478 +920000,32778801393391432838 +925000,32760794756078920308 +930000,32742798010494400302 +935000,32724811151203971919 +940000,32706834172776719310 +945000,32688867069784710030 +950000,32670909836802993406 +955000,32652962468409598893 +960000,32635024959185534441 +965000,32617097303714784859 +970000,32599179496584310176 +975000,32581271532384044011 +980000,32563373405706891936 +985000,32545485111148729845 +990000,32527606643308402322 +995000,32509737996787721011 +1000000,32491879166191462986 +1005000,32474030146127369118 +1010000,32456190931206142454 +1015000,32438361516041446585 +1020000,32420541895249904018 +1025000,32402732063451094557 +1030000,32384932015267553672 +1035000,32367141745324770879 +1040000,32349361248251188117 +1045000,32331590518678198123 +1050000,32313829551240142816 +1055000,32296078340574311671 +1060000,32278336881320940107 +1065000,32260605168123207863 +1070000,32242883195627237380 +1075000,32225170958482092190 +1080000,32207468451339775295 +1085000,32189775668855227557 +1090000,32172092605686326078 +1095000,32154419256493882593 +1100000,32136755615941641854 +1105000,32119101678696280022 +1110000,32101457439427403051 +1115000,32083822892807545087 
+1120000,32066198033512166852 +1125000,32048582856219654040 +1130000,32030977355611315709 +1135000,32013381526371382675 +1140000,31995795363187005909 +1145000,31978218860748254929 +1150000,31960652013748116201 +1155000,31943094816882491533 +1160000,31925547264850196477 +1165000,31908009352352958723 +1170000,31890481074095416507 +1175000,31872962424785117005 +1180000,31855453399132514738 +1185000,31837953991850969976 +1190000,31820464197656747139 +1195000,31802984011269013204 +1200000,31785513427409836109 +1205000,31768052440804183162 +1210000,31750601046179919443 +1215000,31733159238267806217 +1220000,31715727011801499344 +1225000,31698304361517547681 +1230000,31680891282155391503 +1235000,31663487768457360906 +1240000,31646093815168674227 +1245000,31628709417037436451 +1250000,31611334568814637627 +1255000,31593969265254151288 +1260000,31576613501112732858 +1265000,31559267271150018079 +1270000,31541930570128521418 +1275000,31524603392813634497 +1280000,31507285733973624502 +1285000,31489977588379632610 +1290000,31472678950805672410 +1295000,31455389816028628319 +1300000,31438110178828254013 +1305000,31420840033987170845 +1310000,31403579376290866273 +1315000,31386328200527692284 +1320000,31369086501488863820 +1325000,31351854273968457206 +1330000,31334631512763408579 +1335000,31317418212673512315 +1340000,31300214368501419460 +1345000,31283019975052636163 +1350000,31265835027135522102 +1355000,31248659519561288921 +1360000,31231493447143998662 +1365000,31214336804700562200 +1370000,31197189587050737676 +1375000,31180051789017128935 +1380000,31162923405425183960 +1385000,31145804431103193314 +1390000,31128694860882288574 +1395000,31111594689596440773 +1400000,31094503912082458840 +1405000,31077422523179988040 +1410000,31060350517731508415 +1415000,31043287890582333230 +1420000,31026234636580607413 +1425000,31009190750577306003 +1430000,30992156227426232592 +1435000,30975131061984017774 +1440000,30958115249110117588 +1445000,30941108783666811972 
+1450000,30924111660519203206 +1455000,30907123874535214365 +1460000,30890145420585587766 +1465000,30873176293543883425 +1470000,30856216488286477503 +1475000,30839265999692560763 +1480000,30822324822644137022 +1485000,30805392952026021606 +1490000,30788470382725839805 +1495000,30771557109634025334 +1500000,30754653127643818780 +1505000,30737758431651266072 +1510000,30720873016555216931 +1515000,30703996877257323335 +1520000,30687130008662037976 +1525000,30670272405676612725 +1530000,30653424063211097091 +1535000,30636584976178336685 +1540000,30619755139493971688 +1545000,30602934548076435308 +1550000,30586123196846952254 +1555000,30569321080729537196 +1560000,30552528194650993236 +1565000,30535744533540910376 +1570000,30518970092331663985 +1575000,30502204865958413271 +1580000,30485448849359099750 +1585000,30468702037474445720 +1590000,30451964425247952730 +1595000,30435236007625900057 +1600000,30418516779557343177 +1605000,30401806735994112241 +1610000,30385105871890810552 +1615000,30368414182204813041 +1620000,30351731661896264742 +1625000,30335058305928079272 +1630000,30318394109265937314 +1635000,30301739066878285090 +1640000,30285093173736332845 +1645000,30268456424814053330 +1650000,30251828815088180284 +1655000,30235210339538206915 +1660000,30218600993146384385 +1665000,30202000770897720297 +1670000,30185409667779977178 +1675000,30168827678783670969 +1680000,30152254798902069507 +1685000,30135691023131191020 +1690000,30119136346469802612 +1695000,30102590763919418751 +1700000,30086054270484299766 +1705000,30069526861171450333 +1710000,30053008530990617971 +1715000,30036499274954291531 +1720000,30019999088077699695 +1725000,30003507965378809469 +1730000,29987025901878324677 +1735000,29970552892599684460 +1740000,29954088932569061773 +1745000,29937634016815361880 +1750000,29921188140370220858 +1755000,29904751298268004095 +1760000,29888323485545804788 +1765000,29871904697243442447 +1770000,29855494928403461400 +1775000,29839094174071129289 
+1780000,29822702429294435581 +1785000,29806319689124090070 +1790000,29789945948613521380 +1795000,29773581202818875477 +1800000,29757225446799014173 +1805000,29740878675615513631 +1810000,29724540884332662882 +1815000,29708212068017462327 +1820000,29691892221739622251 +1825000,29675581340571561335 +1830000,29659279419588405166 +1835000,29642986453867984751 +1840000,29626702438490835031 +1845000,29610427368540193398 +1850000,29594161239101998203 +1855000,29577904045264887282 +1860000,29561655782120196465 +1865000,29545416444761958098 +1870000,29529186028286899564 +1875000,29512964527794441794 +1880000,29496751938386697796 +1885000,29480548255168471173 +1890000,29464353473247254644 +1895000,29448167587733228567 +1900000,29431990593739259465 +1905000,29415822486380898546 +1910000,29399663260776380234 +1915000,29383512912046620688 +1920000,29367371435315216335 +1925000,29351238825708442396 +1930000,29335115078355251410 +1935000,29319000188387271772 +1940000,29302894150938806253 +1945000,29286796961146830539 +1950000,29270708614150991759 +1955000,29254629105093607016 +1960000,29238558429119661925 +1965000,29222496581376809142 +1970000,29206443557015366902 +1975000,29190399351188317555 +1980000,29174363959051306100 +1985000,29158337375762638725 +1990000,29142319596483281341 +1995000,29126310616376858128 +2000000,29110310430609650067 +2005000,29094319034350593486 +2010000,29078336422771278598 +2015000,29062362591045948044 +2020000,29046397534351495438 +2025000,29030441247867463909 +2030000,29014493726776044643 +2035000,28998554966262075434 +2040000,28982624961513039226 +2045000,28966703707719062659 +2050000,28950791200072914622 +2055000,28934887433770004795 +2060000,28918992404008382204 +2065000,28903106105988733766 +2070000,28887228534914382845 +2075000,28871359685991287801 +2080000,28855499554428040540 +2085000,28839648135435865072 +2090000,28823805424228616064 +2095000,28807971416022777392 +2100000,28792146106037460699 +2105000,28776329489494403952 
+2110000,28760521561617969998 +2115000,28744722317635145121 +2120000,28728931752775537605 +2125000,28713149862271376289 +2130000,28697376641357509128 +2135000,28681612085271401759 +2140000,28665856189253136056 +2145000,28650108948545408700 +2150000,28634370358393529736 +2155000,28618640414045421140 +2160000,28602919110751615388 +2165000,28587206443765254015 +2170000,28571502408342086186 +2175000,28555806999740467263 +2180000,28540120213221357374 +2185000,28524442044048319980 +2190000,28508772487487520445 +2195000,28493111538807724610 +2200000,28477459193280297361 +2205000,28461815446179201201 +2210000,28446180292780994828 +2215000,28430553728364831702 +2220000,28414935748212458624 +2225000,28399326347608214310 +2230000,28383725521839027967 +2235000,28368133266194417872 +2240000,28352549575966489946 +2245000,28336974446449936336 +2250000,28321407872942033992 +2255000,28305849850742643249 +2260000,28290300375154206404 +2265000,28274759441481746304 +2270000,28259227045032864923 +2275000,28243703181117741948 +2280000,28228187845049133359 +2285000,28212681032142370020 +2290000,28197182737715356260 +2295000,28181692957088568460 +2300000,28166211685585053643 +2305000,28150738918530428055 +2310000,28135274651252875762 +2315000,28119818879083147235 +2320000,28104371597354557938 +2325000,28088932801402986924 +2330000,28073502486566875425 +2335000,28058080648187225440 +2340000,28042667281607598337 +2345000,28027262382174113438 +2350000,28011865945235446620 +2355000,27996477966142828909 +2360000,27981098440250045075 +2365000,27965727362913432230 +2370000,27950364729491878425 +2375000,27935010535346821252 +2380000,27919664775842246439 +2385000,27904327446344686454 +2390000,27888998542223219102 +2395000,27873678058849466131 +2400000,27858365991597591831 +2405000,27843062335844301640 +2410000,27827767086968840747 +2415000,27812480240352992695 +2420000,27797201791381077989 +2425000,27781931735439952703 +2430000,27766670067919007084 +2435000,27751416784210164162 
+2440000,27736171879707878359 +2445000,27720935349809134096 +2450000,27705707189913444406 +2455000,27690487395422849544 +2460000,27675275961741915599 +2465000,27660072884277733104 +2470000,27644878158439915652 +2475000,27629691779640598509 +2480000,27614513743294437231 +2485000,27599344044818606274 +2490000,27584182679632797616 +2495000,27569029643159219370 +2500000,27553884930822594406 +2505000,27538748538050158965 +2510000,27523620460271661280 +2515000,27508500692919360199 +2520000,27493389231428023800 +2525000,27478286071234928018 +2530000,27463191207779855265 +2535000,27448104636505093052 +2540000,27433026352855432615 +2545000,27417956352278167539 +2550000,27402894630223092384 +2555000,27387841182142501307 diff --git a/chain/consensus/actors/reward/testing.go b/chain/consensus/actors/reward/testing.go new file mode 100644 index 000000000..93880a0c7 --- /dev/null +++ b/chain/consensus/actors/reward/testing.go @@ -0,0 +1,29 @@ +package reward + +import ( + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +type StateSummary struct{} + +var FIL = big.NewInt(1e18) +var StorageMiningAllocationCheck = big.Mul(big.NewInt(1_100_000_000), FIL) + +func CheckStateInvariants(st *State, store adt.Store, priorEpoch abi.ChainEpoch, balance abi.TokenAmount) (*StateSummary, *builtin.MessageAccumulator) { + acc := &builtin.MessageAccumulator{} + + // Can't assert equality because anyone can send funds to reward actor (and already have on mainnet) + acc.Require(big.Add(st.TotalStoragePowerReward, balance).GreaterThanEqual(StorageMiningAllocationCheck), "reward given %v + reward left %v < storage mining allocation %v", st.TotalStoragePowerReward, balance, StorageMiningAllocationCheck) + + acc.Require(st.Epoch == priorEpoch+1, "reward state epoch %d does not match priorEpoch+1 %d", st.Epoch, 
priorEpoch+1) + acc.Require(st.EffectiveNetworkTime <= st.Epoch, "effective network time greater than state epoch") + + acc.Require(st.CumsumRealized.LessThanEqual(st.CumsumBaseline), "cumsum realized > cumsum baseline") + acc.Require(st.CumsumRealized.GreaterThanEqual(big.Zero()), "cumsum realized < 0") + acc.Require(st.EffectiveBaselinePower.LessThanEqual(st.ThisEpochBaselinePower), "effective baseline power > baseline power") + + return &StateSummary{}, acc +} diff --git a/chain/consensus/actors/split/split.go b/chain/consensus/actors/split/split.go index e20b76313..0d7cb6efa 100644 --- a/chain/consensus/actors/split/split.go +++ b/chain/consensus/actors/split/split.go @@ -7,8 +7,8 @@ import ( "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/lotus/chain/actors/builtin" actor "github.com/filecoin-project/lotus/chain/consensus/actors" - builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" + builtin6 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" cid "github.com/ipfs/go-cid" ) diff --git a/chain/consensus/common/cns_validations.go b/chain/consensus/common/cns_validations.go new file mode 100644 index 000000000..d088906ab --- /dev/null +++ b/chain/consensus/common/cns_validations.go @@ -0,0 +1,451 @@ +package common + +import ( + "context" + "fmt" + + "github.com/Gurpartap/async" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/network" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/consensus" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + 
"github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/lib/sigs" + "github.com/filecoin-project/lotus/metrics" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + pubsub "github.com/libp2p/go-libp2p-pubsub" + cbg "github.com/whyrusleeping/cbor-gen" + "go.opencensus.io/stats" + "golang.org/x/xerrors" +) + +var log = logging.Logger("consensus-common") + +func CheckStateRoot(ctx context.Context, store *store.ChainStore, sm *stmgr.StateManager, b *types.FullBlock, baseTs *types.TipSet) async.ErrorFuture { + h := b.Header + return async.Err(func() error { + stateroot, precp, err := sm.TipSetState(ctx, baseTs) + if err != nil { + return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) + } + + if stateroot != h.ParentStateRoot { + msgs, err := store.MessagesForTipset(ctx, baseTs) + if err != nil { + log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) + } else { + log.Warn("Messages for tipset with mismatching state:") + for i, m := range msgs { + mm := m.VMMessage() + log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) + } + } + + return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) + } + + if precp != h.ParentMessageReceipts { + return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) + } + + return nil + }) +} + +func CheckMsgs(ctx 
context.Context, store *store.ChainStore, sm *stmgr.StateManager, submgr subnet.SubnetMgr, r *resolver.Resolver, netName address.SubnetID, b *types.FullBlock, baseTs *types.TipSet) []async.ErrorFuture { + h := b.Header + msgsCheck := async.Err(func() error { + if b.Cid() == build.WhitelistedBlock { + return nil + } + + if err := checkBlockMessages(ctx, store, sm, submgr, r, netName, b, baseTs); err != nil { + return xerrors.Errorf("block had invalid messages: %w", err) + } + return nil + }) + + baseFeeCheck := async.Err(func() error { + baseFee, err := store.ComputeBaseFee(ctx, baseTs) + if err != nil { + return xerrors.Errorf("computing base fee: %w", err) + } + if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { + return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", + b.Header.ParentBaseFee, baseFee) + } + return nil + }) + blockSigCheck := async.Err(func() error { + if err := sigs.CheckBlockSignature(ctx, h, b.Header.Miner); err != nil { + return xerrors.Errorf("check block signature failed: %w", err) + } + return nil + }) + + return []async.ErrorFuture{msgsCheck, baseFeeCheck, blockSigCheck} + +} + +func BlockSanityChecks(ctype hierarchical.ConsensusType, h *types.BlockHeader) error { + // Delegated consensus has no election proof. + switch ctype { + case hierarchical.Delegated: + if h.ElectionProof != nil { + return xerrors.Errorf("block must have nil election proof") + } + if h.Ticket != nil { + return xerrors.Errorf("block must have nil ticket") + } + default: + // FIXME: We currently support PoW and delegated, thus the + // default instead of specifying other consensus. This needs + // to change. 
+ if h.Ticket == nil { + return xerrors.Errorf("block must not have nil ticket") + } + } + + if h.BlockSig == nil { + return xerrors.Errorf("block had nil signature") + } + + if h.BLSAggregate == nil { + return xerrors.Errorf("block had nil bls aggregate signature") + } + + if h.Miner.Protocol() != address.SECP256K1 { + return xerrors.Errorf("block had non-secp miner address") + } + + if len(h.Parents) != 1 { + return xerrors.Errorf("must have 1 parent") + } + + return nil +} + +func checkBlockMessages(ctx context.Context, str *store.ChainStore, sm *stmgr.StateManager, submgr subnet.SubnetMgr, r *resolver.Resolver, netName address.SubnetID, b *types.FullBlock, baseTs *types.TipSet) error { + { + var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type + var pubks [][]byte + + for _, m := range b.BlsMessages { + sigCids = append(sigCids, m.Cid()) + + pubk, err := sm.GetBlsPublicKey(ctx, m.From, baseTs) + if err != nil { + return xerrors.Errorf("failed to load bls public to validate block: %w", err) + } + + pubks = append(pubks, pubk) + } + + if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { + return xerrors.Errorf("bls aggregate signature was invalid: %w", err) + } + } + + nonces := make(map[address.Address]uint64) + + stateroot, _, err := sm.TipSetState(ctx, baseTs) + if err != nil { + return err + } + + st, err := state.LoadStateTree(str.ActorStore(ctx), stateroot) + if err != nil { + return xerrors.Errorf("failed to load base state tree: %w", err) + } + + nv := sm.GetNetworkVersion(ctx, b.Header.Height) + pl := vm.PricelistByEpoch(baseTs.Height()) + var sumGasLimit int64 + checkMsg := func(msg types.ChainMsg) error { + m := msg.VMMessage() + + // Phase 1: syntactic validation, as defined in the spec + minGas := pl.OnChainMessage(msg.ChainLength()) + if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { + return err + } + + // ValidForBlockInclusion 
checks if any single message does not exceed BlockGasLimit + // So below is overflow safe + sumGasLimit += m.GasLimit + if sumGasLimit > build.BlockGasLimit { + return xerrors.Errorf("block gas limit exceeded") + } + + // Phase 2: (Partial) semantic validation: + // the sender exists and is an account actor, and the nonces make sense + var sender address.Address + if sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version13 { + sender, err = st.LookupID(m.From) + if err != nil { + return err + } + } else { + sender = m.From + } + + if _, ok := nonces[sender]; !ok { + // `GetActor` does not validate that this is an account actor. + act, err := st.GetActor(sender) + if err != nil { + return xerrors.Errorf("failed to get actor: %w", err) + } + + if !builtin.IsAccountActor(act.Code) { + return xerrors.New("Sender must be an account actor") + } + nonces[sender] = act.Nonce + } + + if nonces[sender] != m.Nonce { + return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) + } + nonces[sender]++ + + return nil + } + + // Validate message arrays in a temporary blockstore. 
+ tmpbs := bstore.NewMemory() + tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) + + bmArr := blockadt.MakeEmptyArray(tmpstore) + for i, m := range b.BlsMessages { + if err := checkMsg(m); err != nil { + return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) + } + + c, err := store.PutMessage(ctx, tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + + k := cbg.CborCid(c) + if err := bmArr.Set(uint64(i), &k); err != nil { + return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) + } + } + + smArr := blockadt.MakeEmptyArray(tmpstore) + for i, m := range b.SecpkMessages { + if err := checkMsg(m); err != nil { + return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) + } + + // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call + // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). + kaddr, err := sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) + if err != nil { + return xerrors.Errorf("failed to resolve key addr: %w", err) + } + + if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { + return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) + } + + c, err := store.PutMessage(ctx, tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + k := cbg.CborCid(c) + if err := smArr.Set(uint64(i), &k); err != nil { + return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) + } + } + + crossArr := blockadt.MakeEmptyArray(tmpstore) + // Preamble to get states required for cross-msg checks. 
+ var ( + parentSCA *sca.SCAState + snSCA *sca.SCAState + pstore blockadt.Store + snstore blockadt.Store + ) + // If subnet manager is not set we are in the root chain and we don't need to get parentSCA + // state + if submgr != nil { + parentSCA, pstore, err = getSCAState(ctx, sm, submgr, netName.Parent(), baseTs) + if err != nil { + return err + } + } + // Get SCA state in subnet. + snSCA, snstore, err = getSCAState(ctx, sm, submgr, netName, baseTs) + if err != nil { + return err + } + // Check cross messages + for i, m := range b.CrossMessages { + if err := checkCrossMsg(ctx, r, pstore, snstore, parentSCA, snSCA, m); err != nil { + return xerrors.Errorf("failed to check message %s: %w", m.Cid(), err) + } + + // FIXME: Should we try to apply the message before accepting the block? + // Check if the message can be applied before accepting it for proposal. + // if err := canApplyMsg(ctx, submgr, sm, netName, m); err != nil { + // return xerrors.Errorf("failed testing the application of cross-msg %s: %w", m.Cid(), err) + // } + // // NOTE: We don't check mesage against VM for cross shard messages. They are + // // checked in some other way. 
+ // if err := checkMsg(m); err != nil { + // return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) + // } + + c, err := store.PutMessage(ctx, tmpbs, m) + if err != nil { + return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) + } + + k := cbg.CborCid(c) + if err := crossArr.Set(uint64(i), &k); err != nil { + return xerrors.Errorf("failed to put cross message at index %d: %w", i, err) + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return err + } + + smroot, err := smArr.Root() + if err != nil { + return err + } + + crossroot, err := crossArr.Root() + if err != nil { + return err + } + + mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ + BlsMessages: bmroot, + SecpkMessages: smroot, + CrossMessages: crossroot, + }) + if err != nil { + return err + } + + if b.Header.Messages != mrcid { + return fmt.Errorf("messages didnt match message root in header") + } + + // Finally, flush. + return vm.Copy(ctx, tmpbs, str.ChainBlockstore(), mrcid) +} + +func ValidateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) { + stats.Record(ctx, metrics.BlockPublished.M(1)) + + if size := msg.Size(); size > 1<<20-1<<15 { + log.Errorf("ignoring oversize block (%dB)", size) + return pubsub.ValidationIgnore, "oversize_block" + } + + blk, what, err := DecodeAndCheckBlock(msg) + if err != nil { + log.Errorf("got invalid local block: %s", err) + return pubsub.ValidationIgnore, what + } + + msg.ValidatorData = blk + stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) + return pubsub.ValidationAccept, "" +} + +func DecodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { + blk, err := types.DecodeBlockMsg(msg.GetData()) + if err != nil { + return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) + } + + if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { + return nil, "too_many_messages", fmt.Errorf("block contains too many 
messages (%d)", count) + } + + // make sure we have a signature + if blk.Header.BlockSig == nil { + return nil, "missing_signature", fmt.Errorf("block without a signature") + } + + return blk, "", nil +} + +func ValidateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { + // TODO there has to be a simpler way to do this without the blockstore dance + // block headers use adt0 + store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory())) + bmArr := blockadt.MakeEmptyArray(store) + smArr := blockadt.MakeEmptyArray(store) + crossArr := blockadt.MakeEmptyArray(store) + + for i, m := range msg.BlsMessages { + c := cbg.CborCid(m) + if err := bmArr.Set(uint64(i), &c); err != nil { + return err + } + } + + for i, m := range msg.SecpkMessages { + c := cbg.CborCid(m) + if err := smArr.Set(uint64(i), &c); err != nil { + return err + } + } + + for i, m := range msg.CrossMessages { + c := cbg.CborCid(m) + if err := crossArr.Set(uint64(i), &c); err != nil { + return err + } + } + + bmroot, err := bmArr.Root() + if err != nil { + return err + } + + smroot, err := smArr.Root() + if err != nil { + return err + } + + crossroot, err := crossArr.Root() + if err != nil { + return err + } + + mrcid, err := store.Put(store.Context(), &types.MsgMeta{ + BlsMessages: bmroot, + SecpkMessages: smroot, + CrossMessages: crossroot, + }) + + if err != nil { + return err + } + + if msg.Header.Messages != mrcid { + return fmt.Errorf("messages didn't match root cid in header") + } + + return nil +} diff --git a/chain/consensus/common/crossmsg.go b/chain/consensus/common/crossmsg.go new file mode 100644 index 000000000..327c7d7ec --- /dev/null +++ b/chain/consensus/common/crossmsg.go @@ -0,0 +1,295 @@ +package common + +import ( + "context" + "sort" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + 
"github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "golang.org/x/xerrors" +) + +const crossMsgResolutionTimeout = 30 * time.Second + +func checkCrossMsg(ctx context.Context, r *resolver.Resolver, pstore, snstore blockadt.Store, parentSCA, snSCA *sca.SCAState, msg *types.Message) error { + + buApply, err := hierarchical.ApplyAsBottomUp(snSCA.NetworkName, msg) + if err != nil { + return xerrors.Errorf("error processing type to apply: %w", err) + } + if buApply { + return checkBottomUpMsg(ctx, r, snstore, snSCA, msg) + } + + // If the message needs to be applied top-down. + // sanity-check: the root chain doesn't support topDown messages, + // so return an error if parentSCA is nil and we are here. + if parentSCA == nil { + return xerrors.Errorf("root chains (id=%v) does not support topDown cross msgs", snSCA.NetworkName) + } + return checkTopDownMsg(pstore, parentSCA, snSCA, msg) +} + +// checkTopDownMsg validates the topdown message. +// - It checks that the msg nonce is larger than AppliedBottomUpNonce in the subnet SCA +// - It checks that the msg meta has been committed. +// - It resolves messages for msg-meta and verifies that the corresponding mesasge is included +// as part of MsgMeta. +func checkBottomUpMsg(ctx context.Context, r *resolver.Resolver, snstore blockadt.Store, snSCA *sca.SCAState, msg *types.Message) error { + // Check valid nonce in subnet where message is applied. 
+ if snSCA.AppliedBottomUpNonce != sca.MaxNonce && msg.Nonce < snSCA.AppliedBottomUpNonce { + return xerrors.Errorf("bottomup msg nonce reuse in subnet (nonce=%v, applied=%v", msg.Nonce, snSCA.AppliedTopDownNonce) + } + + // check bottomup meta has been committed for nonce in SCA + comMeta, found, err := snSCA.GetBottomUpMsgMeta(snstore, msg.Nonce) + if err != nil { + return xerrors.Errorf("getting bottomup msgmeta: %w", err) + } + if !found { + return xerrors.Errorf("No BottomUp meta found for nonce in SCA: %d", msg.Nonce) + } + + // Wait to resolve bottom-up messages for meta + c, err := comMeta.Cid() + if err != nil { + return err + } + // Adding a 30 seconds time out for block resolution. + // FIXME: We may need to figure out what to do if we never find the msgs + // to check. + ctx, cancel := context.WithTimeout(ctx, crossMsgResolutionTimeout) + defer cancel() + out := r.WaitCrossMsgsResolved(ctx, c, address.SubnetID(comMeta.From)) + select { + case <-ctx.Done(): + return xerrors.Errorf("context timeout") + case err := <-out: + if err != nil { + return xerrors.Errorf("error fully resolving messages: %s", err) + } + } + + // Get cross-messages + cross, found, err := r.ResolveCrossMsgs(ctx, c, address.SubnetID(comMeta.From)) + if err != nil { + return xerrors.Errorf("Error resolving messages: %v", err) + } + // sanity-check, it should always be found + if !found { + return xerrors.Errorf("messages haven't been resolver: %v", err) + } + // Check if the message is included in the committed msgMeta. + if !hasMsg(comMeta, msg, cross) { + xerrors.Errorf("message proposed no included in committed bottom-up msgMeta") + } + + // NOTE: Any additional check required? + return nil +} + +func hasMsg(meta *schema.CrossMsgMeta, msg *types.Message, batch []types.Message) bool { + for _, m := range batch { + // Changing original nonce to that of the MsgMeta as done + // by the crossPool. 
+ m.Nonce = uint64(meta.Nonce) + if msg.Equals(&m) { + return true + } + } + return false +} + +// checkTopDownMsg validates the topdown message. +// - It checks that the msg nonce is larger than AppliedBottomUpNonce in the subnet SCA +// Recall that applying crossMessages increases the AppliedNonce of the SCA in the subnet +// where the message is applied. +// - It checks that the cross-msg is committed in the sca of the parent chain +func checkTopDownMsg(pstore blockadt.Store, parentSCA, snSCA *sca.SCAState, msg *types.Message) error { + // Check valid nonce in subnet where message is applied. + if msg.Nonce < snSCA.AppliedTopDownNonce { + return xerrors.Errorf("topDown msg nonce reuse in subnet (nonce=%v, applied=%v", msg.Nonce, snSCA.AppliedTopDownNonce) + } + + // check the message for nonce is committed in sca. + comMsg, found, err := parentSCA.GetTopDownMsg(pstore, snSCA.NetworkName, msg.Nonce) + if err != nil { + return xerrors.Errorf("getting topDown msgs: %w", err) + } + if !found { + xerrors.Errorf("No TopDownMsg found for nonce in parent SCA: %d", msg.Nonce) + } + + if !comMsg.Equals(msg) { + xerrors.Errorf("Committed and proposed TopDownMsg for nonce %d not equal", msg.Nonce) + } + + // NOTE: Any additional check required? 
+ return nil + +} + +func ApplyCrossMsg(ctx context.Context, vmi *vm.VM, submgr subnet.SubnetMgr, + em stmgr.ExecMonitor, msg *types.Message, + ts *types.TipSet) error { + switch hierarchical.GetMsgType(msg) { + case hierarchical.TopDown, hierarchical.BottomUp: + // At this point, both messages are applied in the same way + return applyMsg(ctx, vmi, em, msg, ts) + } + + return xerrors.Errorf("Unknown cross-msg type") +} + +func applyMsg(ctx context.Context, vmi *vm.VM, em stmgr.ExecMonitor, + msg *types.Message, ts *types.TipSet) error { + // Serialize params + params := &sca.CrossMsgParams{ + Msg: *msg, + } + serparams, aerr := actors.SerializeParams(params) + if aerr != nil { + return xerrors.Errorf("failed serializing init actor params: %s", aerr) + } + apply := &types.Message{ + From: builtin.SystemActorAddr, + To: hierarchical.SubnetCoordActorAddr, + Nonce: msg.Nonce, + Value: big.Zero(), + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: 1 << 30, + Method: sca.Methods.ApplyMessage, + Params: serparams, + } + + // Before applying the message in subnet, if the destination + // account hasn't been initialized, init the account actor. + // TODO: When handling arbitrary cross-messages, we should check if + // we need to trigger the state change in this subnet, if not we may not + // need to do this. 
+ rto, err := params.Msg.To.RawAddr() + if err != nil { + return err + } + st := vmi.StateTree() + _, acterr := st.GetActor(rto) + if acterr != nil { + log.Debugw("Initializing To address for crossmsg", "address", rto) + _, _, err := vmi.CreateAccountActor(ctx, apply, rto) + if err != nil { + return xerrors.Errorf("failed to initialize address for crossmsg: %w", err) + } + } + + ret, actErr := vmi.ApplyImplicitMessage(ctx, apply) + if actErr != nil { + return xerrors.Errorf("failed to apply cross message :%w", actErr) + } + if em != nil { + if err := em.MessageApplied(ctx, ts, apply.Cid(), apply, ret, true); err != nil { + return xerrors.Errorf("callback failed on reward message: %w", err) + } + } + + if ret.ExitCode != 0 { + return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) + } + log.Debugw("Applied cross msg implicitly (original msg Cid)", "cid", msg.Cid()) + return nil +} + +func getSCAState(ctx context.Context, sm *stmgr.StateManager, submgr subnet.SubnetMgr, id address.SubnetID, ts *types.TipSet) (*sca.SCAState, blockadt.Store, error) { + + var st sca.SCAState + // if submgr == nil we are in root, so we can load the actor using the state manager. + if submgr == nil { + // Getting SCA state for the base tipset being checked/validated in the current chain + subnetAct, err := sm.LoadActor(ctx, hierarchical.SubnetCoordActorAddr, ts) + if err != nil { + return nil, nil, xerrors.Errorf("loading actor state: %w", err) + } + if err := sm.ChainStore().ActorStore(ctx).Get(ctx, subnetAct.Head, &st); err != nil { + return nil, nil, xerrors.Errorf("getting actor state: %w", err) + } + return &st, sm.ChainStore().ActorStore(ctx), nil + } + + // For subnets getting SCA state for the current baseTs is worthless. + // We get it the standard way. 
+ return submgr.GetSCAState(ctx, id) +} + +func sortCrossMsgs(ctx context.Context, sm *stmgr.StateManager, r *resolver.Resolver, msgs []types.ChainMsg, ts *types.TipSet) ([]*types.Message, error) { + buApply := map[uint64][]*types.Message{} + out := make([]*types.Message, len(msgs)) + + // Get messages that require sorting and organize them by duplicate nonce + i := 0 + for _, cm := range msgs { + m := cm.VMMessage() + + netName, err := stmgr.GetNetworkName(ctx, sm, ts.ParentState()) + if err != nil { + return []*types.Message{}, xerrors.Errorf("error getting network name: %w", err) + } + isBu, err := hierarchical.ApplyAsBottomUp(address.SubnetID(netName), m) + if err != nil { + return []*types.Message{}, xerrors.Errorf("error processing type to apply: %w", err) + } + + // Bottom-up messages are the ones that require exhaustive ordering + // top-down already come in order of nonce. + if isBu { + _, ok := buApply[m.Nonce] + if !ok { + buApply[m.Nonce] = make([]*types.Message, 0) + } + buApply[m.Nonce] = append(buApply[m.Nonce], m) + } else { + // Append top-down messages as they can be ordered directly. + out[i] = m + i++ + } + } + + // Sort meta nonces + j := 0 + metaNonces := make(NonceArray, len(buApply)) + for k := range buApply { + metaNonces[j] = k + j++ + } + sort.Sort(metaNonces) + + // GetSCA to get bottomUp messages for nonce. We don't need + // subnet-specific information here. + sca, store, err := getSCAState(ctx, sm, nil, address.UndefSubnetID, ts) + if err != nil { + return []*types.Message{}, err + } + // For each meta nonce, get all messages and sort them + // by nonce. 
+ for _, n := range metaNonces { + mabu, err := sortByOriginalNonce(ctx, r, n, sca, store, buApply[n]) + if err != nil { + return []*types.Message{}, err + } + copy(out[i:], mabu) + i += len(mabu) + } + return out, nil + +} diff --git a/chain/consensus/common/crossmsg_test.go b/chain/consensus/common/crossmsg_test.go new file mode 100644 index 000000000..3c5121543 --- /dev/null +++ b/chain/consensus/common/crossmsg_test.go @@ -0,0 +1,30 @@ +package common + +import ( + "sort" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/types" + "github.com/stretchr/testify/require" +) + +func TestOrderMsgs(t *testing.T) { + l := MessageArray([]*types.Message{newMsg(7), newMsg(5), newMsg(3), newMsg(9)}) + sort.Sort(l) + require.Equal(t, l[0].Nonce, uint64(3)) + require.Equal(t, l[1].Nonce, uint64(5)) + require.Equal(t, l[2].Nonce, uint64(7)) + require.Equal(t, l[3].Nonce, uint64(9)) +} + +func newMsg(nonce uint64) *types.Message { + return &types.Message{ + To: address.Undef, + From: address.Undef, + Value: big.Zero(), + Nonce: nonce, + Params: nil, + } +} diff --git a/chain/consensus/delegcns/compute_state.go b/chain/consensus/common/executor.go similarity index 55% rename from chain/consensus/delegcns/compute_state.go rename to chain/consensus/common/executor.go index 0e10c0fb8..4aaba9f72 100644 --- a/chain/consensus/delegcns/compute_state.go +++ b/chain/consensus/common/executor.go @@ -1,12 +1,14 @@ -package delegcns +package common import ( "context" "sync/atomic" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/consensus/actors/registry" + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" 
"github.com/filecoin-project/lotus/chain/rand" "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -30,7 +32,7 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { updates := []stmgr.Upgrade{{ Height: -1, - Network: network.Version14, + Network: network.Version15, Migration: nil, Expensive: true, }, @@ -46,17 +48,24 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { return us } -type tipSetExecutor struct{} +type tipSetExecutor struct { + submgr subnet.SubnetMgr +} func (t *tipSetExecutor) NewActorRegistry() *vm.ActorRegistry { return registry.NewActorRegistry() } -func TipSetExecutor() stmgr.Executor { - return &tipSetExecutor{} +func TipSetExecutor(submgr subnet.SubnetMgr) stmgr.Executor { + return &tipSetExecutor{submgr} +} + +func RootTipSetExecutor() stmgr.Executor { + return &tipSetExecutor{nil} } -func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { +func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, cr *resolver.Resolver, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { + done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal) defer done() @@ -65,16 +74,16 @@ func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager partDone() }() - makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { + makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) { vmopt := &vm.VMOpts{ StateBase: base, - Epoch: epoch, + Epoch: e, Rand: r, Bstore: sm.ChainStore().StateBlockstore(), Actors: registry.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: 
sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, e), BaseFee: baseFee, LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), } @@ -82,7 +91,7 @@ func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager return sm.VMConstructor()(ctx, vmopt) } - vmi, err := makeVmWithBaseState(pstate) + vmi, err := makeVmWithBaseStateAndEpoch(pstate, epoch) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) } @@ -96,13 +105,12 @@ func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager } if pstate != newState { - vmi, err = makeVmWithBaseState(newState) + vmi, err = makeVmWithBaseStateAndEpoch(newState, i) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) } } - vmi.SetBlockHeight(i + 1) pstate = newState } @@ -137,29 +145,45 @@ func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager processedMsgs[m.Cid()] = struct{}{} } - rwMsg := &types.Message{ - From: reward.Address, - To: b.Miner, - Nonce: uint64(epoch), - Value: types.FromFil(1), // always reward 1 fil - GasFeeCap: types.NewInt(0), - GasPremium: types.NewInt(0), - GasLimit: 1 << 30, - Method: 0, + // FIXME: Here we send rewards for miners. + // Rewards are only applied in the root, for subnets + // rewards are disabled at this point and miners only get + // the gas reward. + // if t.submgr == nil we are in root net. + reward := gasReward + // In root consensus, there is currently a static reward of 1FIL per block. + // (this is mainly for testing purposes). 
+ if t.submgr == nil { + reward = big.Add(reward, types.FromFil(1)) } - ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) - if actErr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) + if err := applyMiningRewards(ctx, vmi, em, b, epoch, ts, reward); err != nil { + return cid.Undef, cid.Undef, err } - if em != nil { - if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) - } + + // Sort cross-messages deterministically before applying them + crossm, err := sortCrossMsgs(ctx, sm, cr, b.CrossMessages, ts) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("error sorting cross-msgs: %w", err) } - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) + processedMsgs = make(map[cid.Cid]struct{}) + for _, m := range crossm { + // m := crossm.VMMessage() + // additional sanity-check to avoid processing a message + // included in a block twice (although this is already checked + // by SCA, and there are a few more additional checks, so this + // may not be needed). 
+ if _, found := processedMsgs[m.Cid()]; found { + continue + } + log.Infof("Executing cross message: %v", crossm) + + if err := ApplyCrossMsg(ctx, vmi, t.submgr, em, m, ts); err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("cross messsage application failed: %w", err) + } + processedMsgs[m.Cid()] = struct{}{} } + } partDone() @@ -190,7 +214,34 @@ func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager return st, rectroot, nil } -func (t *tipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) { +func applyMiningRewards(ctx context.Context, vmi *vm.VM, em stmgr.ExecMonitor, b store.BlockMessages, epoch abi.ChainEpoch, ts *types.TipSet, value abi.TokenAmount) error { + rwMsg := &types.Message{ + From: reward.RewardActorAddr, + To: b.Miner, + Nonce: uint64(epoch), + Value: value, + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + GasLimit: 1 << 30, + Method: 0, + } + ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) + if actErr != nil { + return xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) + } + if em != nil { + if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { + return xerrors.Errorf("callback failed on reward message: %w", err) + } + } + + if ret.ExitCode != 0 { + return xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) + } + return nil +} + +func (t *tipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, cr *resolver.Resolver, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) { ctx, span := trace.StartSpan(ctx, "computeTipSetState") defer span.End() @@ -209,7 +260,7 @@ func (t *tipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag var parentEpoch abi.ChainEpoch pstate := blks[0].ParentStateRoot if blks[0].Height > 0 { - 
parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0]) + parent, err := sm.ChainStore().GetBlock(ctx, blks[0].Parents[0]) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) } @@ -217,16 +268,16 @@ func (t *tipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag parentEpoch = parent.Height } - r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), nil) + r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon(), sm.GetNetworkVersion) - blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts) + blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ctx, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) } baseFee := blks[0].ParentBaseFee - return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts) + return t.ApplyBlocks(ctx, sm, cr, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts) } var _ stmgr.Executor = &tipSetExecutor{} diff --git a/chain/consensus/common/mine.go b/chain/consensus/common/mine.go new file mode 100644 index 000000000..bff38b317 --- /dev/null +++ b/chain/consensus/common/mine.go @@ -0,0 +1,142 @@ +package common + +import ( + "context" + + "github.com/filecoin-project/go-state-types/crypto" + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/consensus" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + "golang.org/x/xerrors" +) + +func PrepareBlockForSignature(ctx context.Context, sm *stmgr.StateManager, bt *lapi.BlockTemplate) (*types.FullBlock, error) { + pts, err := sm.ChainStore().LoadTipSet(ctx, bt.Parents) + if err != nil { + return nil, xerrors.Errorf("failed to load parent tipset: %w", err) + } + + st, recpts, err := sm.TipSetState(ctx, pts) + if err != nil { + return nil, xerrors.Errorf("failed to load tipset state: %w", err) + } + + next := 
&types.BlockHeader{ + Miner: bt.Miner, + Parents: bt.Parents.Cids(), + Ticket: bt.Ticket, + ElectionProof: bt.Eproof, + + BeaconEntries: bt.BeaconValues, + Height: bt.Epoch, + Timestamp: bt.Timestamp, + WinPoStProof: bt.WinningPoStProof, + ParentStateRoot: st, + ParentMessageReceipts: recpts, + } + + var blsMessages []*types.Message + var secpkMessages []*types.SignedMessage + + var blsMsgCids, secpkMsgCids, crossMsgCids []cid.Cid + var blsSigs []crypto.Signature + for _, msg := range bt.Messages { + if msg.Signature.Type == crypto.SigTypeBLS { + blsSigs = append(blsSigs, msg.Signature) + blsMessages = append(blsMessages, &msg.Message) + + c, err := sm.ChainStore().PutMessage(ctx, &msg.Message) + if err != nil { + return nil, err + } + + blsMsgCids = append(blsMsgCids, c) + } else { + c, err := sm.ChainStore().PutMessage(ctx, msg) + if err != nil { + return nil, err + } + + secpkMsgCids = append(secpkMsgCids, c) + secpkMessages = append(secpkMessages, msg) + } + } + + for _, msg := range bt.CrossMessages { + c, err := sm.ChainStore().PutMessage(ctx, msg) + if err != nil { + return nil, err + } + + crossMsgCids = append(crossMsgCids, c) + } + + store := sm.ChainStore().ActorStore(ctx) + blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids) + if err != nil { + return nil, xerrors.Errorf("building bls amt: %w", err) + } + secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids) + if err != nil { + return nil, xerrors.Errorf("building secpk amt: %w", err) + } + crossmsgroot, err := consensus.ToMessagesArray(store, crossMsgCids) + if err != nil { + return nil, xerrors.Errorf("building cross amt: %w", err) + } + + mmcid, err := store.Put(store.Context(), &types.MsgMeta{ + BlsMessages: blsmsgroot, + SecpkMessages: secpkmsgroot, + CrossMessages: crossmsgroot, + }) + if err != nil { + return nil, err + } + next.Messages = mmcid + + aggSig, err := consensus.AggregateSignatures(blsSigs) + if err != nil { + return nil, err + } + + next.BLSAggregate = 
aggSig + pweight, err := sm.ChainStore().Weight(ctx, pts) + if err != nil { + return nil, err + } + next.ParentWeight = pweight + + baseFee, err := sm.ChainStore().ComputeBaseFee(ctx, pts) + if err != nil { + return nil, xerrors.Errorf("computing base fee: %w", err) + } + next.ParentBaseFee = baseFee + return &types.FullBlock{ + Header: next, + BlsMessages: blsMessages, + SecpkMessages: secpkMessages, + CrossMessages: bt.CrossMessages, + }, nil + +} + +func SignBlock(ctx context.Context, w lapi.Wallet, b *types.FullBlock) error { + next := b.Header + nosigbytes, err := next.SigningBytes() + if err != nil { + return xerrors.Errorf("failed to get signing bytes for block: %w", err) + } + + sig, err := w.WalletSign(ctx, next.Miner, nosigbytes, lapi.MsgMeta{ + Type: lapi.MTBlock, + }) + if err != nil { + return xerrors.Errorf("failed to sign new block: %w", err) + } + + next.BlockSig = sig + return nil +} diff --git a/chain/consensus/params/params.go b/chain/consensus/common/params/params.go similarity index 100% rename from chain/consensus/params/params.go rename to chain/consensus/common/params/params.go diff --git a/chain/consensus/common/utils.go b/chain/consensus/common/utils.go new file mode 100644 index 000000000..087bd63f5 --- /dev/null +++ b/chain/consensus/common/utils.go @@ -0,0 +1,92 @@ +package common + +import ( + "context" + "sort" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" +) + +type MessageArray []*types.Message + +func (ma MessageArray) Len() int { + return len(ma) +} + +func (ma MessageArray) Less(i, j int) bool { + return ma[i].Nonce <= ma[j].Nonce +} + +func (ma MessageArray) Swap(i, j int) { + ma[i], ma[j] = ma[j], ma[i] +} + +type NonceArray []uint64 + +func 
(ma NonceArray) Len() int { + return len(ma) +} + +func (ma NonceArray) Less(i, j int) bool { + return ma[i] <= ma[j] +} + +func (ma NonceArray) Swap(i, j int) { + ma[i], ma[j] = ma[j], ma[i] +} + +// Take messages from meta before the transformation +// to the meta nonce, and recover original nonce +func recoverOriginalNonce(ctx context.Context, r *resolver.Resolver, n uint64, sca *sca.SCAState, + store adt.Store, msg []*types.Message) ([]*types.Message, error) { + meta, found, err := sca.GetBottomUpMsgMeta(store, n) + if err != nil { + return []*types.Message{}, xerrors.Errorf("getting bottomup msgmeta: %w", err) + } + if !found { + return []*types.Message{}, xerrors.Errorf("No BottomUp meta found for nonce in SCA: %d", n) + } + c, _ := meta.Cid() + orig, _, err := r.ResolveCrossMsgs(ctx, c, address.SubnetID(meta.From)) + if err != nil { + return []*types.Message{}, xerrors.Errorf("error resolving cross-msgs: %w", err) + } + + for _, o := range orig { + for _, m := range msg { + origNonce := o.Nonce + // If changing original to the meta nonce is equal + // it means they are the same msg and we can recover original nonce + o.Nonce = n + if o.Equals(m) { + m.Nonce = origNonce + } + } + } + return msg, nil + +} + +func sortByOriginalNonce(ctx context.Context, r *resolver.Resolver, n uint64, sca *sca.SCAState, + store adt.Store, msg []*types.Message) ([]*types.Message, error) { + bu, err := recoverOriginalNonce(ctx, r, n, sca, store, msg) + if err != nil { + return []*types.Message{}, err + } + + // Sort messages + mabu := MessageArray(bu) + sort.Sort(mabu) + + // Recover meta nonce in msgs. 
+ for i := range mabu { + mabu[i].Nonce = n + } + + return mabu, nil +} diff --git a/chain/consensus/delegcns/delegated.go b/chain/consensus/delegcns/delegated.go index fe609b266..fccb08c5e 100644 --- a/chain/consensus/delegcns/delegated.go +++ b/chain/consensus/delegcns/delegated.go @@ -8,32 +8,29 @@ import ( "github.com/Gurpartap/async" "github.com/filecoin-project/go-state-types/big" "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" - cbg "github.com/whyrusleeping/cbor-gen" "go.opencensus.io/stats" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/consensus" - "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/consensus/common" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" - blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) var _ consensus.Consensus = &Delegated{} @@ -53,6 +50,14 @@ type Delegated 
struct { verifier ffiwrapper.Verifier genesis *types.TipSet + + subMgr subnet.SubnetMgr + + r *resolver.Resolver + + // We could get network name from state manager, but with this + // we avoid having fetch it for every block validation. + netName address.SubnetID } var producer = func() address.Address { @@ -67,24 +72,28 @@ var producer = func() address.Address { // the theoretical max height based on systime are quickly rejected const MaxHeightDrift = 5 -func NewDelegatedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier ffiwrapper.Verifier, genesis chain.Genesis) consensus.Consensus { +func NewDelegatedConsensus(sm *stmgr.StateManager, submgr subnet.SubnetMgr, beacon beacon.Schedule, + r *resolver.Resolver, verifier ffiwrapper.Verifier, + genesis chain.Genesis, netName dtypes.NetworkName) consensus.Consensus { return &Delegated{ store: sm.ChainStore(), beacon: beacon, + r: r, sm: sm, verifier: verifier, genesis: genesis, + subMgr: submgr, + netName: address.SubnetID(netName), } } func (deleg *Delegated) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) { - if err := blockSanityChecks(b.Header); err != nil { + if err := common.BlockSanityChecks(hierarchical.Delegated, b.Header); err != nil { return xerrors.Errorf("incoming header failed basic sanity checks: %w", err) } h := b.Header - - baseTs, err := deleg.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) + baseTs, err := deleg.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } @@ -107,16 +116,7 @@ func (deleg *Delegated) ValidateBlock(ctx context.Context, b *types.FullBlock) ( log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix()) } - msgsCheck := async.Err(func() error { - if b.Cid() == build.WhitelistedBlock { - return nil - } - - if err := deleg.checkBlockMessages(ctx, b, baseTs); err != nil { - return xerrors.Errorf("block had 
invalid messages: %w", err) - } - return nil - }) + msgsChecks := common.CheckMsgs(ctx, deleg.store, deleg.sm, deleg.subMgr, deleg.r, deleg.netName, b, baseTs) minerCheck := async.Err(func() error { if err := deleg.minerIsValid(ctx, h.Miner, baseTs); err != nil { @@ -125,17 +125,6 @@ func (deleg *Delegated) ValidateBlock(ctx context.Context, b *types.FullBlock) ( return nil }) - baseFeeCheck := async.Err(func() error { - baseFee, err := deleg.store.ComputeBaseFee(ctx, baseTs) - if err != nil { - return xerrors.Errorf("computing base fee: %w", err) - } - if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { - return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", - b.Header.ParentBaseFee, baseFee) - } - return nil - }) pweight, err := Weight(context.TODO(), nil, baseTs) if err != nil { return xerrors.Errorf("getting parent weight: %w", err) @@ -146,48 +135,13 @@ func (deleg *Delegated) ValidateBlock(ctx context.Context, b *types.FullBlock) ( b.Header.ParentWeight, pweight) } - stateRootCheck := async.Err(func() error { - stateroot, precp, err := deleg.sm.TipSetState(ctx, baseTs) - if err != nil { - return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) - } - - if stateroot != h.ParentStateRoot { - msgs, err := deleg.store.MessagesForTipset(baseTs) - if err != nil { - log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) - } else { - log.Warn("Messages for tipset with mismatching state:") - for i, m := range msgs { - mm := m.VMMessage() - log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) - } - } - - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) - } - - if precp != h.ParentMessageReceipts { - return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) - } - - return nil - }) - - blockSigCheck := 
async.Err(func() error { - if err := sigs.CheckBlockSignature(ctx, h, b.Header.Miner); err != nil { - return xerrors.Errorf("check block signature failed: %w", err) - } - return nil - }) + stateRootCheck := common.CheckStateRoot(ctx, deleg.store, deleg.sm, b, baseTs) await := []async.ErrorFuture{ minerCheck, - blockSigCheck, - msgsCheck, - baseFeeCheck, stateRootCheck, } + await = append(await, msgsChecks...) var merr error for _, fut := range await { @@ -217,194 +171,6 @@ func (deleg *Delegated) ValidateBlock(ctx context.Context, b *types.FullBlock) ( return nil } -func blockSanityChecks(h *types.BlockHeader) error { - if h.ElectionProof != nil { - return xerrors.Errorf("block must have nil election proof") - } - - if h.Ticket != nil { - return xerrors.Errorf("block must have nil ticket") - } - - if h.BlockSig == nil { - return xerrors.Errorf("block had nil signature") - } - - if h.BLSAggregate == nil { - return xerrors.Errorf("block had nil bls aggregate signature") - } - - if h.Miner.Protocol() != address.SECP256K1 { - return xerrors.Errorf("block had non-secp miner address") - } - - if len(h.Parents) != 1 { - return xerrors.Errorf("must have 1 parent") - } - - return nil -} - -// TODO: We should extract this somewhere else and make the message pool and miner use the same logic -func (deleg *Delegated) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { - { - var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type - var pubks [][]byte - - for _, m := range b.BlsMessages { - sigCids = append(sigCids, m.Cid()) - - pubk, err := deleg.sm.GetBlsPublicKey(ctx, m.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to load bls public to validate block: %w", err) - } - - pubks = append(pubks, pubk) - } - - if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { - return xerrors.Errorf("bls aggregate signature was invalid: %w", err) 
- } - } - - nonces := make(map[address.Address]uint64) - - stateroot, _, err := deleg.sm.TipSetState(ctx, baseTs) - if err != nil { - return err - } - - st, err := state.LoadStateTree(deleg.store.ActorStore(ctx), stateroot) - if err != nil { - return xerrors.Errorf("failed to load base state tree: %w", err) - } - - nv := deleg.sm.GetNtwkVersion(ctx, b.Header.Height) - pl := vm.PricelistByEpoch(baseTs.Height()) - var sumGasLimit int64 - checkMsg := func(msg types.ChainMsg) error { - m := msg.VMMessage() - - // Phase 1: syntactic validation, as defined in the spec - minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { - return err - } - - // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit - // So below is overflow safe - sumGasLimit += m.GasLimit - if sumGasLimit > build.BlockGasLimit { - return xerrors.Errorf("block gas limit exceeded") - } - - // Phase 2: (Partial) semantic validation: - // the sender exists and is an account actor, and the nonces make sense - var sender address.Address - if deleg.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { - sender, err = st.LookupID(m.From) - if err != nil { - return err - } - } else { - sender = m.From - } - - if _, ok := nonces[sender]; !ok { - // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(sender) - if err != nil { - return xerrors.Errorf("failed to get actor: %w", err) - } - - if !builtin.IsAccountActor(act.Code) { - return xerrors.New("Sender must be an account actor") - } - nonces[sender] = act.Nonce - } - - if nonces[sender] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) - } - nonces[sender]++ - - return nil - } - - // Validate message arrays in a temporary blockstore. 
- tmpbs := bstore.NewMemory() - tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) - - bmArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.BlsMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) - } - - c, err := store.PutMessage(tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - - k := cbg.CborCid(c) - if err := bmArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) - } - } - - smArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.SecpkMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) - } - - // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call - // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). - kaddr, err := deleg.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to resolve key addr: %w", err) - } - - if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { - return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) - } - - c, err := store.PutMessage(tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - k := cbg.CborCid(c) - if err := smArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return err - } - - smroot, err := smArr.Root() - if err != nil { - return err - } - - mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - if err != nil { - return err - } - - if b.Header.Messages != mrcid { - return fmt.Errorf("messages didnt match message root in header") - } - - // Finally, 
flush. - return vm.Copy(ctx, tmpbs, deleg.store.ChainBlockstore(), mrcid) -} - func (deleg *Delegated) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { if deleg.genesis == nil { return false @@ -437,7 +203,7 @@ func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (t func (deleg *Delegated) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) { if self { - return deleg.validateLocalBlock(ctx, msg) + return common.ValidateLocalBlock(ctx, msg) } // track validation time @@ -453,7 +219,7 @@ func (deleg *Delegated) ValidateBlockPubsub(ctx context.Context, self bool, msg panic(what) } - blk, what, err := deleg.decodeAndCheckBlock(msg) + blk, what, err := common.DecodeAndCheckBlock(msg) if err != nil { log.Error("got invalid block over pubsub: ", err) recordFailureFlagPeer(what) @@ -461,7 +227,7 @@ func (deleg *Delegated) ValidateBlockPubsub(ctx context.Context, self bool, msg } // validate the block meta: the Message CID in the header must match the included messages - err = deleg.validateMsgMeta(ctx, blk) + err = common.ValidateMsgMeta(ctx, blk) if err != nil { log.Warnf("error validating message metadata: %s", err) recordFailureFlagPeer("invalid_block_meta") @@ -484,90 +250,6 @@ func (deleg *Delegated) ValidateBlockPubsub(ctx context.Context, self bool, msg return pubsub.ValidationAccept, "" } -func (deleg *Delegated) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) { - stats.Record(ctx, metrics.BlockPublished.M(1)) - - if size := msg.Size(); size > 1<<20-1<<15 { - log.Errorf("ignoring oversize block (%dB)", size) - return pubsub.ValidationIgnore, "oversize_block" - } - - blk, what, err := deleg.decodeAndCheckBlock(msg) - if err != nil { - log.Errorf("got invalid local block: %s", err) - return pubsub.ValidationIgnore, what - } - - msg.ValidatorData = blk - stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) - return pubsub.ValidationAccept, 
"" -} - -func (deleg *Delegated) decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { - blk, err := types.DecodeBlockMsg(msg.GetData()) - if err != nil { - return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) - } - - if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { - return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count) - } - - // make sure we have a signature - if blk.Header.BlockSig == nil { - return nil, "missing_signature", fmt.Errorf("block without a signature") - } - - return blk, "", nil -} - -func (deleg *Delegated) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { - // TODO there has to be a simpler way to do this without the blockstore dance - // block headers use adt0 - store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory())) - bmArr := blockadt.MakeEmptyArray(store) - smArr := blockadt.MakeEmptyArray(store) - - for i, m := range msg.BlsMessages { - c := cbg.CborCid(m) - if err := bmArr.Set(uint64(i), &c); err != nil { - return err - } - } - - for i, m := range msg.SecpkMessages { - c := cbg.CborCid(m) - if err := smArr.Set(uint64(i), &c); err != nil { - return err - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return err - } - - smroot, err := smArr.Root() - if err != nil { - return err - } - - mrcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - - if err != nil { - return err - } - - if msg.Header.Messages != mrcid { - return fmt.Errorf("messages didn't match root cid in header") - } - - return nil -} - func (deleg *Delegated) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) { baseTs := deleg.store.GetHeaviestTipSet() diff --git a/chain/consensus/delegcns/mine.go b/chain/consensus/delegcns/mine.go index acf88e4c3..f65d5e166 100644 --- a/chain/consensus/delegcns/mine.go +++ 
b/chain/consensus/delegcns/mine.go @@ -4,16 +4,14 @@ import ( "context" "time" - "github.com/ipfs/go-cid" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/crypto" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/consensus" + "github.com/filecoin-project/lotus/chain/consensus/common" "github.com/filecoin-project/lotus/chain/types" ) @@ -55,6 +53,17 @@ func Mine(ctx context.Context, api v1api.FullNode) error { log.Errorw("selecting messages failed", "error", err) } + // Get cross-message pool from subnet. + nn, err := api.StateNetworkName(ctx) + if err != nil { + return err + } + crossmsgs, err := api.GetCrossMsgsPool(ctx, address.SubnetID(nn), base.Height()+1) + if err != nil { + log.Errorw("selecting cross-messages failed", "error", err) + } + log.Debugf("CrossMsgs being proposed in block @%s: %d", base.Height()+1, len(crossmsgs)) + bh, err := api.MinerCreateBlock(ctx, &lapi.BlockTemplate{ Miner: miner, Parents: base.Key(), @@ -65,6 +74,7 @@ func Mine(ctx context.Context, api v1api.FullNode) error { Epoch: base.Height() + 1, Timestamp: base.MinTimestamp() + build.BlockDelaySecs, WinningPoStProof: nil, + CrossMessages: crossmsgs, }) if err != nil { log.Errorw("creating block failed", "error", err) @@ -75,6 +85,7 @@ func Mine(ctx context.Context, api v1api.FullNode) error { Header: bh.Header, BlsMessages: bh.BlsMessages, SecpkMessages: bh.SecpkMessages, + CrossMessages: bh.CrossMessages, }) if err != nil { log.Errorw("submitting block failed", "error", err) @@ -89,114 +100,14 @@ func Mine(ctx context.Context, api v1api.FullNode) error { } func (deleg *Delegated) CreateBlock(ctx context.Context, w lapi.Wallet, bt *lapi.BlockTemplate) (*types.FullBlock, error) { - pts, err := deleg.sm.ChainStore().LoadTipSet(bt.Parents) - if err != nil { - return nil, xerrors.Errorf("failed to load 
parent tipset: %w", err) - } - - st, recpts, err := deleg.sm.TipSetState(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("failed to load tipset state: %w", err) - } - - next := &types.BlockHeader{ - Miner: bt.Miner, - Parents: bt.Parents.Cids(), - Ticket: bt.Ticket, - ElectionProof: bt.Eproof, - - BeaconEntries: bt.BeaconValues, - Height: bt.Epoch, - Timestamp: bt.Timestamp, - WinPoStProof: bt.WinningPoStProof, - ParentStateRoot: st, - ParentMessageReceipts: recpts, - } - - var blsMessages []*types.Message - var secpkMessages []*types.SignedMessage - - var blsMsgCids, secpkMsgCids []cid.Cid - var blsSigs []crypto.Signature - for _, msg := range bt.Messages { - if msg.Signature.Type == crypto.SigTypeBLS { - blsSigs = append(blsSigs, msg.Signature) - blsMessages = append(blsMessages, &msg.Message) - - c, err := deleg.sm.ChainStore().PutMessage(&msg.Message) - if err != nil { - return nil, err - } - - blsMsgCids = append(blsMsgCids, c) - } else { - c, err := deleg.sm.ChainStore().PutMessage(msg) - if err != nil { - return nil, err - } - - secpkMsgCids = append(secpkMsgCids, c) - secpkMessages = append(secpkMessages, msg) - - } - } - - store := deleg.sm.ChainStore().ActorStore(ctx) - blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids) - if err != nil { - return nil, xerrors.Errorf("building bls amt: %w", err) - } - secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids) - if err != nil { - return nil, xerrors.Errorf("building secpk amt: %w", err) - } - - mmcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: blsmsgroot, - SecpkMessages: secpkmsgroot, - }) + b, err := common.PrepareBlockForSignature(ctx, deleg.sm, bt) if err != nil { return nil, err } - next.Messages = mmcid - aggSig, err := consensus.AggregateSignatures(blsSigs) + err = common.SignBlock(ctx, w, b) if err != nil { return nil, err } - - next.BLSAggregate = aggSig - pweight, err := deleg.sm.ChainStore().Weight(ctx, pts) - if err != nil { - return nil, err 
- } - next.ParentWeight = pweight - - baseFee, err := deleg.sm.ChainStore().ComputeBaseFee(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("computing base fee: %w", err) - } - next.ParentBaseFee = baseFee - - nosigbytes, err := next.SigningBytes() - if err != nil { - return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err) - } - - sig, err := w.WalletSign(ctx, bt.Miner, nosigbytes, lapi.MsgMeta{ - Type: lapi.MTBlock, - }) - if err != nil { - return nil, xerrors.Errorf("failed to sign new block: %w", err) - } - - next.BlockSig = sig - - fullBlock := &types.FullBlock{ - Header: next, - BlsMessages: blsMessages, - SecpkMessages: secpkMessages, - } - - return fullBlock, nil + return b, nil } diff --git a/chain/consensus/filcns/compute_state.go b/chain/consensus/filcns/compute_state.go index 927562840..5804c0dd6 100644 --- a/chain/consensus/filcns/compute_state.go +++ b/chain/consensus/filcns/compute_state.go @@ -4,6 +4,7 @@ import ( "context" "sync/atomic" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/rand" "github.com/ipfs/go-cid" @@ -16,12 +17,21 @@ import ( "github.com/filecoin-project/go-state-types/big" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + /* inline-gen template + {{range .actorVersions}} + exported{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin/exported"{{end}} + + /* inline-gen start */ + exported0 "github.com/filecoin-project/specs-actors/actors/builtin/exported" exported2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/exported" exported3 "github.com/filecoin-project/specs-actors/v3/actors/builtin/exported" exported4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/exported" exported5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/exported" exported6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/exported" + exported7 
"github.com/filecoin-project/specs-actors/v7/actors/builtin/exported" + + /* inline-gen end */ "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors" @@ -39,6 +49,11 @@ func NewActorRegistry() *vm.ActorRegistry { inv := vm.NewActorRegistry() // TODO: define all these properties on the actors themselves, in specs-actors. + /* inline-gen template + {{range .actorVersions}} + inv.Register(vm.ActorsVersionPredicate(actors.Version{{.}}), exported{{.}}.BuiltinActors()...){{end}} + + /* inline-gen start */ inv.Register(vm.ActorsVersionPredicate(actors.Version0), exported0.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version2), exported2.BuiltinActors()...) @@ -46,6 +61,9 @@ func NewActorRegistry() *vm.ActorRegistry { inv.Register(vm.ActorsVersionPredicate(actors.Version4), exported4.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version5), exported5.BuiltinActors()...) inv.Register(vm.ActorsVersionPredicate(actors.Version6), exported6.BuiltinActors()...) + inv.Register(vm.ActorsVersionPredicate(actors.Version7), exported7.BuiltinActors()...) 
+ + /* inline-gen end */ return inv } @@ -75,16 +93,16 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager partDone() }() - makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { + makeVmWithBaseStateAndEpoch := func(base cid.Cid, e abi.ChainEpoch) (*vm.VM, error) { vmopt := &vm.VMOpts{ StateBase: base, - Epoch: epoch, + Epoch: e, Rand: r, Bstore: sm.ChainStore().StateBlockstore(), Actors: NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, e), BaseFee: baseFee, LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), } @@ -92,12 +110,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager return sm.VMConstructor()(ctx, vmopt) } - vmi, err := makeVmWithBaseState(pstate) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) - } - - runCron := func(epoch abi.ChainEpoch) error { + runCron := func(vmCron *vm.VM, epoch abi.ChainEpoch) error { cronMsg := &types.Message{ To: cron.Address, From: builtin.SystemActorAddr, @@ -109,56 +122,58 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager Method: cron.Methods.EpochTick, Params: nil, } - ret, err := vmi.ApplyImplicitMessage(ctx, cronMsg) + ret, err := vmCron.ApplyImplicitMessage(ctx, cronMsg) if err != nil { - return err + return xerrors.Errorf("running cron: %w", err) } + if em != nil { if err := em.MessageApplied(ctx, ts, cronMsg.Cid(), cronMsg, ret, true); err != nil { return xerrors.Errorf("callback failed on cron message: %w", err) } } if ret.ExitCode != 0 { - return xerrors.Errorf("CheckProofSubmissions exit was non-zero: %d", ret.ExitCode) + return xerrors.Errorf("cron exit was non-zero: %d", ret.ExitCode) } return nil } for i := parentEpoch; i < epoch; i++ { + var err error if i > parentEpoch { + vmCron, err := makeVmWithBaseStateAndEpoch(pstate, i) + if err != nil { + 
return cid.Undef, cid.Undef, xerrors.Errorf("making cron vm: %w", err) + } + // run cron for null rounds if any - if err := runCron(i); err != nil { - return cid.Undef, cid.Undef, err + if err = runCron(vmCron, i); err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("running cron: %w", err) } - pstate, err = vmi.Flush(ctx) + pstate, err = vmCron.Flush(ctx) if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("flushing vm: %w", err) + return cid.Undef, cid.Undef, xerrors.Errorf("flushing cron vm: %w", err) } } // handle state forks // XXX: The state tree - newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts) + pstate, err = sm.HandleStateForks(ctx, pstate, i, em, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) } - - if pstate != newState { - vmi, err = makeVmWithBaseState(newState) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) - } - } - - vmi.SetBlockHeight(i + 1) - pstate = newState } partDone() partDone = metrics.Timer(ctx, metrics.VMApplyMessages) + vmi, err := makeVmWithBaseStateAndEpoch(pstate, epoch) + if err != nil { + return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) + } + var receipts []cbg.CBORMarshaler processedMsgs := make(map[cid.Cid]struct{}) for _, b := range bms { @@ -226,7 +241,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager partDone() partDone = metrics.Timer(ctx, metrics.VMApplyCron) - if err := runCron(epoch); err != nil { + if err := runCron(vmi, epoch); err != nil { return cid.Cid{}, cid.Cid{}, err } @@ -255,7 +270,7 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager return st, rectroot, nil } -func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) { +func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm 
*stmgr.StateManager, cr *resolver.Resolver, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) { ctx, span := trace.StartSpan(ctx, "computeTipSetState") defer span.End() @@ -274,7 +289,7 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag var parentEpoch abi.ChainEpoch pstate := blks[0].ParentStateRoot if blks[0].Height > 0 { - parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0]) + parent, err := sm.ChainStore().GetBlock(ctx, blks[0].Parents[0]) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) } @@ -282,9 +297,9 @@ func (t *TipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManag parentEpoch = parent.Height } - r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon()) + r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon(), sm.GetNetworkVersion) - blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts) + blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ctx, ts) if err != nil { return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) } diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 7abd2cb77..2b77dc080 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -26,7 +26,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" @@ -90,19 +90,19 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) h := b.Header - baseTs, err := filec.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) + baseTs, err := 
filec.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } - winPoStNv := filec.sm.GetNtwkVersion(ctx, baseTs.Height()) + winPoStNv := filec.sm.GetNetworkVersion(ctx, baseTs.Height()) lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, filec.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } - prevBeacon, err := filec.store.GetLatestBeaconEntry(baseTs) + prevBeacon, err := filec.store.GetLatestBeaconEntry(ctx, baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) } @@ -171,7 +171,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) } if stateroot != h.ParentStateRoot { - msgs, err := filec.store.MessagesForTipset(baseTs) + msgs, err := filec.store.MessagesForTipset(ctx, baseTs) if err != nil { log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) } else { @@ -182,7 +182,7 @@ func (filec *FilecoinEC) ValidateBlock(ctx context.Context, b *types.FullBlock) } } - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) + return xerrors.Errorf("parent state root did not match computed state (%s != %s)", h.ParentStateRoot, stateroot) } if precp != h.ParentMessageReceipts { @@ -400,12 +400,21 @@ func (filec *FilecoinEC) VerifyWinningPoStProof(ctx context.Context, nv network. 
return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) } - sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand) + xsectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, filec.verifier, filec.sm, lbst, h.Miner, rand) if err != nil { return xerrors.Errorf("getting winning post sector set: %w", err) } - ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{ + sectors := make([]proof.SectorInfo, len(xsectors)) + for i, xsi := range xsectors { + sectors[i] = proof.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } + + ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof.WinningPoStVerifyInfo{ Randomness: rand, Proofs: h.WinPoStProof, ChallengedSectors: sectors, @@ -449,7 +458,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl stateroot, _, err := filec.sm.TipSetState(ctx, baseTs) if err != nil { - return err + return xerrors.Errorf("failed to compute tipsettate for %s: %w", baseTs.Key(), err) } st, err := state.LoadStateTree(filec.store.ActorStore(ctx), stateroot) @@ -457,7 +466,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("failed to load base state tree: %w", err) } - nv := filec.sm.GetNtwkVersion(ctx, b.Header.Height) + nv := filec.sm.GetNetworkVersion(ctx, b.Header.Height) pl := vm.PricelistByEpoch(baseTs.Height()) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { @@ -466,7 +475,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl // Phase 1: syntactic validation, as defined in the spec minGas := pl.OnChainMessage(msg.ChainLength()) if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { - return err + return xerrors.Errorf("msg %s invalid for block inclusion: %w", m.Cid(), err) } // ValidForBlockInclusion checks if any single 
message does not exceed BlockGasLimit @@ -479,10 +488,10 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl // Phase 2: (Partial) semantic validation: // the sender exists and is an account actor, and the nonces make sense var sender address.Address - if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { + if filec.sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version13 { sender, err = st.LookupID(m.From) if err != nil { - return err + return xerrors.Errorf("failed to lookup sender %s: %w", m.From, err) } } else { sender = m.From @@ -519,7 +528,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } @@ -532,7 +541,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl smArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.SecpkMessages { - if filec.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version14 { + if filec.sm.GetNetworkVersion(ctx, b.Header.Height) >= network.Version14 { if m.Signature.Type != crypto.SigTypeSecp256k1 { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) } @@ -553,7 +562,7 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } - c, err := store.PutMessage(tmpbs, m) + c, err := store.PutMessage(ctx, tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } @@ -565,20 +574,26 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl bmroot, err := bmArr.Root() if err != nil { - return err + return xerrors.Errorf("failed to root bls msgs: %w", err) + } smroot, 
err := smArr.Root() if err != nil { - return err + return xerrors.Errorf("failed to root secp msgs: %w", err) } + emptyroot, err := blockadt.MakeEmptyArray(tmpstore).Root() + if err != nil { + return err + } mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, + CrossMessages: emptyroot, }) if err != nil { - return err + return xerrors.Errorf("failed to put msg meta: %w", err) } if b.Header.Messages != mrcid { @@ -586,7 +601,12 @@ func (filec *FilecoinEC) checkBlockMessages(ctx context.Context, b *types.FullBl } // Finally, flush. - return vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid) + err = vm.Copy(ctx, tmpbs, filec.store.ChainBlockstore(), mrcid) + if err != nil { + return xerrors.Errorf("failed to flush:%w", err) + } + + return nil } func (filec *FilecoinEC) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { @@ -760,10 +780,17 @@ func (filec *FilecoinEC) validateMsgMeta(ctx context.Context, msg *types.BlockMs if err != nil { return err } - + emptyroot, err := blockadt.MakeEmptyArray(store).Root() + if err != nil { + return err + } + // TODO FIXME: No support for the application of cross-messages with + // filecoin consensus. To support cross-messages with Filecoin consensus this + // will need to change. 
mrcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, + CrossMessages: emptyroot, }) if err != nil { @@ -823,7 +850,7 @@ func (filec *FilecoinEC) checkPowerAndGetWorkerKey(ctx context.Context, bh *type key, err := stmgr.GetMinerWorkerRaw(ctx, filec.sm, lbst, bh.Miner) if err != nil { - log.Warnf("failed to resolve worker key for miner %s: %s", bh.Miner, err) + log.Warnf("failed to resolve worker key for miner %s and block height %d: %s", bh.Miner, bh.Height, err) return address.Undef, ErrSoftFailure } diff --git a/chain/consensus/filcns/mine.go b/chain/consensus/filcns/mine.go index 851b9cb35..ef42a7b06 100644 --- a/chain/consensus/filcns/mine.go +++ b/chain/consensus/filcns/mine.go @@ -11,10 +11,11 @@ import ( "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/types" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" ) func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api.BlockTemplate) (*types.FullBlock, error) { - pts, err := filec.sm.ChainStore().LoadTipSet(bt.Parents) + pts, err := filec.sm.ChainStore().LoadTipSet(ctx, bt.Parents) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -58,14 +59,14 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. 
blsSigs = append(blsSigs, msg.Signature) blsMessages = append(blsMessages, &msg.Message) - c, err := filec.sm.ChainStore().PutMessage(&msg.Message) + c, err := filec.sm.ChainStore().PutMessage(ctx, &msg.Message) if err != nil { return nil, err } blsMsgCids = append(blsMsgCids, c) - } else { - c, err := filec.sm.ChainStore().PutMessage(msg) + } else if msg.Signature.Type == crypto.SigTypeSecp256k1 { + c, err := filec.sm.ChainStore().PutMessage(ctx, msg) if err != nil { return nil, err } @@ -73,6 +74,8 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. secpkMsgCids = append(secpkMsgCids, c) secpkMessages = append(secpkMessages, msg) + } else { + return nil, xerrors.Errorf("unknown sig type: %d", msg.Signature.Type) } } @@ -86,9 +89,14 @@ func (filec *FilecoinEC) CreateBlock(ctx context.Context, w api.Wallet, bt *api. return nil, xerrors.Errorf("building secpk amt: %w", err) } + emptyroot, err := blockadt.MakeEmptyArray(store).Root() + if err != nil { + return nil, err + } mmcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: blsmsgroot, SecpkMessages: secpkmsgroot, + CrossMessages: emptyroot, }) if err != nil { return nil, err diff --git a/chain/consensus/filcns/upgrades.go b/chain/consensus/filcns/upgrades.go index cf4c62bf3..2fa020d3d 100644 --- a/chain/consensus/filcns/upgrades.go +++ b/chain/consensus/filcns/upgrades.go @@ -5,7 +5,10 @@ import ( "runtime" "time" + "github.com/docker/go-units" + "github.com/filecoin-project/specs-actors/v6/actors/migration/nv14" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" @@ -156,6 +159,22 @@ func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { StopWithin: 5, }}, Expensive: true, + }, { + Height: build.UpgradeOhSnapHeight, + Network: network.Version15, + Migration: UpgradeActorsV7, + PreMigrations: []stmgr.PreMigration{{ + PreMigration: PreUpgradeActorsV7, + StartWithin: 120, + DontStartWithin: 
60, + StopWithin: 35, + }, { + PreMigration: PreUpgradeActorsV7, + StartWithin: 30, + DontStartWithin: 15, + StopWithin: 5, + }}, + Expensive: true, }, } @@ -625,7 +644,7 @@ func splitGenesisMultisig0(ctx context.Context, em stmgr.ExecMonitor, addr addre // TODO: After the Liftoff epoch, refactor this to use resetMultisigVesting func resetGenesisMsigs0(ctx context.Context, sm *stmgr.StateManager, store adt0.Store, tree *state.StateTree, startEpoch abi.ChainEpoch) error { - gb, err := sm.ChainStore().GetGenesis() + gb, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } @@ -1170,7 +1189,7 @@ func upgradeActorsV6Common( // Perform the migration newHamtRoot, err := nv14.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) if err != nil { - return cid.Undef, xerrors.Errorf("upgrading to actors v5: %w", err) + return cid.Undef, xerrors.Errorf("upgrading to actors v6: %w", err) } // Persist the result. @@ -1197,6 +1216,99 @@ func upgradeActorsV6Common( return newRoot, nil } +func UpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, cb stmgr.ExecMonitor, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) (cid.Cid, error) { + // Use all the CPUs except 3. + workerCount := runtime.NumCPU() - 3 + if workerCount <= 0 { + workerCount = 1 + } + + config := nv15.Config{ + MaxWorkers: uint(workerCount), + JobQueueSize: 1000, + ResultQueueSize: 100, + ProgressLogPeriod: 10 * time.Second, + } + + newRoot, err := upgradeActorsV7Common(ctx, sm, cache, root, epoch, ts, config) + if err != nil { + return cid.Undef, xerrors.Errorf("migrating actors v6 state: %w", err) + } + + return newRoot, nil +} + +func PreUpgradeActorsV7(ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet) error { + // Use half the CPUs for pre-migration, but leave at least 3. 
+ workerCount := runtime.NumCPU() + if workerCount <= 4 { + workerCount = 1 + } else { + workerCount /= 2 + } + + lbts, lbRoot, err := stmgr.GetLookbackTipSetForRound(ctx, sm, ts, epoch) + if err != nil { + return xerrors.Errorf("error getting lookback ts for premigration: %w", err) + } + + config := nv15.Config{MaxWorkers: uint(workerCount), + ProgressLogPeriod: time.Minute * 5} + + _, err = upgradeActorsV7Common(ctx, sm, cache, lbRoot, epoch, lbts, config) + return err +} + +func upgradeActorsV7Common( + ctx context.Context, sm *stmgr.StateManager, cache stmgr.MigrationCache, + root cid.Cid, epoch abi.ChainEpoch, ts *types.TipSet, + config nv15.Config, +) (cid.Cid, error) { + writeStore := blockstore.NewAutobatch(ctx, sm.ChainStore().StateBlockstore(), units.GiB) + // TODO: pretty sure we'd achieve nothing by doing this, confirm in review + //buf := blockstore.NewTieredBstore(sm.ChainStore().StateBlockstore(), writeStore) + store := store.ActorStore(ctx, writeStore) + // Load the state root. + var stateRoot types.StateRoot + if err := store.Get(ctx, root, &stateRoot); err != nil { + return cid.Undef, xerrors.Errorf("failed to decode state root: %w", err) + } + + if stateRoot.Version != types.StateTreeVersion4 { + return cid.Undef, xerrors.Errorf( + "expected state root version 4 for actors v7 upgrade, got %d", + stateRoot.Version, + ) + } + + // Perform the migration + newHamtRoot, err := nv15.MigrateStateTree(ctx, store, stateRoot.Actors, epoch, config, migrationLogger{}, cache) + if err != nil { + return cid.Undef, xerrors.Errorf("upgrading to actors v7: %w", err) + } + + // Persist the result. 
+ newRoot, err := store.Put(ctx, &types.StateRoot{ + Version: types.StateTreeVersion4, + Actors: newHamtRoot, + Info: stateRoot.Info, + }) + if err != nil { + return cid.Undef, xerrors.Errorf("failed to persist new state root: %w", err) + } + + // Persists the new tree and shuts down the flush worker + if err := writeStore.Flush(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore flush failed: %w", err) + } + + if err := writeStore.Shutdown(ctx); err != nil { + return cid.Undef, xerrors.Errorf("writeStore shutdown failed: %w", err) + } + + return newRoot, nil +} + type migrationLogger struct{} func (ml migrationLogger) Log(level rt.LogLevel, msg string, args ...interface{}) { diff --git a/chain/consensus/hierarchical/actors/sca/cbor_gen.go b/chain/consensus/hierarchical/actors/sca/cbor_gen.go index 000c54d03..2a7c34ab6 100644 --- a/chain/consensus/hierarchical/actors/sca/cbor_gen.go +++ b/chain/consensus/hierarchical/actors/sca/cbor_gen.go @@ -8,7 +8,10 @@ import ( "math" "sort" - hierarchical "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + types "github.com/filecoin-project/lotus/chain/types" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" xerrors "golang.org/x/xerrors" @@ -19,59 +22,1144 @@ var _ = cid.Undef var _ = math.E var _ = sort.Sort -var lengthBufSCAState = []byte{133} +var lengthBufConstructorParams = []byte{130} + +func (t *ConstructorParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufConstructorParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NetworkName (string) (string) + if len(t.NetworkName) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.NetworkName was too long") + } + 
+ if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.NetworkName))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.NetworkName)); err != nil { + return err + } + + // t.CheckpointPeriod (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CheckpointPeriod)); err != nil { + return err + } + + return nil +} + +func (t *ConstructorParams) UnmarshalCBOR(r io.Reader) error { + *t = ConstructorParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NetworkName (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.NetworkName = string(sval) + } + // t.CheckpointPeriod (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.CheckpointPeriod = uint64(extra) + + } + return nil +} + +var lengthBufCheckpointParams = []byte{129} + +func (t *CheckpointParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCheckpointParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Checkpoint ([]uint8) (slice) + if len(t.Checkpoint) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Checkpoint was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Checkpoint))); err != nil { + return err + } + + if _, err := w.Write(t.Checkpoint[:]); err != nil { + return err + } + return nil +} + +func (t *CheckpointParams) UnmarshalCBOR(r 
io.Reader) error { + *t = CheckpointParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Checkpoint ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Checkpoint: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Checkpoint = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Checkpoint[:]); err != nil { + return err + } + return nil +} + +var lengthBufSCAState = []byte{141} func (t *SCAState) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write(lengthBufSCAState); err != nil { + if _, err := w.Write(lengthBufSCAState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.NetworkName (address.SubnetID) (string) + if len(t.NetworkName) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.NetworkName was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.NetworkName))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.NetworkName)); err != nil { + return err + } + + // t.TotalSubnets (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSubnets)); err != nil { + return err + } + + // t.MinStake (big.Int) (struct) + if err := t.MinStake.MarshalCBOR(w); err != nil { + return err + } + + // t.Subnets (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Subnets); err != nil { + return xerrors.Errorf("failed to write cid field t.Subnets: %w", err) + } + + // 
t.CheckPeriod (abi.ChainEpoch) (int64) + if t.CheckPeriod >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CheckPeriod)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CheckPeriod-1)); err != nil { + return err + } + } + + // t.Checkpoints (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Checkpoints); err != nil { + return xerrors.Errorf("failed to write cid field t.Checkpoints: %w", err) + } + + // t.CheckMsgsRegistry (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.CheckMsgsRegistry); err != nil { + return xerrors.Errorf("failed to write cid field t.CheckMsgsRegistry: %w", err) + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.BottomUpNonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.BottomUpNonce)); err != nil { + return err + } + + // t.BottomUpMsgsMeta (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.BottomUpMsgsMeta); err != nil { + return xerrors.Errorf("failed to write cid field t.BottomUpMsgsMeta: %w", err) + } + + // t.AppliedBottomUpNonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.AppliedBottomUpNonce)); err != nil { + return err + } + + // t.AppliedTopDownNonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.AppliedTopDownNonce)); err != nil { + return err + } + + // t.AtomicExecRegistry (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.AtomicExecRegistry); err != nil { + return xerrors.Errorf("failed to write cid field t.AtomicExecRegistry: %w", err) + } + + return nil +} + +func (t *SCAState) UnmarshalCBOR(r io.Reader) error { + *t = SCAState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + 
maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 13 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.NetworkName (address.SubnetID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.NetworkName = address.SubnetID(sval) + } + // t.TotalSubnets (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.TotalSubnets = uint64(extra) + + } + // t.MinStake (big.Int) (struct) + + { + + if err := t.MinStake.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.MinStake: %w", err) + } + + } + // t.Subnets (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Subnets: %w", err) + } + + t.Subnets = c + + } + // t.CheckPeriod (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CheckPeriod = abi.ChainEpoch(extraI) + } + // t.Checkpoints (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Checkpoints: %w", err) + } + + t.Checkpoints = c + + } + // t.CheckMsgsRegistry (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CheckMsgsRegistry: %w", err) 
+ } + + t.CheckMsgsRegistry = c + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.BottomUpNonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.BottomUpNonce = uint64(extra) + + } + // t.BottomUpMsgsMeta (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.BottomUpMsgsMeta: %w", err) + } + + t.BottomUpMsgsMeta = c + + } + // t.AppliedBottomUpNonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.AppliedBottomUpNonce = uint64(extra) + + } + // t.AppliedTopDownNonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.AppliedTopDownNonce = uint64(extra) + + } + // t.AtomicExecRegistry (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.AtomicExecRegistry: %w", err) + } + + t.AtomicExecRegistry = c + + } + return nil +} + +var lengthBufSubnet = []byte{136} + +func (t *Subnet) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSubnet); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ID (address.SubnetID) (string) + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ID)); err != nil { + return err + } + + // t.ParentID (address.SubnetID) (string) + if len(t.ParentID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ParentID was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ParentID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ParentID)); err != nil { + return err + } + + // t.Stake (big.Int) (struct) + if err := t.Stake.MarshalCBOR(w); err != nil { + return err + } + + // t.TopDownMsgs (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.TopDownMsgs); err != nil { + return xerrors.Errorf("failed to write cid field t.TopDownMsgs: %w", err) + } + + // t.Nonce (uint64) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Nonce)); err != nil { + return err + } + + // t.CircSupply (big.Int) (struct) + if err := t.CircSupply.MarshalCBOR(w); err != nil { + return err + } + + // t.Status (sca.Status) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err + } + + // t.PrevCheckpoint (schema.Checkpoint) (struct) + if err := t.PrevCheckpoint.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *Subnet) UnmarshalCBOR(r io.Reader) error { + *t = Subnet{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 8 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ID (address.SubnetID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.ID = address.SubnetID(sval) + } + // t.ParentID 
(address.SubnetID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.ParentID = address.SubnetID(sval) + } + // t.Stake (big.Int) (struct) + + { + + if err := t.Stake.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Stake: %w", err) + } + + } + // t.TopDownMsgs (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.TopDownMsgs: %w", err) + } + + t.TopDownMsgs = c + + } + // t.Nonce (uint64) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Nonce = uint64(extra) + + } + // t.CircSupply (big.Int) (struct) + + { + + if err := t.CircSupply.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CircSupply: %w", err) + } + + } + // t.Status (sca.Status) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = Status(extra) + + } + // t.PrevCheckpoint (schema.Checkpoint) (struct) + + { + + if err := t.PrevCheckpoint.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.PrevCheckpoint: %w", err) + } + + } + return nil +} + +var lengthBufFundParams = []byte{129} + +func (t *FundParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufFundParams); err != nil { + return err + } + + // t.Value (big.Int) (struct) + if err := t.Value.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *FundParams) UnmarshalCBOR(r io.Reader) error { + *t = FundParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj 
!= cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Value (big.Int) (struct) + + { + + if err := t.Value.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Value: %w", err) + } + + } + return nil +} + +var lengthBufSubnetIDParam = []byte{129} + +func (t *SubnetIDParam) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSubnetIDParam); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.ID (string) (string) + if len(t.ID) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.ID was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ID))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.ID)); err != nil { + return err + } + return nil +} + +func (t *SubnetIDParam) UnmarshalCBOR(r io.Reader) error { + *t = SubnetIDParam{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.ID (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.ID = string(sval) + } + return nil +} + +var lengthBufCrossMsgs = []byte{130} + +func (t *CrossMsgs) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCrossMsgs); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Msgs ([]types.Message) (slice) + if len(t.Msgs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Msgs was too long") + } + + if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Msgs))); err != nil { + return err + } + for _, v := range t.Msgs { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.Metas ([]schema.CrossMsgMeta) (slice) + if len(t.Metas) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Metas was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Metas))); err != nil { + return err + } + for _, v := range t.Metas { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *CrossMsgs) UnmarshalCBOR(r io.Reader) error { + *t = CrossMsgs{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Msgs ([]types.Message) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Msgs: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Msgs = make([]types.Message, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.Message + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Msgs[i] = v + } + + // t.Metas ([]schema.CrossMsgMeta) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Metas: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Metas = make([]schema.CrossMsgMeta, extra) + } + + for i := 0; i < int(extra); i++ { + + var v schema.CrossMsgMeta + if err := v.UnmarshalCBOR(br); err != nil { + 
return err + } + + t.Metas[i] = v + } + + return nil +} + +var lengthBufMetaTag = []byte{130} + +func (t *MetaTag) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMetaTag); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.MsgsCid (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.MsgsCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MsgsCid: %w", err) + } + + // t.MetasCid (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.MetasCid); err != nil { + return xerrors.Errorf("failed to write cid field t.MetasCid: %w", err) + } + + return nil +} + +func (t *MetaTag) UnmarshalCBOR(r io.Reader) error { + *t = MetaTag{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.MsgsCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MsgsCid: %w", err) + } + + t.MsgsCid = c + + } + // t.MetasCid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.MetasCid: %w", err) + } + + t.MetasCid = c + + } + return nil +} + +var lengthBufCrossMsgParams = []byte{130} + +func (t *CrossMsgParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCrossMsgParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Msg (types.Message) (struct) + if err := t.Msg.MarshalCBOR(w); err != nil { + return err + } + + // t.Destination (address.SubnetID) (string) + if len(t.Destination) > cbg.MaxLength { + return xerrors.Errorf("Value in 
field t.Destination was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Destination))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Destination)); err != nil { + return err + } + return nil +} + +func (t *CrossMsgParams) UnmarshalCBOR(r io.Reader) error { + *t = CrossMsgParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Msg (types.Message) (struct) + + { + + if err := t.Msg.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Msg: %w", err) + } + + } + // t.Destination (address.SubnetID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.Destination = address.SubnetID(sval) + } + return nil +} + +var lengthBufErrorParam = []byte{129} + +func (t *ErrorParam) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufErrorParam); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Code (int64) (int64) + if t.Code >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Code)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.Code-1)); err != nil { + return err + } + } + return nil +} + +func (t *ErrorParam) UnmarshalCBOR(r io.Reader) error { + *t = ErrorParam{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor 
input had wrong number of fields") + } + + // t.Code (int64) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.Code = int64(extraI) + } + return nil +} + +var lengthBufAtomicExec = []byte{131} + +func (t *AtomicExec) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufAtomicExec); err != nil { return err } scratch := make([]byte, 9) - // t.Network (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Network); err != nil { - return xerrors.Errorf("failed to write cid field t.Network: %w", err) + // t.Params (sca.AtomicExecParams) (struct) + if err := t.Params.MarshalCBOR(w); err != nil { + return err } - // t.NetworkName (hierarchical.SubnetID) (string) - if len(t.NetworkName) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.NetworkName was too long") - } + // t.Submitted (map[string]cid.Cid) (map) + { + if len(t.Submitted) > 4096 { + return xerrors.Errorf("cannot marshal t.Submitted map too large") + } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.NetworkName))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.NetworkName)); err != nil { - return err - } + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, uint64(len(t.Submitted))); err != nil { + return err + } - // t.TotalSubnets (uint64) (uint64) + keys := make([]string, 0, len(t.Submitted)) + for k := range t.Submitted { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := t.Submitted[k] - 
if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.TotalSubnets)); err != nil { - return err - } + if len(k) > cbg.MaxLength { + return xerrors.Errorf("Value in field k was too long") + } - // t.MinStake (big.Int) (struct) - if err := t.MinStake.MarshalCBOR(w); err != nil { - return err + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(k))); err != nil { + return err + } + if _, err := io.WriteString(w, string(k)); err != nil { + return err + } + + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed to write cid field v: %w", err) + } + + } } - // t.Subnets (cid.Cid) (struct) + // t.Status (sca.ExecStatus) (uint64) - if err := cbg.WriteCidBuf(scratch, w, t.Subnets); err != nil { - return xerrors.Errorf("failed to write cid field t.Subnets: %w", err) + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { + return err } return nil } -func (t *SCAState) UnmarshalCBOR(r io.Reader) error { - *t = SCAState{} +func (t *AtomicExec) UnmarshalCBOR(r io.Reader) error { + *t = AtomicExec{} br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -84,141 +1172,147 @@ func (t *SCAState) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 5 { + if extra != 3 { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Network (cid.Cid) (struct) + // t.Params (sca.AtomicExecParams) (struct) { - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Network: %w", err) + if err := t.Params.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Params: %w", err) } - t.Network = c + } + // t.Submitted (map[string]cid.Cid) (map) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { + return fmt.Errorf("expected a map (major type 5)") + } + if extra > 4096 { + return 
fmt.Errorf("t.Submitted: map too large") } - // t.NetworkName (hierarchical.SubnetID) (string) - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err - } + t.Submitted = make(map[string]cid.Cid, extra) - t.NetworkName = hierarchical.SubnetID(sval) - } - // t.TotalSubnets (uint64) (uint64) + for i, l := 0, int(extra); i < l; i++ { - { + var k string - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) - if err != nil { - return err - } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + k = string(sval) } - t.TotalSubnets = uint64(extra) - } - // t.MinStake (big.Int) (struct) + var v cid.Cid - { + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field v: %w", err) + } + + v = c - if err := t.MinStake.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.MinStake: %w", err) } + t.Submitted[k] = v + } - // t.Subnets (cid.Cid) (struct) + // t.Status (sca.ExecStatus) (uint64) { - c, err := cbg.ReadCid(br) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) if err != nil { - return xerrors.Errorf("failed to read cid field t.Subnets: %w", err) + return err } - - t.Subnets = c + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Status = ExecStatus(extra) } return nil } -var lengthBufSubnet = []byte{135} +var lengthBufAtomicExecParams = []byte{130} -func (t *Subnet) MarshalCBOR(w io.Writer) error { +func (t *AtomicExecParams) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write(lengthBufSubnet); err != nil { + if _, err := w.Write(lengthBufAtomicExecParams); err != nil { return err } scratch := make([]byte, 9) - // t.Cid (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Cid); err != nil { - return xerrors.Errorf("failed to write cid field t.Cid: %w", 
err) + // t.Msgs ([]types.Message) (slice) + if len(t.Msgs) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Msgs was too long") } - // t.ID (hierarchical.SubnetID) (string) - if len(t.ID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.ID was too long") - } - - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ID))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.ID)); err != nil { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Msgs))); err != nil { return err } - - // t.Parent (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.Parent); err != nil { - return xerrors.Errorf("failed to write cid field t.Parent: %w", err) + for _, v := range t.Msgs { + if err := v.MarshalCBOR(w); err != nil { + return err + } } - // t.ParentID (hierarchical.SubnetID) (string) - if len(t.ParentID) > cbg.MaxLength { - return xerrors.Errorf("Value in field t.ParentID was too long") - } + // t.Inputs (map[string]sca.LockedState) (map) + { + if len(t.Inputs) > 4096 { + return xerrors.Errorf("cannot marshal t.Inputs map too large") + } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.ParentID))); err != nil { - return err - } - if _, err := io.WriteString(w, string(t.ParentID)); err != nil { - return err - } + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajMap, uint64(len(t.Inputs))); err != nil { + return err + } - // t.Stake (big.Int) (struct) - if err := t.Stake.MarshalCBOR(w); err != nil { - return err - } + keys := make([]string, 0, len(t.Inputs)) + for k := range t.Inputs { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := t.Inputs[k] - // t.Funds (cid.Cid) (struct) + if len(k) > cbg.MaxLength { + return xerrors.Errorf("Value in field k was too long") + } - if err := cbg.WriteCidBuf(scratch, w, t.Funds); err != nil { - return xerrors.Errorf("failed to write cid field t.Funds: 
%w", err) - } + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(k))); err != nil { + return err + } + if _, err := io.WriteString(w, string(k)); err != nil { + return err + } - // t.Status (sca.Status) (uint64) + if err := v.MarshalCBOR(w); err != nil { + return err + } - if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { - return err + } } - return nil } -func (t *Subnet) UnmarshalCBOR(r io.Reader) error { - *t = Subnet{} +func (t *AtomicExecParams) UnmarshalCBOR(r io.Reader) error { + *t = AtomicExecParams{} br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -231,112 +1325,201 @@ func (t *Subnet) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 7 { + if extra != 2 { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Cid (cid.Cid) (struct) + // t.Msgs ([]types.Message) (slice) - { + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Cid: %w", err) - } + if extra > cbg.MaxLength { + return fmt.Errorf("t.Msgs: array too large (%d)", extra) + } - t.Cid = c + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + if extra > 0 { + t.Msgs = make([]types.Message, extra) } - // t.ID (hierarchical.SubnetID) (string) - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { + for i := 0; i < int(extra); i++ { + + var v types.Message + if err := v.UnmarshalCBOR(br); err != nil { return err } - t.ID = hierarchical.SubnetID(sval) + t.Msgs[i] = v } - // t.Parent (cid.Cid) (struct) - { + // t.Inputs (map[string]sca.LockedState) (map) - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Parent: %w", err) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajMap { 
+ return fmt.Errorf("expected a map (major type 5)") + } + if extra > 4096 { + return fmt.Errorf("t.Inputs: map too large") + } + + t.Inputs = make(map[string]LockedState, extra) + + for i, l := 0, int(extra); i < l; i++ { + + var k string + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + k = string(sval) } - t.Parent = c + var v LockedState - } - // t.ParentID (hierarchical.SubnetID) (string) + { + + if err := v.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling v: %w", err) + } - { - sval, err := cbg.ReadStringBuf(br, scratch) - if err != nil { - return err } - t.ParentID = hierarchical.SubnetID(sval) + t.Inputs[k] = v + } - // t.Stake (big.Int) (struct) + return nil +} - { +var lengthBufSubmitExecParams = []byte{131} - if err := t.Stake.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Stake: %w", err) - } +func (t *SubmitExecParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufSubmitExecParams); err != nil { + return err + } + + scratch := make([]byte, 9) + // t.Cid (string) (string) + if len(t.Cid) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Cid was too long") } - // t.Funds (cid.Cid) (struct) - { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Cid))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Cid)); err != nil { + return err + } - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.Funds: %w", err) - } + // t.Abort (bool) (bool) + if err := cbg.WriteBool(w, t.Abort); err != nil { + return err + } + + // t.Output (atomic.LockedState) (struct) + if err := t.Output.MarshalCBOR(w); err != nil { + return err + } + return nil +} - t.Funds = c +func (t *SubmitExecParams) UnmarshalCBOR(r io.Reader) error { + *t = SubmitExecParams{} + + br := cbg.GetPeeker(r) + scratch := 
make([]byte, 8) + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") } - // t.Status (sca.Status) (uint64) - { + if extra != 3 { + return fmt.Errorf("cbor input had wrong number of fields") + } - maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + // t.Cid (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) if err != nil { return err } - if maj != cbg.MajUnsignedInt { - return fmt.Errorf("wrong type for uint64 field") + + t.Cid = string(sval) + } + // t.Abort (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Abort = false + case 21: + t.Abort = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.Output (atomic.LockedState) (struct) + + { + + if err := t.Output.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Output: %w", err) } - t.Status = Status(extra) } return nil } -var lengthBufFundParams = []byte{129} +var lengthBufSubmitOutput = []byte{129} -func (t *FundParams) MarshalCBOR(w io.Writer) error { +func (t *SubmitOutput) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write(lengthBufFundParams); err != nil { + if _, err := w.Write(lengthBufSubmitOutput); err != nil { return err } - // t.Value (big.Int) (struct) - if err := t.Value.MarshalCBOR(w); err != nil { + scratch := make([]byte, 9) + + // t.Status (sca.ExecStatus) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Status)); err != nil { return err } + return nil } -func (t *FundParams) UnmarshalCBOR(r io.Reader) error { - *t = FundParams{} +func (t *SubmitOutput) UnmarshalCBOR(r io.Reader) error { + *t = 
SubmitOutput{} br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -353,42 +1536,64 @@ func (t *FundParams) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Value (big.Int) (struct) + // t.Status (sca.ExecStatus) (uint64) { - if err := t.Value.UnmarshalCBOR(br); err != nil { - return xerrors.Errorf("unmarshaling t.Value: %w", err) + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") } + t.Status = ExecStatus(extra) } return nil } -var lengthBufAddSubnetReturn = []byte{129} +var lengthBufLockedState = []byte{130} -func (t *AddSubnetReturn) MarshalCBOR(w io.Writer) error { +func (t *LockedState) MarshalCBOR(w io.Writer) error { if t == nil { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write(lengthBufAddSubnetReturn); err != nil { + if _, err := w.Write(lengthBufLockedState); err != nil { return err } scratch := make([]byte, 9) - // t.Cid (cid.Cid) (struct) + // t.From (address.SubnetID) (string) + if len(t.From) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.From was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.From))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.From)); err != nil { + return err + } - if err := cbg.WriteCidBuf(scratch, w, t.Cid); err != nil { - return xerrors.Errorf("failed to write cid field t.Cid: %w", err) + // t.Cid (string) (string) + if len(t.Cid) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.Cid was too long") } + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.Cid))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.Cid)); err != nil { + return err + } return nil } -func (t *AddSubnetReturn) UnmarshalCBOR(r io.Reader) error { - *t = AddSubnetReturn{} +func (t *LockedState) 
UnmarshalCBOR(r io.Reader) error { + *t = LockedState{} br := cbg.GetPeeker(r) scratch := make([]byte, 8) @@ -401,21 +1606,29 @@ func (t *AddSubnetReturn) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 1 { + if extra != 2 { return fmt.Errorf("cbor input had wrong number of fields") } - // t.Cid (cid.Cid) (struct) + // t.From (address.SubnetID) (string) { - - c, err := cbg.ReadCid(br) + sval, err := cbg.ReadStringBuf(br, scratch) if err != nil { - return xerrors.Errorf("failed to read cid field t.Cid: %w", err) + return err } - t.Cid = c + t.From = address.SubnetID(sval) + } + // t.Cid (string) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + t.Cid = string(sval) } return nil } diff --git a/chain/consensus/hierarchical/actors/sca/gen/gen.go b/chain/consensus/hierarchical/actors/sca/gen/gen.go index c9989cbb6..6d83ac01b 100644 --- a/chain/consensus/hierarchical/actors/sca/gen/gen.go +++ b/chain/consensus/hierarchical/actors/sca/gen/gen.go @@ -8,10 +8,21 @@ import ( func main() { if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "sca", + actor.ConstructorParams{}, + actor.CheckpointParams{}, actor.SCAState{}, actor.Subnet{}, actor.FundParams{}, - actor.AddSubnetReturn{}, + actor.SubnetIDParam{}, + actor.CrossMsgs{}, + actor.MetaTag{}, + actor.CrossMsgParams{}, + actor.ErrorParam{}, + actor.AtomicExec{}, + actor.AtomicExecParams{}, + actor.SubmitExecParams{}, + actor.SubmitOutput{}, + actor.LockedState{}, ); err != nil { panic(err) } diff --git a/chain/consensus/hierarchical/actors/sca/sca_actor.go b/chain/consensus/hierarchical/actors/sca/sca_actor.go index d78af540d..f4a378647 100644 --- a/chain/consensus/hierarchical/actors/sca/sca_actor.go +++ b/chain/consensus/hierarchical/actors/sca/sca_actor.go @@ -9,42 +9,39 @@ import ( "github.com/filecoin-project/go-state-types/cbor" "github.com/filecoin-project/go-state-types/exitcode" actor 
"github.com/filecoin-project/lotus/chain/consensus/actors" - initactor "github.com/filecoin-project/lotus/chain/consensus/actors/init" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + types "github.com/filecoin-project/lotus/chain/types" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" cid "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" ) var _ runtime.VMActor = SubnetCoordActor{} -// SubnetCoordActorAddr is initialized in genesis with the -// address t064 -var SubnetCoordActorAddr = func() address.Address { - a, err := address.NewIDAddress(64) - if err != nil { - panic(err) - } - return a -}() - var Methods = struct { - Constructor abi.MethodNum - Register abi.MethodNum - AddStake abi.MethodNum - ReleaseStake abi.MethodNum - Kill abi.MethodNum -}{builtin0.MethodConstructor, 2, 3, 4, 5} - -type FundParams struct { - Value abi.TokenAmount + Constructor abi.MethodNum + Register abi.MethodNum + AddStake abi.MethodNum + ReleaseStake abi.MethodNum + Kill abi.MethodNum + CommitChildCheckpoint abi.MethodNum + Fund abi.MethodNum + Release abi.MethodNum + SendCross abi.MethodNum + ApplyMessage abi.MethodNum + InitAtomicExec abi.MethodNum + SubmitAtomicExec abi.MethodNum +}{builtin0.MethodConstructor, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12} + +type SubnetIDParam struct { + ID string } -type AddSubnetReturn struct { - Cid cid.Cid -} type SubnetCoordActor struct{} func (a SubnetCoordActor) Exports() 
[]interface{} { @@ -54,11 +51,13 @@ func (a SubnetCoordActor) Exports() []interface{} { 3: a.AddStake, 4: a.ReleaseStake, 5: a.Kill, - // -1: a.Fund, - // -1: a.Release, - // -1: a.Checkpoint, - // -1: a.RawCheckpoint, - // -1: a.XSubnetTx, + 6: a.CommitChildCheckpoint, + 7: a.Fund, + 8: a.Release, + 9: a.SendCross, + 10: a.ApplyMessage, + 11: a.InitAtomicExec, + 12: a.SubmitAtomicExec, } } @@ -74,9 +73,14 @@ func (a SubnetCoordActor) State() cbor.Er { return new(SCAState) } -func (a SubnetCoordActor) Constructor(rt runtime.Runtime, params *initactor.ConstructorParams) *abi.EmptyValue { +type ConstructorParams struct { + NetworkName string + CheckpointPeriod uint64 +} + +func (a SubnetCoordActor) Constructor(rt runtime.Runtime, params *ConstructorParams) *abi.EmptyValue { rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) - st, err := ConstructSCAState(adt.AsStore(rt), hierarchical.SubnetID(params.NetworkName)) + st, err := ConstructSCAState(adt.AsStore(rt), params) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to construct state") rt.StateCreate(st) return nil @@ -87,20 +91,17 @@ func (a SubnetCoordActor) Constructor(rt runtime.Runtime, params *initactor.Cons // It registers a new subnet actor to the hierarchical consensus. // In order for the registering of a subnet to be successful, the transaction // needs to stake at least the minimum stake, if not it'll fail. -func (a SubnetCoordActor) Register(rt runtime.Runtime, _ *abi.EmptyValue) *AddSubnetReturn { +func (a SubnetCoordActor) Register(rt runtime.Runtime, _ *abi.EmptyValue) *SubnetIDParam { // Register can only be called by an actor implementing the subnet actor interface. 
rt.ValidateImmediateCallerType(actor.SubnetActorCodeID) SubnetActorAddr := rt.Caller() var st SCAState - var shcid cid.Cid + var shid address.SubnetID rt.StateTransaction(&st, func() { - var err error - shid := hierarchical.NewSubnetID(st.NetworkName, SubnetActorAddr) - shcid, err = shid.Cid() - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed computing CID from subnetID") + shid = address.NewSubnetID(st.NetworkName, SubnetActorAddr) // Check if the subnet with that ID already exists - if _, has, _ := st.GetSubnet(adt.AsStore(rt), shcid); has { + if _, has, _ := st.GetSubnet(adt.AsStore(rt), shid); has { rt.Abortf(exitcode.ErrIllegalArgument, "can't register a subnet that has been already registered") } // Check if the transaction has enough funds to register the subnet. @@ -109,31 +110,11 @@ func (a SubnetCoordActor) Register(rt runtime.Runtime, _ *abi.EmptyValue) *AddSu rt.Abortf(exitcode.ErrIllegalArgument, "call to register doesn't include enough funds to stake") } - // We always initialize in instantiated state - status := Active - - // Instatiate the subnet state - emptyFundBalances, err := adt.StoreEmptyMap(adt.AsStore(rt), adt.BalanceTableBitwidth) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create empty funds balance table") - - sh := &Subnet{ - Cid: shcid, - ID: shid, - Parent: st.Network, - ParentID: st.NetworkName, - Stake: value, - Funds: emptyFundBalances, - Status: status, - } - - // Increase the number of child subnets for the current network. 
- st.TotalSubnets++ - - // Flush subnet into subnetMap - sh.flushSubnet(rt, &st) + // Create the new subnet and register in SCA + st.registerSubnet(rt, shid, value) }) - return &AddSubnetReturn{Cid: shcid} + return &SubnetIDParam{ID: shid.String()} } // AddStake @@ -167,6 +148,10 @@ func (a SubnetCoordActor) AddStake(rt runtime.Runtime, _ *abi.EmptyValue) *abi.E return nil } +type FundParams struct { + Value abi.TokenAmount +} + // ReleaseStake // // Request from the subnet actor to release part of the stake locked for subnet. @@ -186,7 +171,7 @@ func (a SubnetCoordActor) ReleaseStake(rt runtime.Runtime, params *FundParams) * sh, has, err := st.getSubnetFromActorAddr(adt.AsStore(rt), SubnetActorAddr) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching subnet state") if !has { - rt.Abortf(exitcode.ErrIllegalArgument, "subnet for for actor hasn't been registered yet") + rt.Abortf(exitcode.ErrIllegalArgument, "subnet for actor hasn't been registered yet") } // Check if the subnet actor is allowed to release the amount of stake specified. @@ -212,9 +197,157 @@ func (a SubnetCoordActor) ReleaseStake(rt runtime.Runtime, params *FundParams) * return nil } -// Kill +// CheckpointParams handles in/out communication of checkpoints +// To accommodate arbitrary schemas (and even if it introduces and overhead) +// is easier to transmit a marshalled version of the checkpoint. +// NOTE: Consider in the future if there is a better approach. +type CheckpointParams struct { + Checkpoint []byte +} + +// CommitChildCheckpoint accepts a checkpoint from a subnet for commitment. // -// Unregisters a subnet from the hierarchical consensus +// The subnet is responsible for running all the deep verifications about the checkpoint, +// the SCA is only able to enforce some basic consistency verifications. 
+func (a SubnetCoordActor) CommitChildCheckpoint(rt runtime.Runtime, params *CheckpointParams) *abi.EmptyValue { + // Only subnet actors are allowed to commit a checkpoint after their + // verification and aggregation. + rt.ValidateImmediateCallerType(actor.SubnetActorCodeID) + commit := &schema.Checkpoint{} + err := commit.UnmarshalBinary(params.Checkpoint) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error unmarshalling checkpoint in params") + subnetActorAddr := rt.Caller() + + // Check the source of the checkpoint. + source, err := address.SubnetID(commit.Data.Source).Actor() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error getting checkpoint source") + if source != subnetActorAddr { + rt.Abortf(exitcode.ErrIllegalArgument, "checkpoint committed doesn't belong to source subnet") + } + + // TODO: We could optionally check here if the checkpoint includes a valid signature. I don't + // think this makes sense as in its current implementation the subnet actor receives an + // independent signature for each miner and counts the number of "votes" for the checkpoint. + var st SCAState + // burnValue keeps track of the funds that are leaving the subnet in msgMeta and + // that need to be burnt. + burnValue := abi.NewTokenAmount(0) + rt.StateTransaction(&st, func() { + // Check that the subnet is registered and active + shid := address.NewSubnetID(st.NetworkName, subnetActorAddr) + // Check if the subnet for the actor exists + sh, has, err := st.GetSubnet(adt.AsStore(rt), shid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching subnet state") + if !has { + rt.Abortf(exitcode.ErrIllegalArgument, "subnet for actor hasn't been registered yet") + } + // Check that it is active. Only active shards can commit checkpoints. + if sh.Status != Active { + rt.Abortf(exitcode.ErrIllegalState, "can't commit a checkpoint for a subnet that is not active") + } + // Get the checkpoint for the current window. 
+ ch := st.currWindowCheckpoint(rt) + + // Verify that the submitted checkpoint has higher epoch and is + // consistent with previous checkpoint before committing. + prevCom := sh.PrevCheckpoint + + // If no previous checkpoint for child chain, it means this is the first one + // and we can add it without additional verifications. + if empty, _ := prevCom.IsEmpty(); empty { + // Apply cross messages from child checkpoint + burnValue = st.applyCheckMsgs(rt, sh, ch, commit) + // Append the new checkpoint to the list of childs. + err := ch.AddChild(commit) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error committing checkpoint to this epoch") + st.flushCheckpoint(rt, ch) + // Update previous checkpoint for child. + sh.PrevCheckpoint = *commit + st.flushSubnet(rt, sh) + return + } + + // Check that the epoch is consistent. + if prevCom.Data.Epoch > commit.Data.Epoch { + rt.Abortf(exitcode.ErrIllegalArgument, "new checkpoint being committed belongs to the past") + } + + // Check that the previous Cid is consistent with the committed one. + prevCid, err := prevCom.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error computing checkpoint's Cid") + if pr, _ := commit.PreviousCheck(); prevCid != pr { + rt.Abortf(exitcode.ErrIllegalArgument, "new checkpoint not consistent with previous one") + } + + // Apply cross messages from child checkpoint + burnValue = st.applyCheckMsgs(rt, sh, ch, commit) + // Checks passed, we can append the child. + err = ch.AddChild(commit) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error committing checkpoint to this epoch") + st.flushCheckpoint(rt, ch) + // Update previous checkpoint for child. 
+ sh.PrevCheckpoint = *commit + st.flushSubnet(rt, sh) + }) + + // Burn funds leaving in metas the subnet + if burnValue.GreaterThan(abi.NewTokenAmount(0)) { + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, burnValue, &builtin.Discard{}) + if !code.IsSuccess() { + rt.Abortf(exitcode.ErrIllegalState, + "failed to burn funds from msgmeta, code: %v", code) + } + } + return nil +} + +// applyCheckMsgs prepares messages to trigger their execution or propagate cross-messages +// coming from a checkpoint of a child subnet. +func (st *SCAState) applyCheckMsgs(rt runtime.Runtime, sh *Subnet, windowCh *schema.Checkpoint, childCh *schema.Checkpoint) abi.TokenAmount { + + burnValue := abi.NewTokenAmount(0) + // aux map[to]CrossMsg + aux := make(map[string][]schema.CrossMsgMeta) + for _, mm := range childCh.CrossMsgs() { + // if it is directed to this subnet, or another child of the subnet, + // add it to bottom-up messages + // for the consensus algorithm in the subnet to pick it up. + if mm.To == st.NetworkName.String() || + !hierarchical.IsBottomUp(st.NetworkName, address.SubnetID(mm.To)) { + // Add to BottomUpMsgMeta + st.storeBottomUpMsgMeta(rt, mm) + } else { + // Check if it comes from a valid child, i.e. we are their parent. + if address.SubnetID(mm.From).Parent() != st.NetworkName { + // Someone is trying to forge a cross-msgs into the checkpoint + // from a network from which we are not a parent. 
+ continue + } + // If not add to the aux structure to update the checkpoint when we've + // gone through all crossMsgs + _, ok := aux[mm.To] + if !ok { + aux[mm.To] = []schema.CrossMsgMeta{mm} + } else { + aux[mm.To] = append(aux[mm.To], mm) + } + + } + // Value leaving in a crossMsgMeta needs to be burnt to update circ.supply + v, err := mm.GetValue() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting value from meta") + burnValue = big.Add(burnValue, v) + st.releaseCircSupply(rt, sh, address.SubnetID(mm.From), v) + } + + // Aggregate all the msgsMeta directed to other subnets in the hierarchy + // into the checkpoint + st.aggChildMsgMeta(rt, windowCh, aux) + + return burnValue +} + +// Kill unregisters a subnet from the hierarchical consensus func (a SubnetCoordActor) Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { // Can only be called by an actor implementing the subnet actor interface. rt.ValidateImmediateCallerType(actor.SubnetActorCodeID) @@ -224,14 +357,13 @@ func (a SubnetCoordActor) Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.Empty var sh *Subnet rt.StateTransaction(&st, func() { var has bool - shid := hierarchical.NewSubnetID(st.NetworkName, SubnetActorAddr) - shcid, err := shid.Cid() - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed computing CID from subnetID") + shid := address.NewSubnetID(st.NetworkName, SubnetActorAddr) // Check if the subnet for the actor exists - sh, has, err = st.GetSubnet(adt.AsStore(rt), shcid) + var err error + sh, has, err = st.GetSubnet(adt.AsStore(rt), shid) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching subnet state") if !has { - rt.Abortf(exitcode.ErrIllegalArgument, "subnet for for actor hasn't been registered yet") + rt.Abortf(exitcode.ErrIllegalArgument, "subnet for actor hasn't been registered yet") } // This is a sanity check to ensure that there is enough balance in actor to return stakes @@ -239,10 +371,17 @@ func (a 
SubnetCoordActor) Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.Empty rt.Abortf(exitcode.ErrIllegalState, "yikes! actor doesn't have enough balance to release these funds") } + // TODO: We should prevent a subnet from being killed if it still has user funds in circulation. + // We haven't figured out how to handle this yet, so in the meantime we just prevent from being able to kill + // the subnet when there are pending funds + if sh.CircSupply.GreaterThan(big.Zero()) { + rt.Abortf(exitcode.ErrForbidden, "you can't kill a subnet where users haven't released their funds yet") + } + // Remove subnet from subnet registry. subnets, err := adt.AsMap(adt.AsStore(rt), st.Subnets, builtin.DefaultHamtBitwidth) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for subnets") - err = subnets.Delete(abi.CidKey(shcid)) + err = subnets.Delete(hierarchical.SubnetKey(shid)) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to remove miner stake in stake map") // Flush stakes adding miner stake. st.Subnets, err = subnets.Root() @@ -258,31 +397,354 @@ func (a SubnetCoordActor) Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.Empty return nil } -// addStake adds new funds to the stake of the subnet. +// Fund injects new funds from an account of the parent chain to a subnet. +// +// This functions receives a transaction with the FILs that want to be injected in the subnet. +// - Funds injected are frozen. +// - A new fund cross-message is created and stored to propagate it to the subnet. It will be +// picked up by miners to include it in the next possible block. +// - The cross-message nonce is updated. +func (a SubnetCoordActor) Fund(rt runtime.Runtime, params *SubnetIDParam) *abi.EmptyValue { + // Only account actors can inject funds to a subnet (for now). 
+ rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + + // Check if the transaction includes funds + value := rt.ValueReceived() + if value.LessThanEqual(big.NewInt(0)) { + rt.Abortf(exitcode.ErrIllegalArgument, "no funds included in transaction") + } + + // Get SECP/BLS publickey to know the specific actor ID in the target subnet to + // whom the funds need to be sent. + // Funds are sent to the ID that controls the actor account in the destination subnet. + secpAddr := SecpBLSAddr(rt, rt.Caller()) + + // Increment stake locked for subnet. + var st SCAState + rt.StateTransaction(&st, func() { + msg := fundMsg(rt, address.SubnetID(params.ID), secpAddr, value) + commitTopDownMsg(rt, &st, msg) + + }) + return nil +} + +func commitTopDownMsg(rt runtime.Runtime, st *SCAState, msg types.Message) { + sto, err := msg.To.Subnet() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error getting subnet from address") + sfrom, err := msg.From.Subnet() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error getting subnet from address") + + // Get the next subnet to which the message needs to be sent. + sh, has, err := st.GetSubnet(adt.AsStore(rt), sto.Down(st.NetworkName)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching subnet state") + if !has { + // If the source is this subnet abort, if not send noop. + if sfrom == st.NetworkName { + rt.Abortf(exitcode.ErrIllegalArgument, "subnet for actor hasn't been registered yet") + } else { + ret := st.requireNoErrorWithNoop(rt, msg, exitcode.ErrIllegalArgument, xerrors.Errorf("subnet for actor hasn't been registered yet"), "error committing top-down message") + if ret { + return + } + } + } + + // Set nonce for message + msg.Nonce = sh.Nonce + + // Store in the list of cross messages. + sh.storeTopDownMsg(rt, &msg) + + // Increase nonce. + incrementNonce(rt, &sh.Nonce) + + // Increase circulating supply in subnet. 
+ sh.CircSupply = big.Add(sh.CircSupply, msg.Value) + + // Flush subnet. + st.flushSubnet(rt, sh) +} + +// Release creates a new check message to release funds in parent chain +// +// This function burns the funds that will be released in the current subnet +// and propagates a new checkpoint message to the parent chain to signal +// the amount of funds that can be released for a specific address. +func (a SubnetCoordActor) Release(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue { + // Only account actors can release funds from a subnet (for now). + rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + + // Check if the transaction includes funds + value := rt.ValueReceived() + if value.LessThanEqual(big.NewInt(0)) { + rt.Abortf(exitcode.ErrIllegalArgument, "no funds included in transaction") + } + + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, rt.ValueReceived(), &builtin.Discard{}) + if !code.IsSuccess() { + rt.Abortf(exitcode.ErrIllegalState, + "failed to send release funds to the burnt funds actor, code: %v", code) + } + + // Get SECP/BLS publickey to know the specific actor ID in the target subnet to + // whom the funds need to be sent. + // Funds are sent to the ID that controls the actor account in the destination subnet. + secpAddr := SecpBLSAddr(rt, rt.Caller()) + + var st SCAState + rt.StateTransaction(&st, func() { + // Create releaseMsg and include in currentwindow checkpoint + msg := st.releaseMsg(rt, value, secpAddr, st.Nonce) + commitBottomUpMsg(rt, &st, msg) + }) + return nil +} + +func commitBottomUpMsg(rt runtime.Runtime, st *SCAState, msg types.Message) { + // Store msg in registry, update msgMeta and include in checkpoint + sto, err := msg.To.Subnet() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting subnet from address") + st.storeCheckMsg(rt, msg, st.NetworkName, sto) + + // Increase nonce. 
+ incrementNonce(rt, &st.Nonce) +} + +func SecpBLSAddr(rt runtime.Runtime, raw address.Address) address.Address { + resolved, ok := rt.ResolveAddress(raw) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unable to resolve address %v", raw) + } + var pubkey address.Address + code := rt.Send(resolved, builtin.MethodsAccount.PubkeyAddress, nil, big.Zero(), &pubkey) + builtin.RequireSuccess(rt, code, "failed to fetch account pubkey from %v", resolved) + return pubkey +} + +// SendCross sends an arbitrary cross-message to other subnet in the hierarchy. +// +// If the message includes any funds they need to be burnt (like in Release) +// before being propagated to the corresponding subnet. +// The circulating supply in each subnet needs to be updated as the message passes through them. +func (a SubnetCoordActor) SendCross(rt runtime.Runtime, params *CrossMsgParams) *abi.EmptyValue { + // FIXME: Only support account addresses to send cross-messages for now. + rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + msg := params.Msg + var err error + + if params.Destination == address.UndefSubnetID { + rt.Abortf(exitcode.ErrIllegalArgument, "no desination subnet specified in cross-net message") + } + // Get SECP/BLS publickey to know the specific actor ID in the target subnet to + // whom the funds need to be sent. + // Funds are sent to the ID that controls the actor account in the destination subnet. + // FIXME: Additional processing may be required if we want to + // support cross-messages sent by actors. + secp := SecpBLSAddr(rt, rt.Caller()) + + var ( + st SCAState + tp hierarchical.MsgType + ) + + rt.StateTransaction(&st, func() { + if params.Destination == address.UndefSubnetID { + rt.Abortf(exitcode.ErrIllegalArgument, "destination subnet is current one. You are better of sending a good ol' msg") + } + // Transform to hierarchical-supported addresses + // NOTE: There is no address translation in msg.To. 
We could add additional + // checks to see the type of address and handle it accordingly. + msg.To, err = address.NewHAddress(params.Destination, msg.To) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + msg.From, err = address.NewHAddress(st.NetworkName, secp) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + + tp = st.sendCrossMsg(rt, msg) + + }) + + // For bottom-up messages with value, we need to burn the funds before propagating. + if tp == hierarchical.BottomUp && msg.Value.GreaterThan(big.Zero()) { + code := rt.Send(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, rt.ValueReceived(), &builtin.Discard{}) + if !code.IsSuccess() { + rt.Abortf(exitcode.ErrIllegalState, + "failed to send release funds to the burnt funds actor, code: %v", code) + } + } + return nil +} + +func (st *SCAState) sendCrossMsg(rt runtime.Runtime, msg types.Message) hierarchical.MsgType { + tp := hierarchical.GetMsgType(&msg) + // Check the type of message. + switch tp { + case hierarchical.TopDown: + commitTopDownMsg(rt, st, msg) + case hierarchical.BottomUp: + // Burn the funds before doing anything else. + commitBottomUpMsg(rt, st, msg) + default: + rt.Abortf(exitcode.ErrIllegalArgument, "cross-message doesn't have the right type") + } + return tp +} + +// CrossMsgParams determines the cross message to apply. +type CrossMsgParams struct { + Msg types.Message + Destination address.SubnetID +} + +// ApplyMessage triggers the execution of a cross-subnet message validated through the consensus. // -// This function also accepts negative values to substract, and checks -// if the funds are enough for the subnet to be active. 
-func (sh *Subnet) addStake(rt runtime.Runtime, st *SCAState, value abi.TokenAmount) { - // Add stake to the subnet - sh.Stake = big.Add(sh.Stake, value) - - // Check if subnet has still stake to be active - if sh.Stake.LessThan(st.MinStake) { - sh.Status = Inactive +// This function can only be triggered using `ApplyImplicitMessage`, and the source needs to +// be the SystemActor. Cross messages are applied similarly to how rewards are applied once +// a block has been validated. This function: +// - Determines the type of cross-message. +// - Performs the corresponding state changes. +// - And updated the latest nonce applied for future checks. +func (a SubnetCoordActor) ApplyMessage(rt runtime.Runtime, params *CrossMsgParams) *abi.EmptyValue { + // Only system actor can trigger this function. + rt.ValidateImmediateCallerIs(builtin.SystemActorAddr) + + var st SCAState + rt.StateReadonly(&st) + buApply, err := hierarchical.ApplyAsBottomUp(st.NetworkName, ¶ms.Msg) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error computing type of message to apply") + + if buApply { + applyBottomUp(rt, params.Msg) + return nil } - // Flush subnet into subnetMap - sh.flushSubnet(rt, st) + applyTopDown(rt, params.Msg) + return nil +} +// RequireNoErrorWithNoop sends a opposite cross-net mesasge to revert state changes if error +func (st *SCAState) requireNoErrorWithNoop(rt runtime.Runtime, msg types.Message, code exitcode.ExitCode, err error, errmsg string) bool { + if err != nil { + noop(rt, st, msg, code, xerrors.Errorf("%s: %s", errmsg, err)) + return true + } + return false +} + +// RequireNoErrorWithNoop sends a opposite cross-net mesasge to revert state changes if message code is not successful. 
+func requireSuccessWithNoop(rt runtime.Runtime, msg types.Message, code exitcode.ExitCode, errmsg string) bool { + if !code.IsSuccess() { + noopWithStateTransaction(rt, msg, code, xerrors.Errorf("%s", errmsg)) + return true + } + return false } -func (sh *Subnet) flushSubnet(rt runtime.Runtime, st *SCAState) { - // Update subnet in the list of subnets. - subnets, err := adt.AsMap(adt.AsStore(rt), st.Subnets, builtin.DefaultHamtBitwidth) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for subnets") - err = subnets.Put(abi.CidKey(sh.Cid), sh) - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put new subnet in subnet map") - // Flush subnets - st.Subnets, err = subnets.Root() - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush subnets") +func (a SubnetCoordActor) InitAtomicExec(rt runtime.Runtime, params *AtomicExecParams) *atomic.LockedOutput { + rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + var ( + st SCAState + c cid.Cid + err error + ) + rt.StateTransaction(&st, func() { + c, err = params.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error computing cid for atomic exec params") + execMap, err := adt.AsMap(adt.AsStore(rt), st.AtomicExecRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting exec map") + // Check if execution already exists. + _, found, err := getAtomicExec(execMap, c) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting exec map") + if found { + rt.Abortf(exitcode.ErrIllegalArgument, "execution with cid %s already initialized", c) + } + + if len(params.Msgs) == 0 || len(params.Inputs) == 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "no msgs or inputs provided for execution") + } + + // sanity-check: verify that all messages have same method and are directed to the same actor. 
+ method := params.Msgs[0].Method + to := params.Msgs[0].To + for i := 1; i < len(params.Msgs); i++ { + // FIXME: this requirement can probably be relaxed, but leaving it like that until + // the level of generalization has been tested with other actors. + if method != params.Msgs[i].Method || to != params.Msgs[i].To { + rt.Abortf(exitcode.ErrIllegalArgument, "atomic exec does not support messages to different actors and methods right now") + } + } + // Store new initialized execution + err = st.putExecWithCid(execMap, c, &AtomicExec{Params: *params, Submitted: make(map[string]cid.Cid), Status: ExecInitialized}) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error storing new atomic execution") + }) + + // Return cid that identifies the execution. + return &atomic.LockedOutput{Cid: c} +} + +func (a SubnetCoordActor) SubmitAtomicExec(rt runtime.Runtime, params *SubmitExecParams) *SubmitOutput { + rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + var ( + st SCAState + exec *AtomicExec + ) + rt.StateTransaction(&st, func() { + execMap, err := adt.AsMap(adt.AsStore(rt), st.AtomicExecRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting exec map") + // Check if execution exists. + var found bool + execCid, err := cid.Decode(params.Cid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error getting exec map") + exec, found, err = getAtomicExec(execMap, execCid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting exec map") + if !found { + rt.Abortf(exitcode.ErrIllegalArgument, "execution with cid %s not found", params.Cid) + } + + // Check if the output has been aborted or succeeded. + if exec.Status != ExecInitialized { + rt.Abortf(exitcode.ErrIllegalState, "execution already aborted/succeeded. No need for additional submissions") + } + // Check if the user is involved in the execution. 
+ _, ok := exec.Params.Inputs[rt.Caller().String()] + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "caller not involved in the execution") + } + // Check if he already submitted the output. + _, ok = exec.Submitted[rt.Caller().String()] + if ok { + rt.Abortf(exitcode.ErrIllegalArgument, "caller already submitted an execution output") + } + // Check if this is an abort + if params.Abort { + exec.Status = ExecAborted + st.propagateExecResult(rt, exec, atomic.LockedState{}, params.Abort) + return + } + outputCid, err := params.Output.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error computing Cid for output state") + // Append the output only if it matches the existing ones. + // NOTE: checking here the cid of the lockedState (including the lock), consider + // making the comparison without the lock if we face inconsistencies in the e2e protocol. + for _, o := range exec.Submitted { + // NOTE: checking one should be enough and we could make it more efficient + // like that, but checking all for now as a sanity-check. + if o != outputCid { + rt.Abortf(exitcode.ErrIllegalArgument, "outputs don't match") + // FIXME: Should we abort right-away if this happens. + } + } + exec.Submitted[rt.Caller().String()] = outputCid + err = st.putExecWithCid(execMap, execCid, exec) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error putting exec state") + // If it is the final output update state of the execution. 
+ if len(exec.Submitted) == len(exec.Params.Inputs) { + exec.Status = ExecSuccess + st.propagateExecResult(rt, exec, params.Output, params.Abort) + } + + }) + + // Return status of the execution + return &SubmitOutput{exec.Status} } diff --git a/chain/consensus/hierarchical/actors/sca/sca_apply.go b/chain/consensus/hierarchical/actors/sca/sca_apply.go new file mode 100644 index 000000000..4039ad5d8 --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_apply.go @@ -0,0 +1,193 @@ +package sca + +import ( + "bytes" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + rtt "github.com/filecoin-project/go-state-types/rt" + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" +) + +func fromToRawAddr(rt runtime.Runtime, from, to address.Address) (address.Address, address.Address) { + var err error + from, err = from.RawAddr() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get raw address from HAddress") + to, err = to.RawAddr() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get raw address from HAddress") + return from, to +} + +func applyTopDown(rt runtime.Runtime, msg types.Message) { + var st SCAState + _, rto := fromToRawAddr(rt, msg.From, msg.To) + sto, err := msg.To.Subnet() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get subnet from HAddress") + + rt.StateTransaction(&st, func() { + if bu, err := hierarchical.ApplyAsBottomUp(st.NetworkName, &msg); bu { + builtin.RequireNoErr(rt, err, 
exitcode.ErrIllegalState, "error checking type of message to be applied") + } + // NOTE: Check if the nonce of the message being applied is the subsequent one (we could relax a bit this + // requirement, but it would mean that we need to determine how we want to handle gaps, and messages + // being validated out-of-order). + if st.AppliedTopDownNonce != msg.Nonce { + rt.Abortf(exitcode.ErrIllegalState, "the message being applied doesn't hold the subsequent nonce (nonce=%d, applied=%d)", + msg.Nonce, st.AppliedTopDownNonce) + } + + // Increment latest nonce applied for topDown + incrementNonce(rt, &st.AppliedTopDownNonce) + }) + + // Mint funds for SCA so it can direct them accordingly as part of the message. + params := &reward.FundingParams{ + Addr: hierarchical.SubnetCoordActorAddr, + Value: msg.Value, + } + code := rt.Send(reward.RewardActorAddr, reward.Methods.ExternalFunding, params, big.Zero(), &builtin.Discard{}) + ret := requireSuccessWithNoop(rt, msg, code, "error applying bottomUp message") + if ret { + return + } + + // If not directed to this subnet we need to go down. + if sto != st.NetworkName { + rt.StateTransaction(&st, func() { + commitTopDownMsg(rt, &st, msg) + }) + } else { + // Send the cross-message + // FIXME: Should we not discard the output for any reason? 
+ code = rt.SendWithSerializedParams(rto, msg.Method, msg.Params, msg.Value, &builtin.Discard{}) + requireSuccessWithNoop(rt, msg, code, "error applying bottomUp message") + } +} + +func applyBottomUp(rt runtime.Runtime, msg types.Message) { + var st SCAState + + _, rto := fromToRawAddr(rt, msg.From, msg.To) + sto, err := msg.To.Subnet() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get subnet from HAddress") + + rt.StateTransaction(&st, func() { + if bu, err := hierarchical.ApplyAsBottomUp(st.NetworkName, &msg); !bu { + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error checking type of message to be applied") + } + bottomUpStateTransition(rt, &st, msg) + + if sto != st.NetworkName { + // If directed to a child we need to commit message as a + // top-down transaction to propagate it down. + commitTopDownMsg(rt, &st, msg) + } + }) + + if sto == st.NetworkName { + // Release funds to the destination address if it is directed to the current network. + // FIXME: Should we not discard the output for any reason? + code := rt.SendWithSerializedParams(rto, msg.Method, msg.Params, msg.Value, &builtin.Discard{}) + requireSuccessWithNoop(rt, msg, code, "error applying bottomUp message") + } +} + +func (st *SCAState) releaseCircSupply(rt runtime.Runtime, curr *Subnet, id address.SubnetID, value abi.TokenAmount) { + // For the current subnet, we don't need to get the subnet object again, + // we can modify it directly. + if curr.ID == id { + curr.releaseSupply(rt, value) + return + // It is flushed somwhere else. + } + + // Update circulating supply reducing release value. 
+ sh, has, err := st.GetSubnet(adt.AsStore(rt), id) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching subnet state") + if !has { + rt.Abortf(exitcode.ErrIllegalState, "subnet for actor hasn't been registered yet") + } + sh.releaseSupply(rt, value) + st.flushSubnet(rt, sh) +} + +func (sh *Subnet) releaseSupply(rt runtime.Runtime, value abi.TokenAmount) { + // Even if the actor has balance, we shouldn't allow releasing more than + // the current circulating supply, as it would mean that we are + // releasing funds from the collateral. + if sh.CircSupply.LessThan(value) { + rt.Abortf(exitcode.ErrIllegalState, "wtf! we can't release funds below the circulating supply. Something went wrong!") + } + sh.CircSupply = big.Sub(sh.CircSupply, value) +} + +func bottomUpStateTransition(rt runtime.Runtime, st *SCAState, msg types.Message) { + // Bottom-up messages include the nonce of their message meta. Several messages + // will include the same nonce. They need to be applied in order of nonce. + + // As soon as we see a message with the next msgMeta nonce, we increment the nonce + // and start accepting the one for the next nonce. + // FIXME: Once we have the end-to-end flow of cross-message this REALLY needs to + // be revisited. + if st.AppliedBottomUpNonce+1 == msg.Nonce { + // Increment latest nonce applied for bottomup + incrementNonce(rt, &st.AppliedBottomUpNonce) + } + + // NOTE: Check if the nonce of the message being applied is the subsequent one (we could relax a bit this + // requirement, but it would mean that we need to determine how we want to handle gaps, and messages + // being validated out-of-order). 
+	if st.AppliedBottomUpNonce != msg.Nonce {
+		rt.Abortf(exitcode.ErrIllegalState, "the message being applied doesn't hold the subsequent nonce (nonce=%d, applied=%d)",
+			msg.Nonce, st.AppliedBottomUpNonce)
+	}
+
+}
+
+// ErrorParam wraps an error code to notify that the
+// cross-message failed (at this point it is not processed anywhere)
+type ErrorParam struct {
+	Code int64
+}
+
+func errorParam(rt runtime.Runtime, code exitcode.ExitCode) []byte {
+	var buf bytes.Buffer
+	p := &ErrorParam{Code: int64(code)}
+	err := p.MarshalCBOR(&buf)
+	builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error marshalling error params")
+	return buf.Bytes()
+}
+
+// noop is triggered to notify when a crossMsg fails to be applied successfully.
+func noop(rt runtime.Runtime, st *SCAState, msg types.Message, code exitcode.ExitCode, err error) {
+	rt.Log(rtt.WARN, `cross-msg couldn't be applied. Failed with code: %v, error: %s.
+	A message will be sent to revert any state change performed by the cross-net message in its way here.`, code, err)
+	msg.From, msg.To = msg.To, msg.From
+	// Send an errorParam to give feedback to the source subnet about the error.
+	msg.Params = errorParam(rt, code)
+
+	sto, err := msg.To.Subnet()
+	builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting subnet from HAddress")
+	if hierarchical.IsBottomUp(st.NetworkName, sto) {
+		commitBottomUpMsg(rt, st, msg)
+		return
+	}
+	commitTopDownMsg(rt, st, msg)
+}
+
+func noopWithStateTransaction(rt runtime.Runtime, msg types.Message, code exitcode.ExitCode, err error) {
+	var st SCAState
+	// If the message is not well-formed and something fails when applying the message,
+	// we increase the nonce and send a cross-message back to the source to notify about
+	// the error applying the message (and to revert all the state changes in the route traversed).
+ rt.StateTransaction(&st, func() { + noop(rt, &st, msg, code, err) + }) +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_atomic.go b/chain/consensus/hierarchical/actors/sca/sca_atomic.go new file mode 100644 index 000000000..ce6275dd0 --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_atomic.go @@ -0,0 +1,192 @@ +package sca + +import ( + "context" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + bstore "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + types "github.com/filecoin-project/lotus/chain/types" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +// ExecStatus defines the different state an execution can be in +type ExecStatus uint64 + +const ( + // ExecInitialized and waiting for submissions. + ExecInitialized ExecStatus = iota + // ExecSuccess - all submissions matched. + ExecSuccess + // ExecAborted - some party aborted the execution + ExecAborted +) + +// AtomicExec is the data structure held by SCA for +// atomic executions. 
+type AtomicExec struct { + Params AtomicExecParams + Submitted map[string]cid.Cid + Status ExecStatus +} + +type SubmitExecParams struct { + Cid string // NOTE: Using Cid as string so it can be sed as input parameter + Abort bool + Output atomic.LockedState +} + +type SubmitOutput struct { + Status ExecStatus +} + +// AtomicExecParams determines the conditions (input, msgs) for the atomic +// execution. +// +// Parties involved in the protocol use this information to perform their +// off-chain execution stage. +type AtomicExecParams struct { + Msgs []types.Message + Inputs map[string]LockedState +} + +type LockedState struct { + From address.SubnetID + Cid string // NOTE: Storing cid as string so it can be used as input parameter in actor fn. +} + +func (st *SCAState) putExecWithCid(execMap *adt.Map, c cid.Cid, exec *AtomicExec) error { + var err error + if err := execMap.Put(abi.CidKey(c), exec); err != nil { + return err + } + st.AtomicExecRegistry, err = execMap.Root() + return err +} + +func (st *SCAState) GetAtomicExec(s adt.Store, c cid.Cid) (*AtomicExec, bool, error) { + execMap, err := adt.AsMap(s, st.AtomicExecRegistry, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load atomic exec: %w", err) + } + return getAtomicExec(execMap, c) +} + +func getAtomicExec(execMap *adt.Map, c cid.Cid) (*AtomicExec, bool, error) { + var out AtomicExec + found, err := execMap.Get(abi.CidKey(c), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get execution for cid %v: %w", c, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +// Cid computes the cid for the CrossMsg +func (ae *AtomicExecParams) Cid() (cid.Cid, error) { + cst := cbor.NewCborStore(bstore.NewMemory()) + store := blockadt.WrapStore(context.TODO(), cst) + cArr := blockadt.MakeEmptyArray(store) + mArr := blockadt.MakeEmptyMap(store) + + // Compute CID for list of messages generated in subnet + for i, m := range 
ae.Msgs { + c := cbg.CborCid(m.Cid()) + if err := cArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + for _, input := range ae.Inputs { + mc, err := abi.CidBuilder.Sum([]byte(input.From.String() + input.Cid)) + if err != nil { + return cid.Undef, err + } + c := cbg.CborCid(mc) + if err := mArr.Put(abi.CidKey(mc), &c); err != nil { + return cid.Undef, err + } + } + + croot, err := cArr.Root() + if err != nil { + return cid.Undef, err + } + mroot, err := mArr.Root() + if err != nil { + return cid.Undef, err + } + + return store.Put(store.Context(), &MetaTag{ + MsgsCid: croot, + MetasCid: mroot, + }) +} + +func (st *SCAState) propagateExecResult(rt runtime.Runtime, ae *AtomicExec, output atomic.LockedState, abort bool) { + visited := map[address.SubnetID]struct{}{} + for _, l := range ae.Params.Inputs { + _, ok := visited[l.From] + if ok { + continue + } + // Send result of the execution as cross-msg + st.sendCrossMsg(rt, st.execResultMsg(rt, address.SubnetID(l.From), ae.Params.Msgs[0], output, abort)) + visited[l.From] = struct{}{} + } +} + +func (st *SCAState) execResultMsg(rt runtime.Runtime, toSub address.SubnetID, msg types.Message, output atomic.LockedState, abort bool) types.Message { + source := builtin.SystemActorAddr + + // to actor address responsible for execution + to, err := address.NewHAddress(toSub, msg.To) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + from, err := address.NewHAddress(st.NetworkName, source) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + + // lock params + lparams, err := atomic.WrapSerializedParams(msg.Method, msg.Params) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error wrapping serialized lock params") + var ( + method abi.MethodNum + enc []byte + ) + if abort { + method = atomic.MethodAbort + enc, err = actors.SerializeParams(lparams) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create 
HAddress") + } else { + method = atomic.MethodUnlock + uparams, err := atomic.WrapSerializedUnlockParams(lparams, output.S) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error wrapping merge params") + enc, err = actors.SerializeParams(uparams) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + } + + // Build message. + return types.Message{ + To: to, + From: from, + Value: big.Zero(), + Nonce: st.Nonce, + Method: method, + GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + GasFeeCap: types.NewInt(0), + GasPremium: types.NewInt(0), + Params: enc, + } +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_atomic_test.go b/chain/consensus/hierarchical/actors/sca/sca_atomic_test.go new file mode 100644 index 000000000..dad39f1da --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_atomic_test.go @@ -0,0 +1,245 @@ +package sca_test + +import ( + "testing" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/chain/actors" + replace "github.com/filecoin-project/lotus/chain/consensus/actors/atomic-replace" + actor "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + atomic "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + types "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestAtomicExec(t *testing.T) { + h := newHarness(t) + builder := 
mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + caller := tutil.NewIDAddr(t, 101) + other := tutil.NewIDAddr(t, 102) + + snAddr1 := tutil.NewIDAddr(t, 1000) + sn1 := h.registerSubnet(rt, address.RootSubnet, snAddr1) + snAddr2 := tutil.NewIDAddr(t, 1001) + sn2 := h.registerSubnet(rt, address.RootSubnet, snAddr2) + + t.Log("init new atomic execution") + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + params := &actor.AtomicExecParams{ + Msgs: execMsgs(t, other), + Inputs: lockedStates(t, sn1, sn2, caller, other), + } + ret := rt.Call(h.SubnetCoordActor.InitAtomicExec, params) + st := getState(rt) + execCid := ret.(*atomic.LockedOutput).Cid + exec, found, err := st.GetAtomicExec(adt.AsStore(rt), execCid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, &exec.Params, params) + require.Equal(t, exec.Status, actor.ExecInitialized) + + t.Log("try initializing it again") + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.InitAtomicExec, params) + }) + + t.Log("caller submits output") + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + cidOut, _ := abi.CidBuilder.Sum([]byte("outputTest")) + output, err := atomic.WrapLockableState(&replace.Owners{M: map[string]cid.Cid{other.String(): cidOut}}) + require.NoError(t, err) + oparams := &actor.SubmitExecParams{ + Cid: execCid.String(), + Output: *output, + } + ret = rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + require.Equal(t, ret.(*actor.SubmitOutput).Status, actor.ExecInitialized) + + t.Log("fail if resubmission or caller not involved") + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + 
rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + }) + stranger := tutil.NewIDAddr(t, 103) + rt.SetCaller(stranger, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + }) + + t.Log("submitting the wrong output fails") + rt.SetCaller(other, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + c, _ := abi.CidBuilder.Sum([]byte("test1")) + output, err := atomic.WrapLockableState(&replace.Owners{M: map[string]cid.Cid{other.String(): c}}) + require.NoError(t, err) + ps := &actor.SubmitExecParams{ + Cid: execCid.String(), + Output: *output, + } + rt.Call(h.SubnetCoordActor.SubmitAtomicExec, ps) + }) + + t.Log("execution succeeds and no new submissions accepted") + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + ret = rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + require.Equal(t, ret.(*actor.SubmitOutput).Status, actor.ExecSuccess) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + }) + + t.Log("check propagation messages in top-down message") + sh, found := h.getSubnet(rt, sn1) + require.True(h.t, found) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), 0) + require.NoError(h.t, err) + require.True(h.t, found) + exp, err := address.NewHAddress(address.RootSubnet, builtin.SystemActorAddr) + require.NoError(t, err) + require.Equal(h.t, msg.From, exp) + exp, err = address.NewHAddress(sn1, other) + require.NoError(t, err) + require.Equal(h.t, msg.To, exp) + require.Equal(h.t, msg.Method, atomic.MethodUnlock) + + sh, found = h.getSubnet(rt, sn2) + require.True(h.t, found) + msg, found, err = sh.GetTopDownMsg(adt.AsStore(rt), 0) + 
require.NoError(h.t, err) + require.True(h.t, found) + exp, err = address.NewHAddress(address.RootSubnet, builtin.SystemActorAddr) + require.NoError(t, err) + require.Equal(h.t, msg.From, exp) + exp, err = address.NewHAddress(sn2, other) + require.NoError(t, err) + require.Equal(h.t, msg.To, exp) + require.Equal(h.t, msg.Method, atomic.MethodUnlock) + + t.Log("check that we are propagating the right params") + inputMsg := execMsgs(t, other)[0] + lparams, err := atomic.WrapSerializedParams(inputMsg.Method, inputMsg.Params) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error wrapping serialized lock params") + uparams, err := atomic.WrapSerializedUnlockParams(lparams, output.S) + require.NoError(t, err) + enc, err := actors.SerializeParams(uparams) + require.NoError(t, err) + require.Equal(t, enc, msg.Params) +} + +func TestAbort(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + caller := tutil.NewIDAddr(t, 101) + other := tutil.NewIDAddr(t, 102) + + snAddr1 := tutil.NewIDAddr(t, 1000) + sn1 := h.registerSubnet(rt, address.RootSubnet, snAddr1) + snAddr2 := tutil.NewIDAddr(t, 1001) + sn2 := h.registerSubnet(rt, address.RootSubnet, snAddr2) + + t.Log("init new atomic execution") + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + params := &actor.AtomicExecParams{ + Msgs: execMsgs(t, other), + Inputs: lockedStates(t, sn1, sn2, caller, other), + } + ret := rt.Call(h.SubnetCoordActor.InitAtomicExec, params) + st := getState(rt) + execCid := ret.(*atomic.LockedOutput).Cid + exec, found, err := st.GetAtomicExec(adt.AsStore(rt), execCid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, &exec.Params, params) + require.Equal(t, exec.Status, actor.ExecInitialized) + + t.Log("caller aborts execution, no more submissions 
allowed") + rt.SetCaller(caller, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + oparams := &actor.SubmitExecParams{ + Cid: execCid.String(), + Abort: true, + } + ret = rt.Call(h.SubnetCoordActor.SubmitAtomicExec, oparams) + require.Equal(t, ret.(*actor.SubmitOutput).Status, actor.ExecAborted) + + t.Log("check propagation messages in top-down message") + sh, found := h.getSubnet(rt, sn1) + require.True(h.t, found) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), 0) + require.NoError(h.t, err) + require.True(h.t, found) + exp, err := address.NewHAddress(address.RootSubnet, builtin.SystemActorAddr) + require.NoError(t, err) + require.Equal(h.t, msg.From, exp) + exp, err = address.NewHAddress(sn1, other) + require.NoError(t, err) + require.Equal(h.t, msg.To, exp) + require.Equal(h.t, msg.Method, atomic.MethodAbort) + + sh, found = h.getSubnet(rt, sn2) + require.True(h.t, found) + msg, found, err = sh.GetTopDownMsg(adt.AsStore(rt), 0) + require.NoError(h.t, err) + require.True(h.t, found) + exp, err = address.NewHAddress(address.RootSubnet, builtin.SystemActorAddr) + require.NoError(t, err) + require.Equal(h.t, msg.From, exp) + exp, err = address.NewHAddress(sn2, other) + require.NoError(t, err) + + require.Equal(h.t, msg.To, exp) + require.Equal(h.t, msg.Method, atomic.MethodAbort) +} + +func execMsgs(t *testing.T, addr address.Address) []types.Message { + return []types.Message{ + { + From: addr, + To: addr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodReplace, + Params: nil, + GasPremium: big.Zero(), + GasFeeCap: big.Zero(), + GasLimit: 0, + }, + { + From: addr, + To: addr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodReplace, + Params: nil, + GasPremium: big.Zero(), + GasFeeCap: big.Zero(), + GasLimit: 0, + }, + } +} + +func lockedStates(t *testing.T, sn1, sn2 address.SubnetID, caller, other address.Address) map[string]actor.LockedState { + c1, _ := abi.CidBuilder.Sum([]byte("test1")) + c2, _ := 
abi.CidBuilder.Sum([]byte("test2")) + return map[string]actor.LockedState{ + caller.String(): {From: sn1, Cid: c1.String()}, + other.String(): {From: sn2, Cid: c2.String()}, + } +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_checkpoint_test.go b/chain/consensus/hierarchical/actors/sca/sca_checkpoint_test.go new file mode 100644 index 000000000..80e841390 --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_checkpoint_test.go @@ -0,0 +1,462 @@ +package sca_test + +import ( + "testing" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + actors "github.com/filecoin-project/lotus/chain/consensus/actors" + actor "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/stretchr/testify/require" +) + +func TestCheckpoints(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + SubnetActorAddr := tutil.NewIDAddr(t, 101) + SubnetActorAddr2 := tutil.NewIDAddr(t, 102) + + t.Log("register new subnet successfully") + // Send 2FIL of stake + value := abi.NewTokenAmount(2e18) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret := rt.Call(h.SubnetCoordActor.Register, nil) + res, ok := ret.(*actor.SubnetIDParam) + require.True(t, ok) + shid := address.SubnetID("/root/f0101") + // Verify the return value is correct. + require.Equal(t, res.ID, shid.String()) + rt.Verify() + require.Equal(t, getState(rt).TotalSubnets, uint64(1)) + // Verify instantiated subnet + sh, found := h.getSubnet(rt, shid) + nn1 := sh.ID + require.True(h.t, found) + require.Equal(t, sh.Stake, value) + require.Equal(t, sh.ID.String(), "/root/f0101") + require.Equal(t, sh.ParentID.String(), "/root") + require.Equal(t, sh.Status, actor.Active) + + t.Log("Register second subnet") + rt.SetCaller(SubnetActorAddr2, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret = rt.Call(h.SubnetCoordActor.Register, nil) + res, ok = ret.(*actor.SubnetIDParam) + require.True(t, ok) + shid = address.SubnetID("/root/f0102") + // Verify the return value is correct. + require.Equal(t, res.ID, shid.String()) + rt.Verify() + require.Equal(t, getState(rt).TotalSubnets, uint64(2)) + // Verify instantiated subnet + sh, found = h.getSubnet(rt, shid) + nn2 := sh.ID + require.True(h.t, found) + require.Equal(t, sh.Stake, value) + require.Equal(t, sh.ID.String(), "/root/f0102") + require.Equal(t, sh.ParentID.String(), "/root") + require.Equal(t, sh.Status, actor.Active) + + t.Log("commit first checkpoint in first window for first subnet") + epoch := abi.ChainEpoch(10) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch := newCheckpoint(nn1, epoch+9) + b, err := ch.MarshalBinary() + require.NoError(t, err) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + windowCh := currWindowCheckpoint(rt, epoch) + require.Equal(t, windowCh.Data.Epoch, 100) + // Check that child was added. + require.GreaterOrEqual(t, windowCh.HasChildSource(nn1), 0) + // Check previous checkpoint added + prevCh, found := h.getPrevChildCheckpoint(rt, nn1) + require.True(t, found) + eq, err := prevCh.Equals(ch) + require.NoError(t, err) + require.True(t, eq) + + t.Log("trying to commit a checkpoint from subnet twice") + epoch = abi.ChainEpoch(11) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + require.NoError(t, err) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + }) + + t.Log("appending child checkpoint for same source") + epoch = abi.ChainEpoch(12) + prevcid, err := ch.Cid() + require.NoError(t, err) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(nn1, epoch+10) + ch.SetPrevious(prevcid) + b, err = ch.MarshalBinary() + require.NoError(t, err) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + windowCh = currWindowCheckpoint(rt, epoch) + require.Equal(t, windowCh.Data.Epoch, 100) + // Check that child was appended for subnet. 
+ require.GreaterOrEqual(t, windowCh.HasChildSource(nn1), 0) + require.Equal(t, len(windowCh.GetSourceChilds(nn1).Checks), 2) + // Check previous checkpoint added + prevCh, found = h.getPrevChildCheckpoint(rt, nn1) + require.True(t, found) + eq, err = prevCh.Equals(ch) + require.NoError(t, err) + require.True(t, eq) + + t.Log("trying to commit from wrong subnet") + epoch = abi.ChainEpoch(12) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(nn2, epoch+9) + b, err = ch.MarshalBinary() + require.NoError(t, err) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + }) + + t.Log("trying to commit a checkpoint from the past") + epoch = abi.ChainEpoch(11) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(nn1, epoch) + b, err = ch.MarshalBinary() + require.NoError(t, err) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + }) + + t.Log("raw checkpoint on first window is empty") + epoch = abi.ChainEpoch(12) + rt.SetEpoch(epoch) + st := getState(rt) + raw, err := actor.RawCheckpoint(st, adt.AsStore(rt), epoch) + require.NoError(t, err) + chEpoch := types.CheckpointEpoch(epoch, st.CheckPeriod) + emptyRaw := schema.NewRawCheckpoint(st.NetworkName, chEpoch) + require.NoError(t, err) + eq, err = raw.Equals(emptyRaw) + require.NoError(t, err) + require.True(t, eq) + + t.Log("commit first checkpoint in first window for second subnet") + rt.SetCaller(SubnetActorAddr2, actors.SubnetActorCodeID) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(nn2, epoch+10) + b, err = ch.MarshalBinary() + require.NoError(t, err) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + windowCh = currWindowCheckpoint(rt, epoch) + // Check that child was added. + require.GreaterOrEqual(t, windowCh.HasChildSource(nn2), 0) + // Check that there are two childs. + require.Equal(t, windowCh.LenChilds(), 2) + // Check previous checkpoint added + prevCh, found = h.getPrevChildCheckpoint(rt, nn2) + require.True(t, found) + eq, err = prevCh.Equals(ch) + require.NoError(t, err) + require.True(t, eq) + t.Log("raw checkpoint in next period includes childs") + epoch = abi.ChainEpoch(120) + + st = getState(rt) + raw, err = actor.RawCheckpoint(st, adt.AsStore(rt), epoch) + require.NoError(t, err) + require.Equal(t, raw.Data.Epoch, 100) + require.GreaterOrEqual(t, raw.HasChildSource(nn2), 0) + require.Equal(t, raw.LenChilds(), 2) + pr, err := raw.PreviousCheck() + require.NoError(t, err) + require.Equal(t, pr, schema.NoPreviousCheck) + + t.Log("trying to commit wrong checkpoint (wrong subnet/wrong epoch/wrong prev") + // TODO: We need to populate this with more tests for different conditions. + +} + +func TestCheckpointCrossMsgs(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + netName := "/root/f01" + h.constructAndVerifyWithNetworkName(rt, address.SubnetID(netName)) + SubnetActorAddr := tutil.NewIDAddr(t, 101) + + t.Log("register new subnet successfully") + // Send 2FIL of stake + value := abi.NewTokenAmount(2e18) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret := rt.Call(h.SubnetCoordActor.Register, nil) + res, ok := ret.(*actor.SubnetIDParam) + require.True(t, ok) + shid := address.SubnetID(netName + "/f0101") + // Verify the return value is correct. + require.Equal(t, res.ID, shid.String()) + rt.Verify() + require.Equal(t, getState(rt).TotalSubnets, uint64(1)) + // Verify instantiated subnet + sh, found := h.getSubnet(rt, shid) + require.True(h.t, found) + require.Equal(t, sh.Stake, value) + require.Equal(t, sh.ID.String(), netName+"/f0101") + require.Equal(t, sh.ParentID.String(), netName) + require.Equal(t, sh.Status, actor.Active) + + t.Log("commit checkpoint with cross msgs") + epoch := abi.ChainEpoch(10) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch := newCheckpoint(sh.ID, epoch+9) + // Add msgMeta directed to other subnets + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child1"), "rand1", big.Zero()) + // By not adding a random string we are checking that nothing fails when to MsgMeta + // for different subnets are propagating the same CID. This will probably never be the + // case for honest peers, but it is an attack vector. 
+ addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child2"), "", big.Zero()) + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child3"), "", big.Zero()) + // And to this subnet + addMsgMeta(ch, sh.ID, address.SubnetID(netName), "", big.Zero()) + addMsgMeta(ch, sh.ID, address.SubnetID(netName), "rand", big.Zero()) + // And to a child from other branch (with cross-net messages) + addMsgMeta(ch, sh.ID, address.SubnetID(netName+"/f02"), "rand", big.Zero()) + prevcid, _ := ch.Cid() + + b, err := ch.MarshalBinary() + require.NoError(t, err) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + windowCh := currWindowCheckpoint(rt, epoch) + require.Equal(t, windowCh.Data.Epoch, 100) + // Check that child was added. + require.GreaterOrEqual(t, windowCh.HasChildSource(sh.ID), 0) + // Check previous checkpoint added + prevCh, found := h.getPrevChildCheckpoint(rt, sh.ID) + require.True(t, found) + eq, err := prevCh.Equals(ch) + require.NoError(t, err) + require.True(t, eq) + + // Check that the BottomUpMsgs to be applied are added + st := getState(rt) + for i := uint64(0); i < 3; i++ { + _, found, err = st.GetBottomUpMsgMeta(adt.AsStore(rt), i) + require.NoError(t, err) + require.True(t, found) + } + + // Check msgMeta to other subnets are aggregated + m := windowCh.CrossMsgs() + subs := []address.SubnetID{"/root/f0102/child1", "/root/f0102/child2", "/root/f0102/child3"} + require.Equal(t, len(m), 3) + prevs := make(map[string]schema.CrossMsgMeta) + for i, mm := range m { + // Check that from has been renamed + require.Equal(t, mm.From, netName) + // Check the to is kept + require.Equal(t, len(windowCh.CrossMsgsTo(subs[i])), 1) + // Append for the future + prevs[mm.To] = mm + } + + t.Log("commit checkpoint with more cross-msgs for subnet") + epoch = abi.ChainEpoch(13) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(sh.ID, epoch+6) + // Msgs to this subnet + addMsgMeta(ch, sh.ID, address.SubnetID(netName), "r2", big.Zero()) + addMsgMeta(ch, sh.ID, address.SubnetID(netName), "r3", big.Zero()) + ch.SetPrevious(prevcid) + prevcid, _ = ch.Cid() + + b, err = ch.MarshalBinary() + require.NoError(t, err) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + + // Check that the BottomUpMsgs to be applied are added with the right nonce. + st = getState(rt) + _, found, err = st.GetBottomUpMsgMeta(adt.AsStore(rt), 2) + require.NoError(t, err) + require.True(t, found) + _, found, err = st.GetBottomUpMsgMeta(adt.AsStore(rt), 3) + require.NoError(t, err) + require.True(t, found) + + // Funding subnet so it can propagate some funds + funder := tutil.NewIDAddr(h.t, 1000) + value = abi.NewTokenAmount(1e18) + fund(h, rt, sh.ID, funder, value, 1, value, value) + + t.Log("commit second checkpoint with overlapping metas and funds") + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch = newCheckpoint(sh.ID, epoch+9) + ch.SetPrevious(prevcid) + // Add msgMeta directed to other subnets + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child1"), "", big.Zero()) + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child2"), "", big.Zero()) + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child3"), "", abi.NewTokenAmount(100)) + addMsgMeta(ch, sh.ID, address.SubnetID("/root/f0102/child4"), "", abi.NewTokenAmount(100)) + + b, err = ch.MarshalBinary() + require.NoError(t, err) + // Expect burning some funds + rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, abi.NewTokenAmount(200), nil, exitcode.Ok) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + windowCh = currWindowCheckpoint(rt, epoch) + require.Equal(t, windowCh.Data.Epoch, 100) + // Check that child was added. + require.GreaterOrEqual(t, windowCh.HasChildSource(sh.ID), 0) + // Check previous checkpoint added + prevCh, found = h.getPrevChildCheckpoint(rt, sh.ID) + require.True(t, found) + eq, err = prevCh.Equals(ch) + require.NoError(t, err) + require.True(t, eq) + + // Check msgMeta to other subnets are aggregated + m = windowCh.CrossMsgs() + subs = []address.SubnetID{"/root/f0102/child1", "/root/f0102/child2", "/root/f0102/child3", "/root/f0102/child4"} + require.Equal(t, len(m), 4) + for i, mm := range m { + // Check that from has been renamed + require.Equal(t, mm.From, netName) + // Check the to is kept + require.Equal(t, len(windowCh.CrossMsgsTo(subs[i])), 1) + // Get current msgMetas + mcid, _ := mm.Cid() + msgmeta, found := h.getMsgMeta(rt, mcid) + require.True(t, found) + prev, ok := prevs[mm.To] + if ok { + prevCid, _ := prev.Cid() + _, found := h.getMsgMeta(rt, prevCid) + // The one subnet updated should have removed the previous + if mm.To == subs[0].String() { + require.False(h.t, found) + } else { + // The rest should stil be 
accessible + require.True(h.t, found) + } + } + // There should be one in every subnet (because its a new one, + // or they were either equal except for the first one where the + // cids of the msgMeta where different. + if mm.To == subs[0].String() { + require.Equal(t, len(msgmeta.Metas), 2) + } else { + require.Equal(t, len(msgmeta.Metas), 1) + } + } +} + +func TestCheckpointInactive(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + SubnetActorAddr := tutil.NewIDAddr(t, 101) + + t.Log("register new subnet successfully") + // Send 2FIL of stake + value := abi.NewTokenAmount(2e18) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret := rt.Call(h.SubnetCoordActor.Register, nil) + res, ok := ret.(*actor.SubnetIDParam) + require.True(t, ok) + shid := address.SubnetID("/root/f0101") + // Verify the return value is correct. + require.Equal(t, res.ID, shid.String()) + rt.Verify() + require.Equal(t, getState(rt).TotalSubnets, uint64(1)) + // Verify instantiated subnet + sh, found := h.getSubnet(rt, shid) + nn1 := sh.ID + require.True(h.t, found) + require.Equal(t, sh.Stake, value) + require.Equal(t, sh.ID.String(), "/root/f0101") + require.Equal(t, sh.ParentID.String(), "/root") + require.Equal(t, sh.Status, actor.Active) + + t.Log("release some stake to inactivate") + releaseVal := abi.NewTokenAmount(2e18) + params := &actor.FundParams{Value: releaseVal} + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.ExpectSend(SubnetActorAddr, builtin.MethodSend, nil, releaseVal, nil, exitcode.Ok) + rt.Call(h.SubnetCoordActor.ReleaseStake, params) + rt.Verify() + sh, found = h.getSubnet(rt, shid) + require.True(h.t, found) + require.Equal(t, sh.Status, actor.Inactive) + + t.Log("trying to commit checkpoint for inactive subnet") + epoch := abi.ChainEpoch(32) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + rt.SetEpoch(epoch) + ch := newCheckpoint(nn1, epoch+20) + b, err := ch.MarshalBinary() + require.NoError(t, err) + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + }) +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_cross.go b/chain/consensus/hierarchical/actors/sca/sca_cross.go new file mode 100644 index 000000000..a6a13baf4 --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_cross.go @@ -0,0 +1,400 @@ +package sca + +import ( + "context" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + bstore "github.com/filecoin-project/lotus/blockstore" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + ltypes "github.com/filecoin-project/lotus/chain/types" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + cid "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +// CrossMsgs aggregates all the information related to 
crossMsgs that need to be persisted +type CrossMsgs struct { + Msgs []ltypes.Message // Raw msgs from the subnet + Metas []schema.CrossMsgMeta // Metas propagated from child subnets +} + +// MetaTag is a convenient struct +// used to compute the Cid of the MsgMeta +type MetaTag struct { + MsgsCid cid.Cid + MetasCid cid.Cid +} + +// Cid computes the cid for the CrossMsg +func (cm *CrossMsgs) Cid() (cid.Cid, error) { + cst := cbor.NewCborStore(bstore.NewMemory()) + store := blockadt.WrapStore(context.TODO(), cst) + cArr := blockadt.MakeEmptyArray(store) + mArr := blockadt.MakeEmptyArray(store) + + // Compute CID for list of messages generated in subnet + for i, m := range cm.Msgs { + c := cbg.CborCid(m.Cid()) + if err := cArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + // Compute Cid for msgsMeta propagated from child subnets. + for i, m := range cm.Metas { + // NOTE: Instead of using the metaCID to compute CID of msgMeta + // we use from/to to de-duplicate between Cids of different msgMeta. + // This may be deemed unnecessary, but it is a sanity-check for the case + // where a subnet may try to push the same Cid of MsgMeta of other subnets + // and thus remove previously stored msgMetas. 
+ // _, mc, err := cid.CidFromBytes(m.MsgsCid) + // if err != nil { + // return cid.Undef, err + // } + mc, err := abi.CidBuilder.Sum([]byte(string(m.MsgsCid) + m.From + m.To)) + if err != nil { + return cid.Undef, err + } + c := cbg.CborCid(mc) + if err := mArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + + croot, err := cArr.Root() + if err != nil { + return cid.Undef, err + } + mroot, err := mArr.Root() + if err != nil { + return cid.Undef, err + } + + return store.Put(store.Context(), &MetaTag{ + MsgsCid: croot, + MetasCid: mroot, + }) +} + +// AddMsg adds the Cid of a new message to MsgMeta +func (cm *CrossMsgs) AddMsg(msg ltypes.Message) { + cm.Msgs = append(cm.Msgs, msg) +} + +func (cm *CrossMsgs) hasEqualMeta(meta *schema.CrossMsgMeta) bool { + for _, m := range cm.Metas { + if m.Equal(meta) { + return true + } + } + return false +} + +// AddMetas adds a list of MsgMetas from child subnets to the CrossMsgs +func (cm *CrossMsgs) AddMetas(metas []schema.CrossMsgMeta) { + for _, m := range metas { + // If the same meta is already there don't include it. 
+ if cm.hasEqualMeta(&m) { + continue + } + cm.Metas = append(cm.Metas, m) + } +} + +// AddMsgMeta adds the Cid of a msgMeta from a child subnet +// to aggregate it and propagate it in the checkpoint +func (cm *CrossMsgs) AddMsgMeta(from, to address.SubnetID, meta schema.CrossMsgMeta) { + cm.Metas = append(cm.Metas, meta) +} + +func (st *SCAState) releaseMsg(rt runtime.Runtime, value big.Int, to address.Address, nonce uint64) ltypes.Message { + // The way we identify that it is a release message from the subnet is by + // setting the burntFundsActor as the from of the message + // See hierarchical/types.go + source := builtin.BurntFundsActorAddr + + // Transform To and From to HAddresses + to, err := address.NewHAddress(st.NetworkName.Parent(), to) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + from, err := address.NewHAddress(st.NetworkName, source) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + + // Build message. + return ltypes.Message{ + To: to, + From: from, + Value: value, + Nonce: st.Nonce, + Method: builtin.MethodSend, + GasLimit: 1 << 30, // This will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + } + +} + +func (st *SCAState) storeBottomUpMsgMeta(rt runtime.Runtime, meta schema.CrossMsgMeta) { + meta.Nonce = int(st.BottomUpNonce) + crossMsgs, err := adt.AsArray(adt.AsStore(rt), st.BottomUpMsgsMeta, CrossMsgsAMTBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load cross-messages") + // Set message in AMT + err = crossMsgs.Set(uint64(meta.Nonce), &meta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store cross-messages") + // Flush AMT + st.BottomUpMsgsMeta, err = crossMsgs.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush cross-messages") + + // Increase nonce. 
+ incrementNonce(rt, &st.BottomUpNonce) +} + +func (st *SCAState) GetTopDownMsg(s adt.Store, id address.SubnetID, nonce uint64) (*ltypes.Message, bool, error) { + sh, found, err := st.GetSubnet(s, id) + if err != nil { + return nil, false, err + } + if !found { + return nil, false, xerrors.Errorf("subnet not registered in hierarchical consensus") + } + return sh.GetTopDownMsg(s, nonce) +} + +func (st *SCAState) GetBottomUpMsgMeta(s adt.Store, nonce uint64) (*schema.CrossMsgMeta, bool, error) { + crossMsgs, err := adt.AsArray(s, st.BottomUpMsgsMeta, CrossMsgsAMTBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load cross-msgs: %w", err) + } + return getBottomUpMsgMeta(crossMsgs, nonce) +} + +func getBottomUpMsgMeta(crossMsgs *adt.Array, nonce uint64) (*schema.CrossMsgMeta, bool, error) { + if nonce > MaxNonce { + return nil, false, xerrors.Errorf("maximum cross-message nonce is 2^63-1") + } + var out schema.CrossMsgMeta + found, err := crossMsgs.Get(nonce, &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get cross-msg with nonce %v: %w", nonce, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +// BottomUpMsgFromNonce gets the latest bottomUpMetas from a specific nonce +// (including the one specified, i.e. [nonce, latest], both limits +// included). +func (st *SCAState) BottomUpMsgFromNonce(s adt.Store, nonce uint64) ([]*schema.CrossMsgMeta, error) { + crossMsgs, err := adt.AsArray(s, st.BottomUpMsgsMeta, CrossMsgsAMTBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to load cross-msgs meta: %w", err) + } + // FIXME: Consider setting the length of the slice in advance + // to improve performance. 
+ out := make([]*schema.CrossMsgMeta, 0) + for i := nonce; i < st.BottomUpNonce; i++ { + meta, found, err := getBottomUpMsgMeta(crossMsgs, i) + if err != nil { + return nil, err + } + if found { + out = append(out, meta) + } + } + return out, nil +} + +// Using this approach to increment nonce to avoid code repetition. +// We could probably do better and be more efficient if we had generics. +func incrementNonce(rt runtime.Runtime, nonceCounter *uint64) { + // Increment nonce. + (*nonceCounter)++ + + // If overflow we restart from zero. + if *nonceCounter > MaxNonce { + // FIXME: This won't be a problem in the short-term, but we should handle this. + // We could maybe use a snapshot or paging approach so new peers can sync + // from scratch while restarting the nonce for cross-message for subnets to zero. + // sh.Nonce = 0 + rt.Abortf(exitcode.ErrIllegalState, "nonce overflow not supported yet") + } +} + +func (st *SCAState) aggChildMsgMeta(rt runtime.Runtime, ch *schema.Checkpoint, aux map[string][]schema.CrossMsgMeta) { + for to, mm := range aux { + // Get the cid of MsgMeta from this subnet (if any) + metaIndex, msgMeta := ch.CrossMsgMeta(st.NetworkName, address.SubnetID(to)) + if msgMeta == nil { + msgMeta = schema.NewCrossMsgMeta(st.NetworkName, address.SubnetID(to)) + } + + value := abi.NewTokenAmount(0) + // All value inside msgMetas + for _, mt := range mm { + v, err := mt.GetValue() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error getting value from meta") + value = big.Add(value, v) + } + + // If there is already a msgMeta for that to/from update with new message + if len(msgMeta.MsgsCid) != 0 { + _, prevMetaCid, err := cid.CidFromBytes(msgMeta.MsgsCid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to compute Cid for msgMeta") + metaCid := st.appendMetasToMeta(rt, prevMetaCid, mm) + // Update msgMeta in checkpoint + ch.SetMsgMetaCid(metaIndex, metaCid) + ch.AddValueMetaCid(metaIndex, value) + } else { + // if not 
populate a new one + meta := &CrossMsgs{Metas: mm} + msgMetas, err := adt.AsMap(adt.AsStore(rt), st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load msgMeta registry") + metaCid, err := putMsgMeta(msgMetas, meta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put updates MsgMeta in registry") + // Flush registry + st.CheckMsgsRegistry, err = msgMetas.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush msgMeta registry") + // Append msgMeta to registry + msgMeta.MsgsCid = metaCid.Bytes() + msgMeta.AddValue(value) + ch.AppendMsgMeta(msgMeta) + } + } +} + +func (st *SCAState) storeCheckMsg(rt runtime.Runtime, msg ltypes.Message, from, to address.SubnetID) { + // Get the checkpoint for the current window + ch := st.currWindowCheckpoint(rt) + // Get the cid of MsgMeta + metaIndex, msgMeta := ch.CrossMsgMeta(from, to) + if msgMeta == nil { + msgMeta = schema.NewCrossMsgMeta(from, to) + } + + // If there is already a msgMeta for that to/from update with new message + if len(msgMeta.MsgsCid) != 0 { + _, prevMetaCid, err := cid.CidFromBytes(msgMeta.MsgsCid) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to compute Cid for msgMeta") + metaCid := st.appendMsgToMeta(rt, prevMetaCid, msg) + // Update msgMeta in checkpoint + ch.SetMsgMetaCid(metaIndex, metaCid) + ch.AddValueMetaCid(metaIndex, msg.Value) + } else { + // if not populate a new one + meta := &CrossMsgs{Msgs: []ltypes.Message{msg}} + msgMetas, err := adt.AsMap(adt.AsStore(rt), st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load msgMeta registry") + metaCid, err := putMsgMeta(msgMetas, meta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put updates MsgMeta in registry") + // Flush registry + st.CheckMsgsRegistry, err = msgMetas.Root() + builtin.RequireNoErr(rt, err, 
exitcode.ErrIllegalState, "failed to flush msgMeta registry") + // Append msgMeta to registry + msgMeta.MsgsCid = metaCid.Bytes() + msgMeta.AddValue(msg.Value) + ch.AppendMsgMeta(msgMeta) + } + + st.flushCheckpoint(rt, ch) + +} + +// GetCrossMsgs returns the crossmsgs from a CID in the registry. +func (st *SCAState) GetCrossMsgs(store adt.Store, c cid.Cid) (*CrossMsgs, bool, error) { + msgMetas, err := adt.AsMap(store, st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, false, err + } + var out CrossMsgs + found, err := msgMetas.Get(abi.CidKey(c), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get crossMsgMeta from registry with cid %v: %w", c, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +func getMsgMeta(msgMetas *adt.Map, c cid.Cid) (*CrossMsgs, bool, error) { + var out CrossMsgs + found, err := msgMetas.Get(abi.CidKey(c), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get crossMsgMeta from registry with cid %v: %w", c, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +// PutMsgMeta puts a new msgMeta in registry and returns the Cid of the MsgMeta +func putMsgMeta(msgMetas *adt.Map, meta *CrossMsgs) (cid.Cid, error) { + metaCid, err := meta.Cid() + if err != nil { + return cid.Undef, err + } + return metaCid, msgMetas.Put(abi.CidKey(metaCid), meta) +} + +// Puts meta in registry, deletes previous one, and flushes updated registry +func (st *SCAState) putDeleteFlushMeta(rt runtime.Runtime, msgMetas *adt.Map, prevMetaCid cid.Cid, meta *CrossMsgs) cid.Cid { + // Put updated msgMeta + metaCid, err := putMsgMeta(msgMetas, meta) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put updates MsgMeta in registry") + // Delete previous meta, is no longer needed + err = msgMetas.Delete(abi.CidKey(prevMetaCid)) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to delete previous MsgMeta from 
registry") + // Flush registry + st.CheckMsgsRegistry, err = msgMetas.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush msgMeta registry") + return metaCid +} + +// appendMsgToMsgMeta appends the message to MsgMeta in the registry and returns the updated Cid. +func (st *SCAState) appendMsgToMeta(rt runtime.Runtime, prevMetaCid cid.Cid, msg ltypes.Message) cid.Cid { + msgMetas, err := adt.AsMap(adt.AsStore(rt), st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load msgMeta registry") + + // Get previous meta + meta, found, err := getMsgMeta(msgMetas, prevMetaCid) + if !found || err != nil { + rt.Abortf(exitcode.ErrIllegalState, "error fetching meta by cid or not found: err=%v", err) + } + // Add new msg to meta + meta.AddMsg(msg) + return st.putDeleteFlushMeta(rt, msgMetas, prevMetaCid, meta) +} + +// appendMsgToMsgMeta appends the message to MsgMeta in the registry and returns the updated Cid. +func (st *SCAState) appendMetasToMeta(rt runtime.Runtime, prevMetaCid cid.Cid, metas []schema.CrossMsgMeta) cid.Cid { + msgMetas, err := adt.AsMap(adt.AsStore(rt), st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load msgMeta registry") + + // Get previous meta + meta, found, err := getMsgMeta(msgMetas, prevMetaCid) + if !found || err != nil { + rt.Abortf(exitcode.ErrIllegalState, "error fetching meta by cid or not found: err=%v", err) + } + // Add new msg to meta + meta.AddMetas(metas) + // If the Cid hasn't change return without persisting + mcid, err := meta.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to compute Cid") + if mcid == prevMetaCid { + return mcid + } + // FIXME: We can prevent one computation of metaCid here by adding mcid as + // an argument in this function. 
+ return st.putDeleteFlushMeta(rt, msgMetas, prevMetaCid, meta) +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_cross_test.go b/chain/consensus/hierarchical/actors/sca/sca_cross_test.go new file mode 100644 index 000000000..b2bca530c --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_cross_test.go @@ -0,0 +1,593 @@ +package sca_test + +import ( + "testing" + + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + actors "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + actor "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + ltypes "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + cid "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" +) + +func TestFund(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + SubnetActorAddr := tutil.NewIDAddr(t, 101) + + t.Log("register new subnet successfully") + // Send 2FIL of stake + value := abi.NewTokenAmount(2e18) + rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. 
+ rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret := rt.Call(h.SubnetCoordActor.Register, nil) + res, ok := ret.(*actor.SubnetIDParam) + require.True(t, ok) + shid := address.SubnetID("/root/f0101") + // Verify the return value is correct. + require.Equal(t, res.ID, shid.String()) + rt.Verify() + require.Equal(t, getState(rt).TotalSubnets, uint64(1)) + // Verify instantiated subnet + sh, found := h.getSubnet(rt, shid) + nn1 := sh.ID + require.True(h.t, found) + require.Equal(t, sh.Stake, value) + require.Equal(t, sh.ID.String(), "/root/f0101") + require.Equal(t, sh.ParentID.String(), "/root") + require.Equal(t, sh.Status, actor.Active) + + t.Log("inject some funds in subnet") + funder := tutil.NewIDAddr(h.t, 1000) + value = abi.NewTokenAmount(1e18) + fund(h, rt, nn1, funder, value, 1, value, value) + newfunder := tutil.NewIDAddr(h.t, 1001) + fund(h, rt, nn1, newfunder, value, 2, big.Mul(big.NewInt(2), value), value) + fund(h, rt, nn1, newfunder, value, 3, big.Mul(big.NewInt(3), value), big.Mul(big.NewInt(2), value)) + + t.Log("get cross messages from nonce") + sh, _ = h.getSubnet(rt, nn1) + msgs, err := sh.TopDownMsgFromNonce(adt.AsStore(rt), 0) + require.NoError(h.t, err) + require.Equal(h.t, len(msgs), 3) + msgs, err = sh.TopDownMsgFromNonce(adt.AsStore(rt), 2) + require.NoError(h.t, err) + require.Equal(h.t, len(msgs), 1) +} + +func TestReleaseFunds(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + shid := address.SubnetID("/root/f0101") + h.constructAndVerifyWithNetworkName(rt, shid) + + t.Log("release some funds from subnet") + releaser := tutil.NewIDAddr(h.t, 1000) + value := abi.NewTokenAmount(1e18) + prev := release(h, rt, shid, releaser, value, 0, cid.Undef) + release(h, rt, shid, releaser, value, 1, prev) + +} + +func TestCrossMsg(t *testing.T) { + h := newHarness(t) + builder := 
mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + shid := address.SubnetID("/root/f0101") + h.constructAndVerifyWithNetworkName(rt, shid) + h.sn = shid + + from := tutil.NewIDAddr(h.t, 1000) + to := tutil.NewIDAddr(h.t, 1011) + value := abi.NewTokenAmount(1e18) + + // Bottom up + crossmsg(h, rt, "/root/f0102/f0101", from, to, value, 0, big.Zero()) + crossmsg(h, rt, "/root/f0102/f0101", from, tutil.NewIDAddr(h.t, 1011), value, 1, big.Zero()) + crossmsg(h, rt, "/root", from, to, value, 0, big.Zero()) + + // TopDown + snAddr := tutil.NewIDAddr(t, 101) + h.registerSubnet(rt, shid, snAddr) + crossmsg(h, rt, "/root/f0101/f0101", from, to, value, 1, value) + crossmsg(h, rt, "/root/f0101/f0101", from, tutil.NewIDAddr(h.t, 1011), value, 2, big.Mul(big.NewInt(2), value)) + crossmsg(h, rt, "/root/f0101/f0101/f0102", from, to, value, 3, big.Mul(big.NewInt(3), value)) +} + +func TestApplyRouting(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + shid := address.SubnetID("/root/f0101") + h.constructAndVerifyWithNetworkName(rt, shid) + snAddr1 := tutil.NewIDAddr(t, 101) + h.registerSubnet(rt, shid, snAddr1) + snAddr2 := tutil.NewIDAddr(t, 102) + h.registerSubnet(rt, shid, snAddr2) + sn1 := address.NewSubnetID(shid, snAddr1) + sn2 := address.NewSubnetID(shid, snAddr2) + + // Inject some funds + funderID := tutil.NewIDAddr(h.t, 1000) + t.Log("inject some funds in subnets") + init := abi.NewTokenAmount(1e18) + // NOTE: we should strictly apply this fund messages and start + // application below from 1, but the final result won't be affected. 
+ fund(h, rt, sn1, funderID, init, 1, init, init) + fund(h, rt, sn2, funderID, init, 1, init, init) + + from := tutil.NewSECP256K1Addr(h.t, "from") + to := tutil.NewSECP256K1Addr(h.t, "to") + + // TopDown + ff, err := address.NewHAddress(address.SubnetID("/root"), from) + require.NoError(t, err) + tt, err := address.NewHAddress(sn1, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 0, 1, false) + tt, err = address.NewHAddress(sn2, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 1, 1, false) + ff, err = address.NewHAddress(address.SubnetID("/root/f01/f012"), from) + require.NoError(t, err) + tt, err = address.NewHAddress(sn1, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 2, 2, false) + // Directed to current subnet + tt, err = address.NewHAddress(shid, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 3, 0, false) + + // BottomUp + ff, err = address.NewHAddress(sn1, to) + require.NoError(t, err) + tt, err = address.NewHAddress(address.SubnetID("/root/f0101/f0102/f011"), from) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 0, 2, false) + ff, err = address.NewHAddress(sn2, to) + require.NoError(t, err) + tt, err = address.NewHAddress(address.SubnetID("/root/f0101/f0101/f011"), from) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 1, 3, false) + // Directed to current subnet + ff, err = address.NewHAddress(address.SubnetID("/root/f0101/f0102/f011"), from) + require.NoError(t, err) + tt, err = address.NewHAddress(shid, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 1, 0, false) + +} + +func TestNoopMessageWhenError(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + shid := 
address.SubnetID("/root/f0101") + h.constructAndVerifyWithNetworkName(rt, shid) + snAddr1 := tutil.NewIDAddr(t, 101) + h.registerSubnet(rt, shid, snAddr1) + snAddr2 := tutil.NewIDAddr(t, 102) + // h.registerSubnet(rt, shid, snAddr2) + sn1 := address.NewSubnetID(shid, snAddr1) + sn2 := address.NewSubnetID(shid, snAddr2) + + // Inject some funds + funderID := tutil.NewIDAddr(h.t, 1000) + t.Log("inject some funds in subnets") + init := abi.NewTokenAmount(1e18) + fund(h, rt, sn1, funderID, init, 1, init, init) + + from := tutil.NewSECP256K1Addr(h.t, "from") + to := tutil.NewSECP256K1Addr(h.t, "to") + + // TopDown + ff, err := address.NewHAddress(address.SubnetID("/root"), from) + require.NoError(t, err) + tt, err := address.NewHAddress(sn2, to) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 0, 1, true) + + // BottomUp + ff, err = address.NewHAddress(sn1, to) + require.NoError(t, err) + tt, err = address.NewHAddress(address.SubnetID("/root/f0101/f0102/f011"), from) + require.NoError(t, err) + h.applyCrossMsg(rt, ff, tt, abi.NewTokenAmount(1e17), 0, 1, true) + + // TODO: Maybe include more extensive tests? 
+} + +func TestApplyMsg(t *testing.T) { + h := newHarness(t) + builder := mock.NewBuilder(builtin.StoragePowerActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt := builder.Build(t) + h.constructAndVerify(rt) + snAddr := tutil.NewIDAddr(t, 101) + h.registerSubnet(rt, address.RootSubnet, snAddr) + funder, err := address.NewHAddress(h.sn.Parent(), tutil.NewSECP256K1Addr(h.t, "asd")) + require.NoError(h.t, err) + funderID := tutil.NewIDAddr(h.t, 1000) + + // Inject some funds to test circSupply + t.Log("inject some funds in subnet") + init := abi.NewTokenAmount(1e18) + fund(h, rt, h.sn, funderID, init, 1, init, init) + value := abi.NewTokenAmount(1e17) + + t.Log("apply fund messages") + for i := 0; i < 5; i++ { + h.applyFundMsg(rt, funder, value, uint64(i), false) + } + // Applying already used nonces or non-subsequent should fail + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + h.applyFundMsg(rt, funder, value, 10, true) + }) + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + h.applyFundMsg(rt, funder, value, 1, true) + }) + + // Register subnet for update in circulating supply + releaser, err := address.NewHAddress(h.sn.Parent(), tutil.NewSECP256K1Addr(h.t, "asd")) + require.NoError(h.t, err) + + t.Log("apply release messages") + // Three messages with the same nonce + for i := 0; i < 3; i++ { + h.applyReleaseMsg(rt, releaser, value, uint64(0)) + } + // The following with increasing nonces + for i := 0; i < 3; i++ { + h.applyReleaseMsg(rt, releaser, value, uint64(i)) + } + // Check that circ supply is updated successfully. + // NOTE: The update in circ supply is now performed as the checkpoint propagates + // and not when the message is applied. 
+ // sh, found := h.getSubnet(rt, h.sn) + // require.True(h.t, found) + // require.Equal(h.t, sh.CircSupply, big.Sub(init, big.Mul(big.NewInt(6), value))) + + // Trying to release over the circulating supply + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + rt.SetCaller(snAddr, actors.SubnetActorCodeID) + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + ch := newCheckpoint(h.sn, abi.ChainEpoch(9)) + v := big.Mul(big.NewInt(2), init) + rt.SetBalance(init) + addMsgMeta(ch, h.sn, address.RootSubnet, "rand", v) + b, err := ch.MarshalBinary() + require.NoError(t, err) + rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, v, nil, exitcode.Ok) + rt.Call(h.SubnetCoordActor.CommitChildCheckpoint, &actor.CheckpointParams{b}) + rt.Verify() + }) + // Applying already used nonces or non-subsequent should fail + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + h.applyReleaseMsg(rt, releaser, value, 10) + }) + rt.ExpectAbort(exitcode.ErrIllegalState, func() { + h.applyReleaseMsg(rt, releaser, value, 1) + }) +} + +func (h *shActorHarness) applyFundMsg(rt *mock.Runtime, addr address.Address, value big.Int, nonce uint64, abort bool) { + rt.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + params := &actor.CrossMsgParams{ + Msg: ltypes.Message{ + To: addr, + From: addr, + Value: value, + Nonce: nonce, + Method: builtin.MethodSend, + GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + }, + } + + rewParams := &reward.FundingParams{ + Addr: hierarchical.SubnetCoordActorAddr, + Value: value, + } + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + if !abort { + rt.ExpectSend(reward.RewardActorAddr, reward.Methods.ExternalFunding, rewParams, big.Zero(), nil, exitcode.Ok) + raddr, err := addr.RawAddr() + require.NoError(h.t, err) + rt.ExpectSend(raddr, params.Msg.Method, nil, params.Msg.Value, nil, exitcode.Ok) + } + 
rt.Call(h.SubnetCoordActor.ApplyMessage, params) + rt.Verify() + st := getState(rt) + require.Equal(h.t, st.AppliedTopDownNonce, nonce+1) +} + +func (h *shActorHarness) applyReleaseMsg(rt *mock.Runtime, addr address.Address, value big.Int, nonce uint64) { + rt.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt.SetBalance(value) + from, err := address.NewHAddress(h.sn, builtin.BurntFundsActorAddr) + require.NoError(h.t, err) + params := &actor.CrossMsgParams{ + Msg: ltypes.Message{ + To: addr, + From: from, + Method: builtin.MethodSend, + Value: value, + Nonce: nonce, + GasLimit: 1 << 30, // This will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + }, + } + + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rto, err := addr.RawAddr() + require.NoError(h.t, err) + rt.ExpectSend(rto, builtin.MethodSend, nil, value, nil, exitcode.Ok) + rt.Call(h.SubnetCoordActor.ApplyMessage, params) + rt.Verify() + st := getState(rt) + require.Equal(h.t, st.AppliedBottomUpNonce, nonce) +} + +func (h *shActorHarness) applyCrossMsg(rt *mock.Runtime, from, to address.Address, value big.Int, msgNonce, tdNonce uint64, noop bool) { + rt.SetCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + rt.SetBalance(value) + params := &actor.CrossMsgParams{ + Msg: ltypes.Message{ + To: to, + From: from, + Method: builtin.MethodSend, + Value: value, + Nonce: msgNonce, + GasLimit: 1 << 30, + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + }, + } + + st := getState(rt) + // BottomUp + sto, err := params.Msg.To.Subnet() + require.NoError(h.t, err) + if isBu, _ := hierarchical.ApplyAsBottomUp(st.NetworkName, &params.Msg); isBu { + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + if sto == st.NetworkName { + rto, err := to.RawAddr() + require.NoError(h.t, err) + rt.ExpectSend(rto, builtin.MethodSend, nil, value, nil, exitcode.Ok) + } + 
rt.Call(h.SubnetCoordActor.ApplyMessage, params) + rt.Verify() + st := getState(rt) + require.Equal(h.t, st.AppliedBottomUpNonce, msgNonce) + } else { + rewParams := &reward.FundingParams{ + Addr: hierarchical.SubnetCoordActorAddr, + Value: value, + } + rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) + rt.ExpectSend(reward.RewardActorAddr, reward.Methods.ExternalFunding, rewParams, big.Zero(), nil, exitcode.Ok) + if sto == st.NetworkName { + raddr, err := params.Msg.To.RawAddr() + require.NoError(h.t, err) + rt.ExpectSend(raddr, params.Msg.Method, nil, params.Msg.Value, nil, exitcode.Ok) + } + rt.Call(h.SubnetCoordActor.ApplyMessage, params) + rt.Verify() + st := getState(rt) + require.Equal(h.t, st.AppliedTopDownNonce, msgNonce+1) + } + + // If this is noop + if noop { + msg := params.Msg + msg.From, msg.To = msg.To, msg.From + // sfrom, err := msg.From.Subnet() + // require.NoError(h.t, err) + sto, err := msg.To.Subnet() + require.NoError(h.t, err) + if hierarchical.IsBottomUp(h.sn.Parent(), sto) { + // Check that msgMeta included in checkpoint + windowCh := currWindowCheckpoint(rt, 0) + _, chmeta := windowCh.CrossMsgMeta(h.sn.Parent(), sto) + require.NotNil(h.t, chmeta) + cidmeta, err := chmeta.Cid() + require.NoError(h.t, err) + meta, found := h.getMsgMeta(rt, cidmeta) + require.True(h.t, found) + require.Equal(h.t, len(meta.Msgs), 1) + msg := meta.Msgs[0] + require.Equal(h.t, msg.From, to) + require.Equal(h.t, msg.To, from) + } else { + // TopDown + sh, found := h.getSubnet(rt, sto.Down(h.sn.Parent())) + require.True(h.t, found) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), tdNonce) + require.NoError(h.t, err) + require.True(h.t, found) + require.Equal(h.t, msg.From, to) + require.Equal(h.t, msg.To, from) + } + } else if sto != st.NetworkName { + sh, found := h.getSubnet(rt, sto.Down(st.NetworkName)) + require.True(h.t, found) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), tdNonce) + require.NoError(h.t, err) + require.True(h.t, found) + 
require.Equal(h.t, msg.From, from) + require.Equal(h.t, msg.To, to) + require.Equal(h.t, msg.Value, value) + } +} + +func crossmsg(h *shActorHarness, rt *mock.Runtime, shid address.SubnetID, from, to address.Address, value big.Int, nonce uint64, circSupply abi.TokenAmount) { + // Test SECP to use for calling + testSecp := tutil.NewSECP256K1Addr(h.t, "asd") + rt.SetReceived(value) + rt.SetBalance(value) + rt.SetCaller(from, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + params := &actor.CrossMsgParams{ + Destination: shid, + Msg: ltypes.Message{ + To: to, + From: from, + Value: value, + Nonce: nonce, + Method: builtin.MethodSend, + GasLimit: 1 << 30, + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: []byte("params"), + }, + } + // Expect a send to get pkey + rt.ExpectSend(from, builtin.MethodsAccount.PubkeyAddress, nil, big.Zero(), &testSecp, exitcode.Ok) + + if hierarchical.IsBottomUp(h.sn, params.Destination) { + // Burn funds before leaving the subnet. 
+ rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, value, nil, exitcode.Ok) + } + + rt.Call(h.SubnetCoordActor.SendCross, params) + rt.Verify() + st := getState(rt) + + // BottomUp + if hierarchical.IsBottomUp(h.sn, params.Destination) { + // Check that msgMeta included in checkpoint + windowCh := currWindowCheckpoint(rt, 0) + _, chmeta := windowCh.CrossMsgMeta(h.sn, params.Destination) + require.NotNil(h.t, chmeta) + cidmeta, err := chmeta.Cid() + require.NoError(h.t, err) + meta, found := h.getMsgMeta(rt, cidmeta) + require.True(h.t, found) + require.Equal(h.t, len(meta.Msgs), int(nonce+1)) + msg := meta.Msgs[nonce] + + from, err := address.NewHAddress(h.sn, testSecp) + require.NoError(h.t, err) + to, err := address.NewHAddress(params.Destination, to) + require.NoError(h.t, err) + require.Equal(h.t, msg.From, from) + require.Equal(h.t, msg.To, to) + require.Equal(h.t, msg.Value, value) + require.Equal(h.t, msg.Nonce, nonce) + } else { + // TopDown + sh, found := h.getSubnet(rt, h.sn) + require.True(h.t, found) + require.Equal(h.t, sh.CircSupply, circSupply) + require.Equal(h.t, sh.Nonce, nonce) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), nonce-1) + require.NoError(h.t, err) + require.True(h.t, found) + require.Equal(h.t, msg.Value, value) + + from, err = address.NewHAddress(st.NetworkName, testSecp) + require.NoError(h.t, err) + to, err = address.NewHAddress(params.Destination, to) + require.NoError(h.t, err) + require.Equal(h.t, msg.From, from) + require.Equal(h.t, msg.To, to) + require.Equal(h.t, msg.Nonce, nonce-1) + } +} + +func release(h *shActorHarness, rt *mock.Runtime, shid address.SubnetID, releaser address.Address, value big.Int, nonce uint64, prevMeta cid.Cid) cid.Cid { + // Test SECP to use for calling + testSecp := tutil.NewSECP256K1Addr(h.t, "asd") + rt.SetReceived(value) + rt.SetBalance(value) + rt.SetCaller(releaser, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + 
rt.ExpectSend(builtin.BurntFundsActorAddr, builtin.MethodSend, nil, value, nil, exitcode.Ok) + // Expect a send to get pkey + rt.ExpectSend(releaser, builtin.MethodsAccount.PubkeyAddress, nil, big.Zero(), &testSecp, exitcode.Ok) + rt.Call(h.SubnetCoordActor.Release, nil) + rt.Verify() + + // Check that msgMeta included in checkpoint + windowCh := currWindowCheckpoint(rt, 0) + _, chmeta := windowCh.CrossMsgMeta(shid, shid.Parent()) + require.NotNil(h.t, chmeta) + cidmeta, err := chmeta.Cid() + require.NoError(h.t, err) + meta, found := h.getMsgMeta(rt, cidmeta) + require.True(h.t, found) + require.Equal(h.t, len(meta.Msgs), int(nonce+1)) + msg := meta.Msgs[nonce] + + // Comes from child + from, err := address.NewHAddress(shid, builtin.BurntFundsActorAddr) + require.NoError(h.t, err) + // Goes to parent + to, err := address.NewHAddress(shid.Parent(), testSecp) + require.NoError(h.t, err) + require.Equal(h.t, msg.From, from) + // The "to" should have been updated to the secp addr + require.Equal(h.t, msg.To, to) + require.Equal(h.t, msg.Value, value) + require.Equal(h.t, msg.Nonce, nonce) + // check previous meta is removed + if prevMeta != cid.Undef { + _, found := h.getMsgMeta(rt, prevMeta) + require.False(h.t, found) + } + // return cid of meta + return cidmeta + +} + +func fund(h *shActorHarness, rt *mock.Runtime, sn address.SubnetID, funder address.Address, value abi.TokenAmount, + expectedNonce uint64, expectedCircSupply big.Int, expectedAddrFunds abi.TokenAmount) { + testSecp := tutil.NewSECP256K1Addr(h.t, funder.String()) + rt.SetReceived(value) + params := &actor.SubnetIDParam{ID: sn.String()} + rt.SetCaller(funder, builtin.AccountActorCodeID) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + // Expect a send to get pkey + rt.ExpectSend(funder, builtin.MethodsAccount.PubkeyAddress, nil, big.Zero(), &testSecp, exitcode.Ok) + rt.Call(h.SubnetCoordActor.Fund, params) + rt.Verify() + sh, found := h.getSubnet(rt, sn) + require.True(h.t, found) + 
require.Equal(h.t, sh.CircSupply, expectedCircSupply) + require.Equal(h.t, sh.Nonce, expectedNonce) + msg, found, err := sh.GetTopDownMsg(adt.AsStore(rt), expectedNonce-1) + require.NoError(h.t, err) + require.True(h.t, found) + // TODO: Add additional checks over msg? + require.Equal(h.t, msg.Value, value) + // Comes from parent network. + from, err := address.NewHAddress(sh.ID.Parent(), testSecp) + require.NoError(h.t, err) + // Goes to subnet with same address + to, err := address.NewHAddress(sh.ID, testSecp) + require.NoError(h.t, err) + require.Equal(h.t, msg.From, from) + require.Equal(h.t, msg.To, to) + require.Equal(h.t, msg.Nonce, expectedNonce-1) +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_state.go b/chain/consensus/hierarchical/actors/sca/sca_state.go index 18be55177..8af281ece 100644 --- a/chain/consensus/hierarchical/actors/sca/sca_state.go +++ b/chain/consensus/hierarchical/actors/sca/sca_state.go @@ -3,13 +3,38 @@ package sca import ( address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" cid "github.com/ipfs/go-cid" "golang.org/x/xerrors" ) +const ( + // DefaultCheckpointPeriod defines 10 epochs + // as the default checkpoint period for a subnet. + // This may be too short, but at this point it comes pretty handy + // for testing purposes. 
+ DefaultCheckpointPeriod = abi.ChainEpoch(10) + // MinCheckpointPeriod allowed for subnets + MinCheckpointPeriod = abi.ChainEpoch(10) + + // CrossMsgsAMTBitwidth determines the bitwidth to use for cross-msg AMT. + // TODO: We probably need some empirical experiments to determine the best values + // for these constants. + CrossMsgsAMTBitwidth = 3 + + // MaxNonce supported in cross messages + // Bear in mind that we cast to Int64 when marshalling in + // some places + MaxNonce = ^uint64(0) +) + var ( // MinSubnetStake required to register a new subnet // TODO: Kept in 1FIL for testing, change to the right @@ -30,63 +55,93 @@ const ( // SCAState represents the state of the Subnet Coordinator Actor type SCAState struct { - // CID of the current network - Network cid.Cid // ID of the current network - NetworkName hierarchical.SubnetID + NetworkName address.SubnetID // Total subnets below this one. TotalSubnets uint64 // Minimum stake to create a new subnet MinStake abi.TokenAmount // List of subnets Subnets cid.Cid // HAMT[cid.Cid]Subnet -} -type Subnet struct { - Cid cid.Cid // Cid of the subnet ID - ID hierarchical.SubnetID // human-readable name of the subnet ID (path in the hierarchy) - Parent cid.Cid - ParentID hierarchical.SubnetID - Stake abi.TokenAmount - // The SCA doesn't keep track of the stake from miners, just locks the funds. - // Is up to the subnet actor to handle this and distribute the stake - // when the subnet is killed. - // NOTE: We may want to keep track of this in the future. - // Stake cid.Cid // BalanceTable with locked stake. - Funds cid.Cid // BalanceTable with funds from addresses that entered the subnet. - Status Status + // Checkpoint period in number of epochs + CheckPeriod abi.ChainEpoch + // Checkpoints committed in SCA + Checkpoints cid.Cid // HAMT[epoch]Checkpoint + + // CheckMsgMetaRegistry + // Stores information about the list of messages and child msgMetas being + // propagated in checkpoints to the top of the hierarchy. 
+ CheckMsgsRegistry cid.Cid // HAMT[cid]CrossMsgs + Nonce uint64 // Latest nonce of cross message sent from subnet. + BottomUpNonce uint64 // BottomUpNonce of bottomup messages for msgMeta received from checkpoints (probably redundant) + BottomUpMsgsMeta cid.Cid // AMT[schema.CrossMsgs] from child subnets to apply. + + // AppliedNonces + // + // Keep track of the next nonce of the message to be applied. + AppliedBottomUpNonce uint64 + AppliedTopDownNonce uint64 + + // Atomic execution state + AtomicExecRegistry cid.Cid // HAMT[cid] } -func ConstructSCAState(store adt.Store, networkName hierarchical.SubnetID) (*SCAState, error) { +func ConstructSCAState(store adt.Store, params *ConstructorParams) (*SCAState, error) { emptySubnetsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) if err != nil { return nil, xerrors.Errorf("failed to create empty map: %w", err) } - networkCid, err := networkName.Cid() + emptyCheckpointsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty map: %w", err) + } + emptyMsgsMetaMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) if err != nil { - panic(err) + return nil, xerrors.Errorf("failed to create empty map: %w", err) + } + emptyAtomicMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty map: %w", err) } + emptyBottomUpMsgsAMT, err := adt.StoreEmptyArray(store, CrossMsgsAMTBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty AMT: %w", err) + } + + nn := address.SubnetID(params.NetworkName) + // Don't allow really small checkpoint periods for now. 
+ period := abi.ChainEpoch(params.CheckpointPeriod) + if period < MinCheckpointPeriod { + period = DefaultCheckpointPeriod + } + return &SCAState{ - Network: networkCid, - NetworkName: networkName, - TotalSubnets: 0, - MinStake: MinSubnetStake, - Subnets: emptySubnetsMapCid, + NetworkName: nn, + TotalSubnets: 0, + MinStake: MinSubnetStake, + Subnets: emptySubnetsMapCid, + CheckPeriod: period, + Checkpoints: emptyCheckpointsMapCid, + CheckMsgsRegistry: emptyMsgsMetaMapCid, + BottomUpMsgsMeta: emptyBottomUpMsgsAMT, + AppliedBottomUpNonce: MaxNonce, // We need initial nonce+1 to be 0 due to how msgs are applied. + AtomicExecRegistry: emptyAtomicMapCid, }, nil } // GetSubnet gets a subnet from the actor state. -func (st *SCAState) GetSubnet(s adt.Store, id cid.Cid) (*Subnet, bool, error) { - claims, err := adt.AsMap(s, st.Subnets, builtin.DefaultHamtBitwidth) +func (st *SCAState) GetSubnet(s adt.Store, id address.SubnetID) (*Subnet, bool, error) { + subnets, err := adt.AsMap(s, st.Subnets, builtin.DefaultHamtBitwidth) if err != nil { return nil, false, xerrors.Errorf("failed to load subnets: %w", err) } - return getSubnet(claims, id) + return getSubnet(subnets, id) } -func getSubnet(subnets *adt.Map, id cid.Cid) (*Subnet, bool, error) { +func getSubnet(subnets *adt.Map, id address.SubnetID) (*Subnet, bool, error) { var out Subnet - found, err := subnets.Get(abi.CidKey(id), &out) + found, err := subnets.Get(hierarchical.SubnetKey(id), &out) if err != nil { return nil, false, xerrors.Errorf("failed to get subnet with id %v: %w", id, err) } @@ -96,14 +151,125 @@ func getSubnet(subnets *adt.Map, id cid.Cid) (*Subnet, bool, error) { return &out, true, nil } +func (st *SCAState) flushSubnet(rt runtime.Runtime, sh *Subnet) { + // Update subnet in the list of subnets. 
+ subnets, err := adt.AsMap(adt.AsStore(rt), st.Subnets, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for subnets") + err = subnets.Put(hierarchical.SubnetKey(sh.ID), sh) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put new subnet in subnet map") + // Flush subnets + st.Subnets, err = subnets.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush subnets") +} + +// currWindowCheckpoint gets the template of the checkpoint being +// populated in the current window. +// +// If it hasn't been instantiated, a template is created. From there on, +// the template is populated with every new x-net transaction and +// child checkpoint, until the window passes, at which point the template is frozen +// and is ready for miners to populate the rest and sign it. +func (st *SCAState) CurrWindowCheckpoint(store adt.Store, epoch abi.ChainEpoch) (*schema.Checkpoint, error) { + chEpoch := types.WindowEpoch(epoch, st.CheckPeriod) + ch, found, err := st.GetCheckpoint(store, chEpoch) + if err != nil { + return nil, err + } + if !found { + ch = schema.NewRawCheckpoint(st.NetworkName, chEpoch) + } + return ch, nil +} + +func (st *SCAState) currWindowCheckpoint(rt runtime.Runtime) *schema.Checkpoint { + ch, err := st.CurrWindowCheckpoint(adt.AsStore(rt), rt.CurrEpoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get checkpoint template for epoch") + return ch +} + +// RawCheckpoint gets the template of the checkpoint in +// the signing window for an epoch +// +// It returns the checkpoint that is ready to be signed +// and already includes all the checkpoints and x-net messages +// to include in it. Miners need to populate the prevCheckpoint +// and tipset of this template and sign it. 
+func RawCheckpoint(st *SCAState, store adt.Store, epoch abi.ChainEpoch) (*schema.Checkpoint, error) { + if epoch < 0 { + return nil, xerrors.Errorf("epoch can't be negative") + } + chEpoch := types.CheckpointEpoch(epoch, st.CheckPeriod) + ch, found, err := st.GetCheckpoint(store, chEpoch) + if err != nil { + return nil, err + } + // If nothing has been populated yet return an empty checkpoint. + if !found { + ch = schema.NewRawCheckpoint(st.NetworkName, chEpoch) + } + return ch, nil +} + +// GetCheckpoint gets a checkpoint from its index +func (st *SCAState) GetCheckpoint(s adt.Store, epoch abi.ChainEpoch) (*schema.Checkpoint, bool, error) { + checkpoints, err := adt.AsMap(s, st.Checkpoints, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load checkpoint: %w", err) + } + return getCheckpoint(checkpoints, epoch) +} + +func getCheckpoint(checkpoints *adt.Map, epoch abi.ChainEpoch) (*schema.Checkpoint, bool, error) { + var out schema.Checkpoint + found, err := checkpoints.Get(abi.UIntKey(uint64(epoch)), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get checkpoint for epoch %v: %w", epoch, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +func (st *SCAState) flushCheckpoint(rt runtime.Runtime, ch *schema.Checkpoint) { + // Update subnet in the list of checkpoints. + checks, err := adt.AsMap(adt.AsStore(rt), st.Checkpoints, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for checkpoints") + err = checks.Put(abi.UIntKey(uint64(ch.Data.Epoch)), ch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put checkpoint in map") + // Flush checkpoints + st.Checkpoints, err = checks.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush checkpoints") +} + // Get subnet from its subnet actor address. 
func (st *SCAState) getSubnetFromActorAddr(s adt.Store, addr address.Address) (*Subnet, bool, error) { - shid := hierarchical.NewSubnetID(st.NetworkName, addr) - shcid, err := shid.Cid() - if err != nil { - return nil, false, err + shid := address.NewSubnetID(st.NetworkName, addr) + return st.GetSubnet(s, shid) +} + +func (st *SCAState) registerSubnet(rt runtime.Runtime, shid address.SubnetID, stake big.Int) { + emptyTopDownMsgsAMT, err := adt.StoreEmptyArray(adt.AsStore(rt), CrossMsgsAMTBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create empty top-down msgs array") + + // We always initialize in instantiated state + status := Active + + sh := &Subnet{ + ID: shid, + ParentID: st.NetworkName, + Stake: stake, + TopDownMsgs: emptyTopDownMsgsAMT, + CircSupply: big.Zero(), + Status: status, + PrevCheckpoint: *schema.EmptyCheckpoint, } - return st.GetSubnet(s, shcid) + + // Increase the number of child subnets for the current network. + st.TotalSubnets++ + + // Flush subnet into subnetMap + st.flushSubnet(rt, sh) } func ListSubnets(s adt.Store, st SCAState) ([]Subnet, error) { diff --git a/chain/consensus/hierarchical/actors/sca/sca_subnet.go b/chain/consensus/hierarchical/actors/sca/sca_subnet.go new file mode 100644 index 000000000..dec19d9cb --- /dev/null +++ b/chain/consensus/hierarchical/actors/sca/sca_subnet.go @@ -0,0 +1,130 @@ +package sca + +import ( + address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + schema "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + ltypes "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + cid "github.com/ipfs/go-cid" + 
xerrors "golang.org/x/xerrors" +) + +type Subnet struct { + ID address.SubnetID // human-readable name of the subnet ID (path in the hierarchy) + ParentID address.SubnetID + Stake abi.TokenAmount + TopDownMsgs cid.Cid // AMT[ltypes.Messages] of cross top-down messages to subnet. + // NOTE: We can avoid explicitly storing the Nonce here and use CrossMsgs length + // to determine the nonce. Deferring that for future iterations. + Nonce uint64 // Latest nonce of cross message submitted to subnet. + CircSupply abi.TokenAmount // Circulating supply of FIL in subnet. + Status Status + // NOTE: We could probably save some gas here without affecting the + // overall behavior of checkpoint commitment by just keeping the information + // required for verification (prevCheck cid and epoch). + PrevCheckpoint schema.Checkpoint +} + +// addStake adds new funds to the stake of the subnet. +// +// This function also accepts negative values to subtract, and checks +// if the funds are enough for the subnet to be active. +func (sh *Subnet) addStake(rt runtime.Runtime, st *SCAState, value abi.TokenAmount) { + // Add stake to the subnet + sh.Stake = big.Add(sh.Stake, value) + + // Check if subnet still has enough stake to be active + if sh.Stake.LessThan(st.MinStake) { + sh.Status = Inactive + } + + // Flush subnet into subnetMap + st.flushSubnet(rt, sh) + +} + +func fundMsg(rt runtime.Runtime, id address.SubnetID, secp address.Address, value big.Int) ltypes.Message { + + // Transform To and From to HAddresses + to, err := address.NewHAddress(id, secp) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + from, err := address.NewHAddress(id.Parent(), secp) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to create HAddress") + + // Build message. + // + // Fund messages include the same to and from. 
+ return ltypes.Message{ + To: to, + From: from, + Value: value, + Method: builtin.MethodSend, + GasLimit: 1 << 30, // This will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + } +} + +func (sh *Subnet) storeTopDownMsg(rt runtime.Runtime, msg *ltypes.Message) { + crossMsgs, err := adt.AsArray(adt.AsStore(rt), sh.TopDownMsgs, CrossMsgsAMTBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load cross-messages") + // Set message in AMT + err = crossMsgs.Set(msg.Nonce, msg) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to store cross-messages") + // Flush AMT + sh.TopDownMsgs, err = crossMsgs.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush cross-messages") + +} + +func (sh *Subnet) GetTopDownMsg(s adt.Store, nonce uint64) (*ltypes.Message, bool, error) { + crossMsgs, err := adt.AsArray(s, sh.TopDownMsgs, CrossMsgsAMTBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load cross-msgs: %w", err) + } + return getTopDownMsg(crossMsgs, nonce) +} + +func getTopDownMsg(crossMsgs *adt.Array, nonce uint64) (*ltypes.Message, bool, error) { + if nonce > MaxNonce { + return nil, false, xerrors.Errorf("maximum cross-message nonce is 2^63-1") + } + var out ltypes.Message + found, err := crossMsgs.Get(nonce, &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get cross-msg with nonce %v: %w", nonce, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +// TopDownMsgFromNonce gets the latest topDownMessages from a specific nonce +// (including the one specified, i.e. [nonce, latest], both limits +// included). 
+func (sh *Subnet) TopDownMsgFromNonce(s adt.Store, nonce uint64) ([]*ltypes.Message, error) { + crossMsgs, err := adt.AsArray(s, sh.TopDownMsgs, CrossMsgsAMTBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to load cross-msgs: %w", err) + } + // FIXME: Consider setting the length of the slice in advance + // to improve performance. + out := make([]*ltypes.Message, 0) + for i := nonce; i < sh.Nonce; i++ { + msg, found, err := getTopDownMsg(crossMsgs, i) + if err != nil { + return nil, err + } + if found { + out = append(out, msg) + } + } + return out, nil +} diff --git a/chain/consensus/hierarchical/actors/sca/sca_test.go b/chain/consensus/hierarchical/actors/sca/sca_test.go index 4ba02dcc3..42efa0373 100644 --- a/chain/consensus/hierarchical/actors/sca/sca_test.go +++ b/chain/consensus/hierarchical/actors/sca/sca_test.go @@ -3,18 +3,22 @@ package sca_test import ( "testing" + address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" actors "github.com/filecoin-project/lotus/chain/consensus/actors" - initactor "github.com/filecoin-project/lotus/chain/consensus/actors/init" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" actor "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" - "github.com/filecoin-project/specs-actors/v6/support/mock" - tutil "github.com/filecoin-project/specs-actors/v6/support/testing" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + ltypes "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + 
"github.com/filecoin-project/specs-actors/v7/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" cid "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -24,7 +28,7 @@ func TestExports(t *testing.T) { } func TestConstruction(t *testing.T) { - builder := mock.NewBuilder(actor.SubnetCoordActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) + builder := mock.NewBuilder(hierarchical.SubnetCoordActorAddr).WithCaller(builtin.SystemActorAddr, builtin.SystemActorCodeID) actor := newHarness(t) t.Run("simple construction", func(t *testing.T) { @@ -51,12 +55,11 @@ func TestRegister(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) // Call Register function ret := rt.Call(h.SubnetCoordActor.Register, nil) - res, ok := ret.(*actor.AddSubnetReturn) + res, ok := ret.(*actor.SubnetIDParam) require.True(t, ok) - shid, err := hierarchical.SubnetID("/root/t0101").Cid() - require.NoError(t, err) + shid := address.SubnetID("/root/f0101") // Verify the return value is correct. 
- require.Equal(t, res.Cid, shid) + require.Equal(t, res.ID, shid.String()) rt.Verify() require.Equal(t, getState(rt).TotalSubnets, uint64(1)) @@ -64,7 +67,7 @@ func TestRegister(t *testing.T) { sh, found := h.getSubnet(rt, shid) require.True(h.t, found) require.Equal(t, sh.Stake, value) - require.Equal(t, sh.ID.String(), "/root/t0101") + require.Equal(t, sh.ID.String(), "/root/f0101") require.Equal(t, sh.ParentID.String(), "/root") require.Equal(t, sh.Status, actor.Active) @@ -78,6 +81,7 @@ func TestRegister(t *testing.T) { rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { rt.Call(h.SubnetCoordActor.Register, nil) }) + rt.Verify() t.Log("try registering without staking enough funds") SubnetActorAddr = tutil.NewIDAddr(t, 102) @@ -91,6 +95,7 @@ func TestRegister(t *testing.T) { rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { rt.Call(h.SubnetCoordActor.Register, nil) }) + rt.Verify() t.Log("Register second subnet") rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) @@ -100,19 +105,18 @@ func TestRegister(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) // Call Register function ret = rt.Call(h.SubnetCoordActor.Register, nil) - res, ok = ret.(*actor.AddSubnetReturn) + res, ok = ret.(*actor.SubnetIDParam) require.True(t, ok) - shid, err = hierarchical.SubnetID("/root/t0102").Cid() - require.NoError(t, err) + shid = address.SubnetID("/root/f0102") // Verify the return value is correct. 
- require.Equal(t, res.Cid, shid) + require.Equal(t, res.ID, shid.String()) rt.Verify() require.Equal(t, getState(rt).TotalSubnets, uint64(2)) // Verify instantiated subnet sh, found = h.getSubnet(rt, shid) require.True(h.t, found) require.Equal(t, sh.Stake, value) - require.Equal(t, sh.ID.String(), "/root/t0102") + require.Equal(t, sh.ID.String(), "/root/f0102") require.Equal(t, sh.ParentID.String(), "/root") require.Equal(t, sh.Status, actor.Active) } @@ -133,8 +137,9 @@ func TestAddStake(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) // Call Register function ret := rt.Call(h.SubnetCoordActor.Register, nil) - res, ok := ret.(*actor.AddSubnetReturn) + res, ok := ret.(*actor.SubnetIDParam) require.True(t, ok) + rt.Verify() t.Log("add to unregistered subnet") newActorAddr := tutil.NewIDAddr(t, 102) @@ -149,6 +154,7 @@ func TestAddStake(t *testing.T) { rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { rt.Call(h.SubnetCoordActor.AddStake, nil) }) + rt.Verify() t.Log("add some stake") rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) @@ -157,7 +163,7 @@ func TestAddStake(t *testing.T) { // Only subnet actors can call. 
rt.ExpectValidateCallerType(actors.SubnetActorCodeID) rt.Call(h.SubnetCoordActor.AddStake, nil) - sh, found := h.getSubnet(rt, res.Cid) + sh, found := h.getSubnet(rt, address.SubnetID(res.ID)) require.True(h.t, found) require.Equal(t, sh.Stake, big.Add(value, value)) require.Equal(t, sh.Status, actor.Active) @@ -172,6 +178,7 @@ func TestAddStake(t *testing.T) { rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { rt.Call(h.SubnetCoordActor.AddStake, nil) }) + rt.Verify() } func TestReleaseStake(t *testing.T) { @@ -190,7 +197,7 @@ func TestReleaseStake(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) // Call Register function ret := rt.Call(h.SubnetCoordActor.Register, nil) - res, ok := ret.(*actor.AddSubnetReturn) + res, ok := ret.(*actor.SubnetIDParam) require.True(t, ok) releaseVal := abi.NewTokenAmount(1e18) @@ -205,6 +212,7 @@ func TestReleaseStake(t *testing.T) { rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { rt.Call(h.SubnetCoordActor.ReleaseStake, params) }) + rt.Verify() t.Log("release some stake") rt.SetCaller(SubnetActorAddr, actors.SubnetActorCodeID) @@ -212,10 +220,11 @@ func TestReleaseStake(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) rt.ExpectSend(SubnetActorAddr, builtin.MethodSend, nil, releaseVal, nil, exitcode.Ok) rt.Call(h.SubnetCoordActor.ReleaseStake, params) - sh, found := h.getSubnet(rt, res.Cid) + sh, found := h.getSubnet(rt, address.SubnetID(res.ID)) require.True(h.t, found) require.Equal(t, sh.Stake, big.Sub(value, releaseVal)) require.Equal(t, sh.Status, actor.Active) + rt.Verify() t.Log("release to inactivate") currStake := sh.Stake @@ -226,10 +235,11 @@ func TestReleaseStake(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) rt.ExpectSend(SubnetActorAddr, builtin.MethodSend, nil, releaseVal, nil, exitcode.Ok) rt.Call(h.SubnetCoordActor.ReleaseStake, params) - sh, found = h.getSubnet(rt, res.Cid) + sh, found = h.getSubnet(rt, address.SubnetID(res.ID)) 
require.True(h.t, found) require.Equal(t, sh.Stake, big.Sub(currStake, releaseVal)) require.Equal(t, sh.Status, actor.Inactive) + rt.Verify() t.Log("not enough funds to release") releaseVal = abi.NewTokenAmount(1e18) @@ -282,7 +292,7 @@ func TestKill(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) // Call Register function ret := rt.Call(h.SubnetCoordActor.Register, nil) - res, ok := ret.(*actor.AddSubnetReturn) + res, ok := ret.(*actor.SubnetIDParam) require.True(t, ok) t.Log("kill subnet") @@ -291,14 +301,16 @@ func TestKill(t *testing.T) { rt.ExpectValidateCallerType(actors.SubnetActorCodeID) rt.ExpectSend(SubnetActorAddr, builtin.MethodSend, nil, value, nil, exitcode.Ok) rt.Call(h.SubnetCoordActor.Kill, nil) + rt.Verify() // The subnet has been removed. - _, found := h.getSubnet(rt, res.Cid) + _, found := h.getSubnet(rt, address.SubnetID(res.ID)) require.False(h.t, found) } type shActorHarness struct { actor.SubnetCoordActor - t *testing.T + t *testing.T + sn address.SubnetID } func newHarness(t *testing.T) *shActorHarness { @@ -309,8 +321,13 @@ func newHarness(t *testing.T) *shActorHarness { } func (h *shActorHarness) constructAndVerify(rt *mock.Runtime) { + shid := address.RootSubnet + h.constructAndVerifyWithNetworkName(rt, shid) +} + +func (h *shActorHarness) constructAndVerifyWithNetworkName(rt *mock.Runtime, shid address.SubnetID) { rt.ExpectValidateCallerAddr(builtin.SystemActorAddr) - ret := rt.Call(h.SubnetCoordActor.Constructor, &initactor.ConstructorParams{NetworkName: "/root"}) + ret := rt.Call(h.SubnetCoordActor.Constructor, &actor.ConstructorParams{NetworkName: shid.String(), CheckpointPeriod: 100}) assert.Nil(h.t, ret) rt.Verify() @@ -318,12 +335,11 @@ func (h *shActorHarness) constructAndVerify(rt *mock.Runtime) { rt.GetState(&st) assert.Equal(h.t, actor.MinSubnetStake, st.MinStake) - shid := hierarchical.RootSubnet - shcid, err := shid.Cid() - require.NoError(h.t, err) - assert.Equal(h.t, st.Network, shcid) 
assert.Equal(h.t, st.NetworkName, shid) + assert.Equal(h.t, st.CheckPeriod, abi.ChainEpoch(100)) verifyEmptyMap(h.t, rt, st.Subnets) + verifyEmptyMap(h.t, rt, st.CheckMsgsRegistry) + verifyEmptyMap(h.t, rt, st.Checkpoints) } func verifyEmptyMap(t testing.TB, rt *mock.Runtime, cid cid.Cid) { @@ -334,20 +350,95 @@ func verifyEmptyMap(t testing.TB, rt *mock.Runtime, cid cid.Cid) { assert.Empty(t, keys) } +func (h *shActorHarness) registerSubnet(rt *mock.Runtime, parent address.SubnetID, snAddr address.Address) address.SubnetID { + h.t.Log("register new subnet successfully") + // Send 2FIL of stake + value := abi.NewTokenAmount(2e18) + rt.SetCaller(snAddr, actors.SubnetActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + // Only subnet actors can call. + rt.ExpectValidateCallerType(actors.SubnetActorCodeID) + // Call Register function + ret := rt.Call(h.SubnetCoordActor.Register, nil) + res, ok := ret.(*actor.SubnetIDParam) + require.True(h.t, ok) + shid := address.NewSubnetID(parent, snAddr) + // Verify the return value is correct. 
+ require.Equal(h.t, res.ID, shid.String()) + rt.Verify() + h.sn = shid + return shid +} + func getState(rt *mock.Runtime) *actor.SCAState { var st actor.SCAState rt.GetState(&st) return &st } -func (h *shActorHarness) getSubnet(rt *mock.Runtime, id cid.Cid) (*actor.Subnet, bool) { +func (h *shActorHarness) getSubnet(rt *mock.Runtime, id address.SubnetID) (*actor.Subnet, bool) { var st actor.SCAState rt.GetState(&st) subnets, err := adt.AsMap(adt.AsStore(rt), st.Subnets, builtin.DefaultHamtBitwidth) require.NoError(h.t, err) var out actor.Subnet - found, err := subnets.Get(abi.CidKey(id), &out) + found, err := subnets.Get(hierarchical.SubnetKey(id), &out) + require.NoError(h.t, err) + + return &out, found +} + +func (h *shActorHarness) getPrevChildCheckpoint(rt *mock.Runtime, source address.SubnetID) (*schema.Checkpoint, bool) { + sh, found := h.getSubnet(rt, source) + if !found { + return nil, false + } + return &sh.PrevCheckpoint, true +} + +func currWindowCheckpoint(rt *mock.Runtime, epoch abi.ChainEpoch) *schema.Checkpoint { + st := getState(rt) + chEpoch := types.WindowEpoch(epoch, st.CheckPeriod) + ch, found, err := st.GetCheckpoint(adt.AsStore(rt), chEpoch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to get checkpoint template for epoch") + if !found { + ch = schema.NewRawCheckpoint(st.NetworkName, chEpoch) + } + return ch +} + +func newCheckpoint(source address.SubnetID, epoch abi.ChainEpoch) *schema.Checkpoint { + cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31} + ch := schema.NewRawCheckpoint(source, epoch) + c1, _ := cb.Sum([]byte("a")) + c2, _ := cb.Sum([]byte("b")) + c3, _ := cb.Sum([]byte("c")) + ts := ltypes.NewTipSetKey(c1, c2, c3) + ch.SetTipsetKey(ts) + return ch +} + +func addMsgMeta(ch *schema.Checkpoint, from, to address.SubnetID, rand string, value abi.TokenAmount) *schema.CrossMsgMeta { + cb := cid.V1Builder{Codec: cid.DagCBOR, MhType: multihash.BLAKE2B_MIN + 31} + c, _ := 
cb.Sum([]byte(from.String() + rand)) + m := schema.NewCrossMsgMeta(from, to) + m.SetCid(c) + m.AddValue(value) + ch.AppendMsgMeta(m) + return m + +} + +func (h *shActorHarness) getMsgMeta(rt *mock.Runtime, c cid.Cid) (*actor.CrossMsgs, bool) { + var st actor.SCAState + rt.GetState(&st) + + metas, err := adt.AsMap(adt.AsStore(rt), st.CheckMsgsRegistry, builtin.DefaultHamtBitwidth) + require.NoError(h.t, err) + var out actor.CrossMsgs + found, err := metas.Get(abi.CidKey(c), &out) require.NoError(h.t, err) return &out, found diff --git a/chain/consensus/hierarchical/actors/subnet/cbor_gen.go b/chain/consensus/hierarchical/actors/subnet/cbor_gen.go index 923b003f9..3da18639e 100644 --- a/chain/consensus/hierarchical/actors/subnet/cbor_gen.go +++ b/chain/consensus/hierarchical/actors/subnet/cbor_gen.go @@ -9,6 +9,7 @@ import ( "sort" address "github.com/filecoin-project/go-address" + abi "github.com/filecoin-project/go-state-types/abi" hierarchical "github.com/filecoin-project/lotus/chain/consensus/hierarchical" cid "github.com/ipfs/go-cid" cbg "github.com/whyrusleeping/cbor-gen" @@ -20,7 +21,7 @@ var _ = cid.Undef var _ = math.E var _ = sort.Sort -var lengthBufSubnetState = []byte{138} +var lengthBufSubnetState = []byte{140} func (t *SubnetState) MarshalCBOR(w io.Writer) error { if t == nil { @@ -45,13 +46,7 @@ func (t *SubnetState) MarshalCBOR(w io.Writer) error { return err } - // t.ParentCid (cid.Cid) (struct) - - if err := cbg.WriteCidBuf(scratch, w, t.ParentCid); err != nil { - return xerrors.Errorf("failed to write cid field t.ParentCid: %w", err) - } - - // t.ParentID (hierarchical.SubnetID) (string) + // t.ParentID (address.SubnetID) (string) if len(t.ParentID) > cbg.MaxLength { return xerrors.Errorf("Value in field t.ParentID was too long") } @@ -63,7 +58,7 @@ func (t *SubnetState) MarshalCBOR(w io.Writer) error { return err } - // t.Consensus (subnet.ConsensusType) (uint64) + // t.Consensus (hierarchical.ConsensusType) (uint64) if err := 
cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Consensus)); err != nil { return err @@ -117,6 +112,30 @@ func (t *SubnetState) MarshalCBOR(w io.Writer) error { if _, err := w.Write(t.Genesis[:]); err != nil { return err } + + // t.CheckPeriod (abi.ChainEpoch) (int64) + if t.CheckPeriod >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CheckPeriod)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CheckPeriod-1)); err != nil { + return err + } + } + + // t.Checkpoints (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Checkpoints); err != nil { + return xerrors.Errorf("failed to write cid field t.Checkpoints: %w", err) + } + + // t.WindowChecks (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.WindowChecks); err != nil { + return xerrors.Errorf("failed to write cid field t.WindowChecks: %w", err) + } + return nil } @@ -134,7 +153,7 @@ func (t *SubnetState) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 10 { + if extra != 12 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -148,19 +167,7 @@ func (t *SubnetState) UnmarshalCBOR(r io.Reader) error { t.Name = string(sval) } - // t.ParentCid (cid.Cid) (struct) - - { - - c, err := cbg.ReadCid(br) - if err != nil { - return xerrors.Errorf("failed to read cid field t.ParentCid: %w", err) - } - - t.ParentCid = c - - } - // t.ParentID (hierarchical.SubnetID) (string) + // t.ParentID (address.SubnetID) (string) { sval, err := cbg.ReadStringBuf(br, scratch) @@ -168,9 +175,9 @@ func (t *SubnetState) UnmarshalCBOR(r io.Reader) error { return err } - t.ParentID = hierarchical.SubnetID(sval) + t.ParentID = address.SubnetID(sval) } - // t.Consensus (subnet.ConsensusType) (uint64) + // t.Consensus (hierarchical.ConsensusType) (uint64) { @@ -181,7 +188,7 @@ func (t *SubnetState) UnmarshalCBOR(r 
io.Reader) error { if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.Consensus = ConsensusType(extra) + t.Consensus = hierarchical.ConsensusType(extra) } // t.MinMinerStake (big.Int) (struct) @@ -278,10 +285,59 @@ func (t *SubnetState) UnmarshalCBOR(r io.Reader) error { if _, err := io.ReadFull(br, t.Genesis[:]); err != nil { return err } + // t.CheckPeriod (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CheckPeriod = abi.ChainEpoch(extraI) + } + // t.Checkpoints (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Checkpoints: %w", err) + } + + t.Checkpoints = c + + } + // t.WindowChecks (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.WindowChecks: %w", err) + } + + t.WindowChecks = c + + } return nil } -var lengthBufConstructParams = []byte{133} +var lengthBufConstructParams = []byte{134} func (t *ConstructParams) MarshalCBOR(w io.Writer) error { if t == nil { @@ -318,7 +374,7 @@ func (t *ConstructParams) MarshalCBOR(w io.Writer) error { return err } - // t.Consensus (subnet.ConsensusType) (uint64) + // t.Consensus (hierarchical.ConsensusType) (uint64) if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Consensus)); err != nil { return err @@ -333,6 +389,17 @@ func (t *ConstructParams) MarshalCBOR(w io.Writer) error { if err := t.DelegMiner.MarshalCBOR(w); err != nil { return err } + + // t.CheckPeriod 
(abi.ChainEpoch) (int64) + if t.CheckPeriod >= 0 { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.CheckPeriod)); err != nil { + return err + } + } else { + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajNegativeInt, uint64(-t.CheckPeriod-1)); err != nil { + return err + } + } return nil } @@ -350,7 +417,7 @@ func (t *ConstructParams) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 5 { + if extra != 6 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -374,7 +441,7 @@ func (t *ConstructParams) UnmarshalCBOR(r io.Reader) error { t.Name = string(sval) } - // t.Consensus (subnet.ConsensusType) (uint64) + // t.Consensus (hierarchical.ConsensusType) (uint64) { @@ -385,7 +452,7 @@ func (t *ConstructParams) UnmarshalCBOR(r io.Reader) error { if maj != cbg.MajUnsignedInt { return fmt.Errorf("wrong type for uint64 field") } - t.Consensus = ConsensusType(extra) + t.Consensus = hierarchical.ConsensusType(extra) } // t.MinMinerStake (big.Int) (struct) @@ -406,5 +473,109 @@ func (t *ConstructParams) UnmarshalCBOR(r io.Reader) error { } } + // t.CheckPeriod (abi.ChainEpoch) (int64) + { + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + var extraI int64 + if err != nil { + return err + } + switch maj { + case cbg.MajUnsignedInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 positive overflow") + } + case cbg.MajNegativeInt: + extraI = int64(extra) + if extraI < 0 { + return fmt.Errorf("int64 negative oveflow") + } + extraI = -1 - extraI + default: + return fmt.Errorf("wrong type for int64 field: %d", maj) + } + + t.CheckPeriod = abi.ChainEpoch(extraI) + } + return nil +} + +var lengthBufCheckVotes = []byte{129} + +func (t *CheckVotes) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufCheckVotes); err != nil { + return err + } + + scratch := make([]byte, 
9) + + // t.Miners ([]address.Address) (slice) + if len(t.Miners) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Miners was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Miners))); err != nil { + return err + } + for _, v := range t.Miners { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + return nil +} + +func (t *CheckVotes) UnmarshalCBOR(r io.Reader) error { + *t = CheckVotes{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Miners ([]address.Address) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Miners: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Miners = make([]address.Address, extra) + } + + for i := 0; i < int(extra); i++ { + + var v address.Address + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.Miners[i] = v + } + return nil } diff --git a/chain/consensus/hierarchical/actors/subnet/delegated.go b/chain/consensus/hierarchical/actors/subnet/delegated.go index 0a7edc2e3..71a85080a 100644 --- a/chain/consensus/hierarchical/actors/subnet/delegated.go +++ b/chain/consensus/hierarchical/actors/subnet/delegated.go @@ -20,8 +20,8 @@ import ( ) // TODO: We can probably deduplicate some code with pow genesis generation. 
-func makeDelegatedGenesisBlock(ctx context.Context, bs bstore.Blockstore, template genesis.Template) (*genesis2.GenesisBootstrap, error) { - st, _, err := MakeInitialStateTree(ctx, bs, template) +func makeDelegatedGenesisBlock(ctx context.Context, bs bstore.Blockstore, template genesis.Template, checkPeriod abi.ChainEpoch) (*genesis2.GenesisBootstrap, error) { + st, _, err := MakeInitialStateTree(ctx, bs, template, checkPeriod) if err != nil { return nil, xerrors.Errorf("make initial state tree failed: %w", err) } @@ -54,12 +54,13 @@ func makeDelegatedGenesisBlock(ctx context.Context, bs bstore.Blockstore, templa mm := &types.MsgMeta{ BlsMessages: emptyroot, SecpkMessages: emptyroot, + CrossMessages: emptyroot, } mmb, err := mm.ToStorageBlock() if err != nil { return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) } - if err := bs.Put(mmb); err != nil { + if err := bs.Put(ctx, mmb); err != nil { return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) } @@ -99,7 +100,7 @@ func makeDelegatedGenesisBlock(ctx context.Context, bs bstore.Blockstore, templa return nil, xerrors.Errorf("serializing block header failed: %w", err) } - if err := bs.Put(sb); err != nil { + if err := bs.Put(ctx, sb); err != nil { return nil, xerrors.Errorf("putting header to blockstore: %w", err) } diff --git a/chain/consensus/hierarchical/actors/subnet/gen/gen.go b/chain/consensus/hierarchical/actors/subnet/gen/gen.go index 0631e039f..b5e1b5a91 100644 --- a/chain/consensus/hierarchical/actors/subnet/gen/gen.go +++ b/chain/consensus/hierarchical/actors/subnet/gen/gen.go @@ -10,6 +10,7 @@ func main() { if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "subnet", actor.SubnetState{}, actor.ConstructParams{}, + actor.CheckVotes{}, ); err != nil { panic(err) } diff --git a/chain/consensus/hierarchical/actors/subnet/genesis.go b/chain/consensus/hierarchical/actors/subnet/genesis.go index 687e69cf1..46954cc6c 100644 --- 
a/chain/consensus/hierarchical/actors/subnet/genesis.go +++ b/chain/consensus/hierarchical/actors/subnet/genesis.go @@ -7,6 +7,7 @@ import ( "io" address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" bstore "github.com/filecoin-project/lotus/blockstore" @@ -15,10 +16,15 @@ import ( "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/system" actor "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/filecoin-project/lotus/chain/consensus/actors/mpower" + + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/gen" @@ -36,15 +42,15 @@ import ( ) const ( - networkVersion = network.Version14 + networkVersion = network.Version15 ) -func WriteGenesis(netName hierarchical.SubnetID, consensus ConsensusType, miner, vreg, rem address.Address, seq uint64, w io.Writer) error { +func WriteGenesis(netName address.SubnetID, consensus hierarchical.ConsensusType, miner, vreg, rem address.Address, checkPeriod abi.ChainEpoch, seq uint64, w io.Writer) error { bs := bstore.WrapIDStore(bstore.NewMemorySync()) var b *genesis2.GenesisBootstrap switch consensus { - case Delegated: + case hierarchical.Delegated: if miner == address.Undef { return xerrors.Errorf("no miner specified for delegated consensus") } @@ -52,19 +58,21 @@ func WriteGenesis(netName 
hierarchical.SubnetID, consensus ConsensusType, miner, if err != nil { return err } - b, err = makeDelegatedGenesisBlock(context.TODO(), bs, *template) + b, err = makeDelegatedGenesisBlock(context.TODO(), bs, *template, checkPeriod) if err != nil { return xerrors.Errorf("error making genesis delegated block: %w", err) } - case PoW: + case hierarchical.PoW: template, err := powGenTemplate(netName.String(), vreg, rem, seq) if err != nil { return err } - b, err = makePoWGenesisBlock(context.TODO(), bs, *template) + b, err = makePoWGenesisBlock(context.TODO(), bs, *template, checkPeriod) if err != nil { return xerrors.Errorf("error making genesis delegated block: %w", err) } + default: + return xerrors.Errorf("consensus type not supported. Not writing genesis") } offl := offline.Exchange(bs) @@ -77,7 +85,7 @@ func WriteGenesis(netName hierarchical.SubnetID, consensus ConsensusType, miner, return nil } -func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template genesis.Template) (*state.StateTree, map[address.Address]address.Address, error) { +func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template genesis.Template, checkPeriod abi.ChainEpoch) (*state.StateTree, map[address.Address]address.Address, error) { // Create empty state tree cst := cbor.NewCborStore(bs) @@ -111,43 +119,65 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return nil, nil, xerrors.Errorf("set system actor: %w", err) } - // Create init actor - - idStart, initact, keyIDs, err := genesis2.SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) + // Create empty power actor + spact, err := SetupStoragePowerActor(ctx, bs, av) if err != nil { - return nil, nil, xerrors.Errorf("setup init actor: %w", err) + return nil, nil, xerrors.Errorf("setup storage power actor: %w", err) } - if err := state.SetActor(init_.Address, initact); err != nil { - return nil, nil, 
xerrors.Errorf("set init actor: %w", err) + if err := state.SetActor(power.Address, spact); err != nil { + return nil, nil, xerrors.Errorf("set storage power actor: %w", err) } - // Create empty power actor - spact, err := SetupStoragePowerActor(ctx, bs, av) + // Create empty mocked power actor + mockedact, err := SetupStorageMockedPowerActor(ctx, bs, av) if err != nil { return nil, nil, xerrors.Errorf("setup storage power actor: %w", err) } - if err := state.SetActor(mpower.PowerActorAddr, spact); err != nil { + if err := state.SetActor(mpower.PowerActorAddr, mockedact); err != nil { return nil, nil, xerrors.Errorf("set storage power actor: %w", err) } + // Create init actor + + idStart, initact, keyIDs, err := genesis2.SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup init actor: %w", err) + } + if err := state.SetActor(init_.Address, initact); err != nil { + return nil, nil, xerrors.Errorf("set init actor: %w", err) + } + // Setup sca actor - scaact, err := SetupSubnetActor(ctx, bs, template.NetworkName) + params := &sca.ConstructorParams{ + NetworkName: template.NetworkName, + CheckpointPeriod: uint64(checkPeriod), + } + scaact, err := SetupSCAActor(ctx, bs, params) if err != nil { return nil, nil, err } - err = state.SetActor(sca.SubnetCoordActorAddr, scaact) + err = state.SetActor(hierarchical.SubnetCoordActorAddr, scaact) if err != nil { return nil, nil, xerrors.Errorf("set SCA actor: %w", err) } - // Setup reward - // RewardActor's state is overwritten by SetupStorageMiners, but needs to exist for miner creation messages - rewact, err := genesis2.SetupRewardActor(ctx, bs, big.Zero(), av) + // Create empty market actor + marketact, err := SetupStorageMarketActor(ctx, bs, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + } + if err := state.SetActor(market.Address, marketact); err != 
nil { + return nil, nil, xerrors.Errorf("set storage market actor: %w", err) + } + // Setup reward actor + // This is a modified reward actor to support the needs of hierarchical consensus + // protocol. + rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av) if err != nil { return nil, nil, xerrors.Errorf("setup reward actor: %w", err) } - err = state.SetActor(reward.Address, rewact) + err = state.SetActor(reward.RewardActorAddr, rewact) if err != nil { return nil, nil, xerrors.Errorf("set reward actor: %w", err) } @@ -236,9 +266,9 @@ func MakeInitialStateTree(ctx context.Context, bs bstore.Blockstore, template ge return state, keyIDs, nil } -func SetupSubnetActor(ctx context.Context, bs bstore.Blockstore, networkName string) (*types.Actor, error) { +func SetupSCAActor(ctx context.Context, bs bstore.Blockstore, params *sca.ConstructorParams) (*types.Actor, error) { cst := cbor.NewCborStore(bs) - st, err := sca.ConstructSCAState(adt.WrapStore(ctx, cst), hierarchical.SubnetID(networkName)) + st, err := sca.ConstructSCAState(adt.WrapStore(ctx, cst), params) if err != nil { return nil, err } @@ -259,18 +289,16 @@ func SetupSubnetActor(ctx context.Context, bs bstore.Blockstore, networkName str // This is our mocked power actor used in checkpointing module // This function allow initializing the state in our genesis file -func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { +func SetupStorageMockedPowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { cst := cbor.NewCborStore(bs) pst, err := mpower.ConstructState(adt.WrapStore(ctx, cbor.NewCborStore(bs))) if err != nil { return nil, err } - statecid, err := cst.Put(ctx, pst) if err != nil { return nil, err } - act := &types.Actor{ Code: actor.MpowerActorCodeID, Head: statecid, @@ -279,3 +307,78 @@ func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors return act, nil } + +func 
SetupRewardActor(ctx context.Context, bs bstore.Blockstore, qaPower big.Int, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + rst := reward.ConstructState(qaPower) + + statecid, err := cst.Put(ctx, rst) + if err != nil { + return nil, err + } + + // NOTE: For now, everything in the reward actor is the same except the code, + // where we included an additional method to fund accounts. This may change + // in the future when we design specific reward system for subnets. + act := &types.Actor{ + Code: actor.RewardActorCodeID, + // NOTE: This sets up the initial balance of the reward actor. + Balance: types.BigInt{Int: build.InitialRewardBalance}, + Head: statecid, + } + + return act, nil +} + +func SetupStorageMarketActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + mst, err := market.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := market.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} + +func SetupStoragePowerActor(ctx context.Context, bs bstore.Blockstore, av actors.Version) (*types.Actor, error) { + + cst := cbor.NewCborStore(bs) + pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := power.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} diff --git a/chain/consensus/hierarchical/actors/subnet/iface.go b/chain/consensus/hierarchical/actors/subnet/iface.go index ee8ea46bf..0f168d247 100644 --- 
a/chain/consensus/hierarchical/actors/subnet/iface.go +++ b/chain/consensus/hierarchical/actors/subnet/iface.go @@ -1,16 +1,16 @@ package subnet import ( - "github.com/filecoin-project/go-state-types/cbor" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" + abi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" ) -// ActorIface determines the interface to be implemented by every -// actor representing a subnet.. -type ActorIface interface { - Constructor(rt runtime.Runtime, params cbor.Marshaler) cbor.Marshaler - Join(rt runtime.Runtime, params cbor.Marshaler) cbor.Marshaler - Leave(rt runtime.Runtime, params cbor.Marshaler) cbor.Marshaler - Checkpoint(rt runtime.Runtime, params cbor.Marshaler) cbor.Marshaler - Kill(rt runtime.Runtime, params cbor.Marshaler) cbor.Marshaler +// SubnetIface defines the minimum interface that needs to be implemented by subnet +// actors +type SubnetIface interface { + Join(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue + Leave(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue + SubmitCheckpoint(rt runtime.Runtime, params *sca.CheckpointParams) *abi.EmptyValue + Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue } diff --git a/chain/consensus/hierarchical/actors/subnet/subnet_actor.go b/chain/consensus/hierarchical/actors/subnet/subnet_actor.go index 40ede0d6b..6b08def0c 100644 --- a/chain/consensus/hierarchical/actors/subnet/subnet_actor.go +++ b/chain/consensus/hierarchical/actors/subnet/subnet_actor.go @@ -13,26 +13,30 @@ import ( actor "github.com/filecoin-project/lotus/chain/consensus/actors" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + checkpoint "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints" + 
"github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/runtime" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" cid "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" ) var _ runtime.VMActor = SubnetActor{} +var _ SubnetIface = SubnetActor{} var log = logging.Logger("subnet-actor") type SubnetActor struct{} var Methods = struct { - Constructor abi.MethodNum - Join abi.MethodNum - Leave abi.MethodNum - Kill abi.MethodNum -}{builtin0.MethodConstructor, 2, 3, 4} + Constructor abi.MethodNum + Join abi.MethodNum + Leave abi.MethodNum + Kill abi.MethodNum + SubmitCheckpoint abi.MethodNum +}{builtin0.MethodConstructor, 2, 3, 4, 5} func (a SubnetActor) Exports() []interface{} { return []interface{}{ @@ -40,7 +44,7 @@ func (a SubnetActor) Exports() []interface{} { 2: a.Join, 3: a.Leave, 4: a.Kill, - // Checkpoint - Add a new checkpoint to the subnet. + 5: a.SubmitCheckpoint, } } @@ -59,11 +63,12 @@ func (a SubnetActor) State() cbor.Er { // ConstructParams specifies the configuration parameters for the // subnet actor constructor. type ConstructParams struct { - NetworkName string // Name of the current network. - Name string // Name for the subnet - Consensus ConsensusType // Consensus for subnet. - MinMinerStake abi.TokenAmount // MinStake to give miner rights - DelegMiner address.Address // Miner in delegated consensus + NetworkName string // Name of the current network. + Name string // Name for the subnet + Consensus hierarchical.ConsensusType // Consensus for subnet. 
+ MinMinerStake abi.TokenAmount // MinStake to give miner rights + DelegMiner address.Address // Miner in delegated consensus + CheckPeriod abi.ChainEpoch // Checkpointing period. } func (a SubnetActor) Constructor(rt runtime.Runtime, params *ConstructParams) *abi.EmptyValue { @@ -77,10 +82,6 @@ func (a SubnetActor) Constructor(rt runtime.Runtime, params *ConstructParams) *a return nil } -func (a SubnetActor) Checkpoint(rt runtime.Runtime, params abi.EmptyValue) abi.EmptyValue { - panic("checkpoint not implemented yet") -} - func (st *SubnetState) initGenesis(rt runtime.Runtime, params *ConstructParams) { // Build genesis for the subnet assigning delegMiner buf := new(bytes.Buffer) @@ -97,8 +98,9 @@ func (st *SubnetState) initGenesis(rt runtime.Runtime, params *ConstructParams) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed parsin rem addr") // Getting actor ID from recceiver. - netName := hierarchical.NewSubnetID(hierarchical.SubnetID(params.NetworkName), rt.Receiver()) - err = WriteGenesis(netName, st.Consensus, params.DelegMiner, vreg, rem, rt.ValueReceived().Uint64(), buf) + netName := address.NewSubnetID(address.SubnetID(params.NetworkName), rt.Receiver()) + err = WriteGenesis(netName, st.Consensus, params.DelegMiner, vreg, rem, + params.CheckPeriod, rt.ValueReceived().Uint64(), buf) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed genesis") st.Genesis = buf.Bytes() } @@ -123,7 +125,7 @@ func (a SubnetActor) Join(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue // Send a transaction with the total stake to the subnet actor. // We are discarding the result (which is the CID assigned for the subnet) // because we can compute it deterministically, but we can consider keeping it. 
- code := rt.Send(sca.SubnetCoordActorAddr, sca.Methods.Register, nil, st.TotalStake, &builtin.Discard{}) + code := rt.Send(hierarchical.SubnetCoordActorAddr, sca.Methods.Register, nil, st.TotalStake, &builtin.Discard{}) if !code.IsSuccess() { rt.Abortf(exitcode.ErrIllegalState, "failed registering subnet in SCA") } @@ -133,7 +135,7 @@ func (a SubnetActor) Join(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue // We need to send an addStake transaction to SCA if rt.CurrentBalance().GreaterThanEqual(value) { // Top-up stake in SCA - code := rt.Send(sca.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, &builtin.Discard{}) + code := rt.Send(hierarchical.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, &builtin.Discard{}) if !code.IsSuccess() { rt.Abortf(exitcode.ErrIllegalState, "failed sending addStake to SCA") } @@ -177,7 +179,7 @@ func (a SubnetActor) Leave(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValu // Release stake from SCA if all the stake hasn't been released already because the subnet // is in a terminating state if st.Status != Terminating { - code := rt.Send(sca.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), &builtin.Discard{}) + code := rt.Send(hierarchical.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), &builtin.Discard{}) if !code.IsSuccess() { rt.Abortf(exitcode.ErrIllegalState, "failed releasing stake in SCA") } @@ -206,6 +208,145 @@ func (a SubnetActor) Leave(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValu return nil } +// verifyCheck verifies the submitted checkpoint and returns the checkpoint signer if valid. +func (st *SubnetState) verifyCheck(rt runtime.Runtime, ch *schema.Checkpoint) address.Address { + // Check that the subnet is active. 
+ if st.Status != Active { + rt.Abortf(exitcode.ErrIllegalState, "submitting checkpoints is not allowed while subnet is not active") + } + + // Check that the checkpoint for this epoch hasn't been committed yet. + if _, found, _ := st.epochCheckpoint(rt); found { + rt.Abortf(exitcode.ErrIllegalArgument, "cannot submit checkpoint for epoch that has been committed already") + } + + // Check that the source is correct. + shid := address.NewSubnetID(st.ParentID, rt.Receiver()) + if ch.Source() != shid { + rt.Abortf(exitcode.ErrIllegalArgument, "submitting a checkpoint with the wrong source") + } + + // Check that the epoch is correct. + if ch.Epoch()%st.CheckPeriod != 0 { + rt.Abortf(exitcode.ErrIllegalArgument, "epoch in checkpoint doesn't correspond with signing window") + } + + // Check that the previous checkpoint is correct. + prevCom, err := st.PrevCheckCid(adt.AsStore(rt), ch.Epoch()) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching Cid for previous check") + if prev, _ := ch.PreviousCheck(); prevCom != prev { + rt.Abortf(exitcode.ErrIllegalArgument, "previous checkpoint not consistent with previous check committed") + } + + // Check the signature and get address. + // We are using a simple signature verifier, we could optionally use other verifiers. + ver := checkpoint.NewSingleSigner() + sigAddr, err := ver.Verify(ch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "failed to verify signature for submitted checkpoint") + + /* + // Check that the ID address included in signature belongs to the pkey specified. 
+ resolved, ok := rt.ResolveAddress(sigAddr.Addr) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unable to resolve address %v", sigAddr.Addr) + } + */ + + addr := sigAddr.Addr + if sigAddr.IDAddr != address.Undef { + resolved, ok := rt.ResolveAddress(sigAddr.Addr) + if !ok { + rt.Abortf(exitcode.ErrIllegalArgument, "unable to resolve address %v", sigAddr.Addr) + } + if resolved != sigAddr.IDAddr { + rt.Abortf(exitcode.ErrIllegalArgument, "inconsistent pkey addr and ID addr in signature") + } + addr = sigAddr.IDAddr + } + + // Only miners (i.e. peers with collateral in subnet) are allowed to submit checkpoints. + if !st.IsMiner(addr) { + rt.Abortf(exitcode.ErrIllegalArgument, "checkpoint not signed by a miner") + } + + return addr + +} + +// SubmitCheckpoint accepts signed checkpoint votes for miners. +// +// This functions verifies that the checkpoint is valid before +// propagating it for commitment to the SCA. It expects at least +// votes from 2/3 of miners with collateral. +func (a SubnetActor) SubmitCheckpoint(rt runtime.Runtime, params *sca.CheckpointParams) *abi.EmptyValue { + // Only account actors can submit signed checkpoints for commitment. 
+ rt.ValidateImmediateCallerType(builtin.AccountActorCodeID) + submit := &schema.Checkpoint{} + err := submit.UnmarshalBinary(params.Checkpoint) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalArgument, "error unmarshalling checkpoint in params") + + var st SubnetState + var majority bool + rt.StateTransaction(&st, func() { + // Verify checkpoint and get signer + signAddr := st.verifyCheck(rt, submit) + c, err := submit.Cid() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error computing Cid for checkpoint") + // Get windowChecks for submitted checkpoint + wch, found, err := st.GetWindowChecks(adt.AsStore(rt), c) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "get list of uncommitted checks") + if !found { + wch = &CheckVotes{make([]address.Address, 0)} + } + + // Check if miner already submitted this checkpoint. + if HasMiner(signAddr, wch.Miners) { + rt.Abortf(exitcode.ErrIllegalArgument, "miner already submitted a vote for this checkpoint") + } + + // Add miners vote + wch.Miners = append(wch.Miners, signAddr) + majority, err = st.majorityVote(rt, wch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error fetching miner stakes") + if majority { + + // Update checkpoint in SubnetState + // NOTE: We are including the last signature. It won't be used for verification + // so this is OK for now. We could also optionally remove the signature to + // save gas. + st.flushCheckpoint(rt, submit) + + // Remove windowChecks, the checkpoint has been committed + // (do this only if they were found before, if not we don't have + // windowChecks yet) + if found { + err := st.rmChecks(adt.AsStore(rt), c) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error removing windowChecks") + // TODO: XXX Consider periodically emptying the full votes map to avoid + // keeping state for wrong Cids committed in the past. This could be + // a DoS vector/source of inefficiency. Use a rotating map scheme to empty every + // epoch? 
+ } + return + } + + // If not flush checkWindow and we're good to go! + st.flushWindowChecks(rt, c, wch) + + }) + + // If we reached amjority propagate the commitment to SCA + if majority { + // If the checkpoint is correct we can reuse params and avoid having to marshal it again. + code := rt.Send(hierarchical.SubnetCoordActorAddr, sca.Methods.CommitChildCheckpoint, params, big.Zero(), &builtin.Discard{}) + if !code.IsSuccess() { + rt.Abortf(exitcode.ErrIllegalState, "failed committing checkpoint in SCA") + } + } + + return nil +} + // Kill is used to signal that the subnet must be terminated. // // In the current policy any user can terminate the subnet and recover their stake @@ -226,7 +367,7 @@ func (a SubnetActor) Kill(rt runtime.Runtime, _ *abi.EmptyValue) *abi.EmptyValue }) // Kill (unregister) subnet from SCA and release full stake - code := rt.Send(sca.SubnetCoordActorAddr, sca.Methods.Kill, nil, big.Zero(), &builtin.Discard{}) + code := rt.Send(hierarchical.SubnetCoordActorAddr, sca.Methods.Kill, nil, big.Zero(), &builtin.Discard{}) if !code.IsSuccess() { rt.Abortf(exitcode.ErrIllegalState, "failed killing subnet in SCA") } @@ -265,6 +406,15 @@ func (st *SubnetState) mutateState(rt runtime.Runtime) { break } } + +func (st *SubnetState) GetStake(store adt.Store, miner address.Address) (big.Int, error) { + stakes, err := adt.AsBalanceTable(store, st.Stake) + if err != nil { + return big.Zero(), err + } + return stakes.Get(miner) +} + func (st *SubnetState) addStake(rt runtime.Runtime, sourceAddr address.Address, value abi.TokenAmount) { // NOTE: There's currently no minimum stake required. Any stake is accepted even // if a peer is not granted mining rights. According to the final design we may @@ -274,11 +424,11 @@ func (st *SubnetState) addStake(rt runtime.Runtime, sourceAddr address.Address, // Add the amount staked by miner to stake map. 
err = stakes.Add(sourceAddr, value) builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "error adding stake to user balance table") - // Flust stakes adding miner stake. + // Flush stakes adding miner stake. st.Stake, err = stakes.Root() - builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flust stards") + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush subnet") - // Add to totalStake in the stard. + // Add to totalStake in the subnet. st.TotalStake = big.Add(st.TotalStake, value) // Check if the miner has staked enough to be granted mining rights. @@ -287,7 +437,7 @@ func (st *SubnetState) addStake(rt runtime.Runtime, sourceAddr address.Address, if minerStake.GreaterThanEqual(st.MinMinerStake) { // Except for delegated consensus if there is already a miner. // There can only be a single miner in delegated consensus. - if st.Consensus != Delegated || len(st.Miners) < 1 { + if st.Consensus != hierarchical.Delegated || len(st.Miners) < 1 { st.Miners = append(st.Miners, sourceAddr) } } diff --git a/chain/consensus/hierarchical/actors/subnet/subnet_state.go b/chain/consensus/hierarchical/actors/subnet/subnet_state.go index bf5457e05..8bb280218 100644 --- a/chain/consensus/hierarchical/actors/subnet/subnet_state.go +++ b/chain/consensus/hierarchical/actors/subnet/subnet_state.go @@ -1,11 +1,19 @@ package subnet import ( + mbig "math/big" + address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + 
"github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" cid "github.com/ipfs/go-cid" "golang.org/x/xerrors" ) @@ -25,15 +33,11 @@ var ( // We'll need to decide what to do with the leftover stake, if to // burn it or keep it until the subnet is full killed. LeavingFeeCoeff = big.NewInt(1) -) -// ConsensusType for subnet -type ConsensusType uint64 - -// List of supported/implemented consensus for subnets. -const ( - Delegated ConsensusType = iota - PoW + // SignatureThreshold that determines the number of votes from + // total number of miners expected to propagate a checkpoint to + // SCA + SignatureThreshold = mbig.NewFloat(0.66) ) // SubnetStatus describes in what state in its lifecycle a subnet is. @@ -49,21 +53,56 @@ const ( ) type SubnetState struct { - Name string - ParentCid cid.Cid - ParentID hierarchical.SubnetID - Consensus ConsensusType - // Minimum stake required by new joiners. + // Human-readable name of the subnet. + Name string + // ID of the parent subnet + ParentID address.SubnetID + // Type of Consensus algorithm. + Consensus hierarchical.ConsensusType + // Minimum stake required for an address to join the subnet + // as a miner MinMinerStake abi.TokenAmount - // NOTE: Consider adding miners list as AMT - Miners []address.Address + // List of miners in the subnet. + // NOTE: Consider using AMT. + Miners []address.Address + // Total collateral currently deposited in the TotalStake abi.TokenAmount - Stake cid.Cid // BalanceTable with the distribution of stake by miners - // State of the subnet + // BalanceTable with the distribution of stake by address + Stake cid.Cid // HAMT[tokenAmount]address + // State of the subnet (Active, Inactive, Terminating) Status Status // Genesis bootstrap for the subnet. This is created // when the subnet is generated. Genesis []byte + // Checkpointing period. 
+ CheckPeriod abi.ChainEpoch + // Checkpoints submit to SubnetActor per epoch + Checkpoints cid.Cid // HAMT[epoch]Checkpoint + // WindowChecks + WindowChecks cid.Cid // HAMT[cid]CheckVotes +} + +type CheckVotes struct { + // NOTE: I don't think we need to store the checkpoint for anything. + // By keeping the Cid of the checkpoint as the key is enough and we + // save space + // Checkpoint schema.Checkpoint + Miners []address.Address +} + +func (st SubnetState) majorityVote(rt runtime.Runtime, wch *CheckVotes) (bool, error) { + sum := big.Zero() + for _, m := range wch.Miners { + stake, err := st.GetStake(adt.AsStore(rt), m) + if err != nil { + return false, err + } + sum = big.Sum(sum, stake) + } + fsum := new(mbig.Float).SetInt(sum.Int) + fTotal := new(mbig.Float).SetInt(st.TotalStake.Int) + div := new(mbig.Float).Quo(fsum, fTotal) + return div.Cmp(SignatureThreshold) >= 0, nil } func ConstructSubnetState(store adt.Store, params *ConstructParams) (*SubnetState, error) { @@ -71,6 +110,20 @@ func ConstructSubnetState(store adt.Store, params *ConstructParams) (*SubnetStat if err != nil { return nil, xerrors.Errorf("failed to create stakes balance table: %w", err) } + emptyCheckpointsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty map: %w", err) + } + emptyWindowChecks, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, xerrors.Errorf("failed to create empty map: %w", err) + } + + // Don't allow really small checkpoint periods for now. + period := abi.ChainEpoch(params.CheckPeriod) + if period < sca.MinCheckpointPeriod { + period = sca.DefaultCheckpointPeriod + } /* Initialize AMT of miners. 
emptyArr, err := adt.MakeEmptyArray(adt.AsStore(rt), LaneStatesAmtBitwidth) @@ -79,24 +132,126 @@ func ConstructSubnetState(store adt.Store, params *ConstructParams) (*SubnetStat builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to persist empty array") */ - parentID := hierarchical.SubnetID(params.NetworkName) - parentCid, err := parentID.Cid() - if err != nil { - panic(err) - } + parentID := address.SubnetID(params.NetworkName) + return &SubnetState{ - ParentCid: parentCid, ParentID: parentID, Consensus: params.Consensus, MinMinerStake: params.MinMinerStake, Miners: make([]address.Address, 0), Stake: emptyStakeCid, Status: Instantiated, + CheckPeriod: period, + Checkpoints: emptyCheckpointsMapCid, + WindowChecks: emptyWindowChecks, }, nil + +} + +// windowCheckpoint returns the checkpoint for the current signing window (if any). +func (st *SubnetState) epochCheckpoint(rt runtime.Runtime) (*schema.Checkpoint, bool, error) { + chEpoch := types.CheckpointEpoch(rt.CurrEpoch(), st.CheckPeriod) + return st.GetCheckpoint(adt.AsStore(rt), chEpoch) +} + +// PrevCheckCid returns the Cid of the previously committed checkpoint +func (st *SubnetState) PrevCheckCid(store adt.Store, epoch abi.ChainEpoch) (cid.Cid, error) { + ep := epoch - st.CheckPeriod + // From epoch back if we found a previous checkpoint + // committed we return its CID + for ep >= 0 { + ch, found, err := st.GetCheckpoint(store, ep) + if err != nil { + return cid.Undef, err + } + if found { + return ch.Cid() + } + ep = ep - st.CheckPeriod + } + // If nothing is found return NoPreviousCheckCommit + return schema.NoPreviousCheck, nil +} + +// GetCheckpoint gets a checkpoint from its index +func (st *SubnetState) GetCheckpoint(s adt.Store, epoch abi.ChainEpoch) (*schema.Checkpoint, bool, error) { + checkpoints, err := adt.AsMap(s, st.Checkpoints, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load checkpoint: %w", err) + } + return 
getCheckpoint(checkpoints, epoch) +} + +func getCheckpoint(checkpoints *adt.Map, epoch abi.ChainEpoch) (*schema.Checkpoint, bool, error) { + var out schema.Checkpoint + found, err := checkpoints.Get(abi.UIntKey(uint64(epoch)), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get checkpoint for epoch %v: %w", epoch, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +func (st *SubnetState) flushCheckpoint(rt runtime.Runtime, ch *schema.Checkpoint) { + // Update subnet in the list of checkpoints. + checks, err := adt.AsMap(adt.AsStore(rt), st.Checkpoints, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for checkpoints") + err = checks.Put(abi.UIntKey(uint64(ch.Data.Epoch)), ch) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put checkpoint in map") + // Flush checkpoints + st.Checkpoints, err = checks.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush checkpoints") +} + +// GetWindowChecks with the list of uncommitted checkpoints. 
+func (st *SubnetState) GetWindowChecks(s adt.Store, checkCid cid.Cid) (*CheckVotes, bool, error) { + checks, err := adt.AsMap(s, st.WindowChecks, builtin.DefaultHamtBitwidth) + if err != nil { + return nil, false, xerrors.Errorf("failed to load windowCheck: %w", err) + } + + var out CheckVotes + found, err := checks.Get(abi.CidKey(checkCid), &out) + if err != nil { + return nil, false, xerrors.Errorf("failed to get windowCheck for Cid %v: %w", checkCid, err) + } + if !found { + return nil, false, nil + } + return &out, true, nil +} + +func (st *SubnetState) rmChecks(s adt.Store, checkCid cid.Cid) error { + checks, err := adt.AsMap(s, st.WindowChecks, builtin.DefaultHamtBitwidth) + if err != nil { + return xerrors.Errorf("failed to load windowCheck: %w", err) + } + + if err := checks.Delete(abi.CidKey(checkCid)); err != nil { + return err + } + st.WindowChecks, err = checks.Root() + return err +} + +func (st *SubnetState) flushWindowChecks(rt runtime.Runtime, checkCid cid.Cid, w *CheckVotes) { + checks, err := adt.AsMap(adt.AsStore(rt), st.WindowChecks, builtin.DefaultHamtBitwidth) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to load state for windowChecks") + err = checks.Put(abi.CidKey(checkCid), w) + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to put windowCheck in map") + // Flush windowCheck + st.WindowChecks, err = checks.Root() + builtin.RequireNoErr(rt, err, exitcode.ErrIllegalState, "failed to flush windowChecks") } func (st *SubnetState) IsMiner(addr address.Address) bool { - for _, a := range st.Miners { + return HasMiner(addr, st.Miners) +} + +func HasMiner(addr address.Address, miners []address.Address) bool { + for _, a := range miners { if a == addr { return true } diff --git a/chain/consensus/hierarchical/actors/subnet/subnet_test.go b/chain/consensus/hierarchical/actors/subnet/subnet_test.go index 2fde8cdf2..c9ba591c3 100644 --- a/chain/consensus/hierarchical/actors/subnet/subnet_test.go +++ 
b/chain/consensus/hierarchical/actors/subnet/subnet_test.go @@ -1,19 +1,26 @@ package subnet_test import ( + "context" "testing" address "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" actor "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" - "github.com/filecoin-project/specs-actors/v6/actors/builtin" - "github.com/filecoin-project/specs-actors/v6/actors/util/adt" - "github.com/filecoin-project/specs-actors/v6/support/mock" - tutil "github.com/filecoin-project/specs-actors/v6/support/testing" + checkpoint "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/utils" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/specs-actors/v7/actors/builtin" + "github.com/filecoin-project/specs-actors/v7/actors/util/adt" + "github.com/filecoin-project/specs-actors/v7/support/mock" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" cid "github.com/ipfs/go-cid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -49,6 +56,7 @@ func TestJoin(t *testing.T) { // Anyone can call rt.ExpectValidateCallerAny() ret := rt.Call(h.SubnetActor.Join, nil) + rt.Verify() assert.Nil(h.t, ret) // Check that the subnet is instantiated but not active. 
st := getState(rt) @@ -65,8 +73,9 @@ func TestJoin(t *testing.T) { rt.SetBalance(totalStake) rt.SetCaller(miner, builtin.AccountActorCodeID) rt.ExpectValidateCallerAny() - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.Register, nil, totalStake, nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.Register, nil, totalStake, nil, exitcode.Ok) rt.Call(h.SubnetActor.Join, nil) + rt.Verify() // Check that we are active st = getState(rt) require.Equal(t, len(st.Miners), 1) @@ -81,8 +90,9 @@ func TestJoin(t *testing.T) { rt.SetCaller(notMiner, builtin.AccountActorCodeID) rt.ExpectValidateCallerAny() // Triggers a stake top-up in SCA - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) rt.Call(h.SubnetActor.Join, nil) + rt.Verify() // Check that the subnet is instantiated but not active. st = getState(rt) // If we use delegated consensus we only accept one miner. @@ -110,7 +120,7 @@ func TestLeaveAndKill(t *testing.T) { totalStake = big.Add(totalStake, value) // Anyone can call rt.ExpectValidateCallerAny() - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.Register, nil, totalStake, nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.Register, nil, totalStake, nil, exitcode.Ok) ret := rt.Call(h.SubnetActor.Join, nil) assert.Nil(h.t, ret) // Check that the subnet is instantiated but not active. 
@@ -127,8 +137,9 @@ func TestLeaveAndKill(t *testing.T) { rt.SetBalance(value) rt.SetCaller(joiner2, builtin.AccountActorCodeID) rt.ExpectValidateCallerAny() - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) rt.Call(h.SubnetActor.Join, nil) + rt.Verify() // Check that we are active st = getState(rt) require.Equal(t, len(st.Miners), 2) @@ -143,8 +154,9 @@ func TestLeaveAndKill(t *testing.T) { rt.SetBalance(value) rt.SetCaller(joiner3, builtin.AccountActorCodeID) rt.ExpectValidateCallerAny() - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) rt.Call(h.SubnetActor.Join, nil) + rt.Verify() // Check that we are active st = getState(rt) require.Equal(t, len(st.Miners), 2) @@ -158,9 +170,10 @@ func TestLeaveAndKill(t *testing.T) { minerStake := getStake(t, rt, joiner2) totalStake = big.Sub(totalStake, minerStake) rt.SetBalance(minerStake) - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), nil, exitcode.Ok) rt.ExpectSend(joiner2, builtin.MethodSend, nil, big.Div(minerStake, actor.LeavingFeeCoeff), nil, exitcode.Ok) rt.Call(h.SubnetActor.Leave, nil) + rt.Verify() st = getState(rt) require.Equal(t, st.Status, actor.Active) require.Equal(t, len(st.Miners), 1) @@ -180,9 +193,10 @@ func TestLeaveAndKill(t *testing.T) { minerStake = getStake(t, rt, joiner) totalStake = big.Sub(totalStake, minerStake) rt.SetBalance(minerStake) - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), nil, exitcode.Ok) + 
rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.ReleaseStake, &sca.FundParams{Value: minerStake}, big.Zero(), nil, exitcode.Ok) rt.ExpectSend(joiner, builtin.MethodSend, nil, big.Div(minerStake, actor.LeavingFeeCoeff), nil, exitcode.Ok) rt.Call(h.SubnetActor.Leave, nil) + rt.Verify() st = getState(rt) require.Equal(t, st.Status, actor.Inactive) require.Equal(t, len(st.Miners), 0) @@ -200,8 +214,9 @@ func TestLeaveAndKill(t *testing.T) { rt.SetCaller(joiner3, builtin.AccountActorCodeID) rt.ExpectValidateCallerAny() rt.SetBalance(minerStake) - rt.ExpectSend(sca.SubnetCoordActorAddr, sca.Methods.Kill, nil, big.Zero(), nil, exitcode.Ok) + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.Kill, nil, big.Zero(), nil, exitcode.Ok) rt.Call(h.SubnetActor.Kill, nil) + rt.Verify() st = getState(rt) require.Equal(t, st.Status, actor.Terminating) @@ -216,49 +231,187 @@ func TestLeaveAndKill(t *testing.T) { rt.SetBalance(minerStake) rt.ExpectSend(joiner3, builtin.MethodSend, nil, big.Div(minerStake, actor.LeavingFeeCoeff), nil, exitcode.Ok) rt.Call(h.SubnetActor.Leave, nil) + rt.Verify() st = getState(rt) require.Equal(t, st.Status, actor.Killed) require.Equal(t, len(st.Miners), 0) require.Equal(t, getStake(t, rt, joiner3), big.Zero()) require.Equal(t, st.TotalStake.Abs(), totalStake.Abs()) - // TODO: Check that a miner can't leave twice and get their stake twice. - // TODO: Check killing states. Joiner 2 calls kill and then the other guy takes it stake. - /* +} - t.Log("adder leaves the subnet") - rt.ExpectValidateCallerAny() - rt.SetCaller(owner, builtin.AccountActorCodeID) - rt.ExpectSend(owner, builtin.MethodSend, nil, big.Div(addValue, actor.LeavingFeeCoeff), nil, exitcode.Ok) - rt.Call(h.SubnetActor.Leave, leaveParams) - sh, found = h.getSubnet(rt, shid) - require.True(h.t, found) - require.Equal(t, sh.Status, actor.Terminating) - // Not in stakes anymore. 
- _, found = h.getMinerState(rt, sh, owner) - require.False(h.t, found) - _, found = h.getMinerState(rt, sh, joiner) - require.True(h.t, found) - // Also removed from miners list. - require.Equal(t, len(sh.Miners), 0) - - t.Log("calling twice to get stake twice") - rt.ExpectValidateCallerAny() - rt.SetCaller(owner, builtin.AccountActorCodeID) - rt.ExpectAbort(exitcode.ErrForbidden, func() { - rt.Call(h.SubnetActor.Leave, leaveParams) - }) +func TestCheckpoints(t *testing.T) { + ctx := context.Background() + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerify(t, rt) + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + miners := []address.Address{} + for i := 0; i < 3; i++ { + addr, err := w.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + miners = append(miners, addr) + } + totalStake := abi.NewTokenAmount(0) - t.Log("joiner leaves the subnet") + t.Log("three miners join subnet") + for i, m := range miners { + value := abi.NewTokenAmount(1e18) + rt.SetCaller(m, builtin.AccountActorCodeID) + rt.SetReceived(value) + rt.SetBalance(value) + totalStake = big.Add(totalStake, value) + // Anyone can call rt.ExpectValidateCallerAny() - rt.SetCaller(joiner, builtin.AccountActorCodeID) - rt.ExpectSend(joiner, builtin.MethodSend, nil, big.Div(joinValue, actor.LeavingFeeCoeff), nil, exitcode.Ok) - rt.Call(h.SubnetActor.Leave, leaveParams) - // The subnet is completely removed - _, found = h.getSubnet(rt, shid) - require.False(h.t, found) - require.Equal(t, getState(rt).TotalSubnets, uint64(0)) - */ + // The first miner triggers a register message to SCA + if i == 0 { + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.Register, nil, totalStake, nil, exitcode.Ok) + } else { + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.AddStake, nil, value, nil, exitcode.Ok) + } + ret := rt.Call(h.SubnetActor.Join, nil) + rt.Verify() + assert.Nil(h.t, ret) + } + st := getState(rt) + require.Equal(t, 
len(st.Miners), 3) + require.Equal(t, st.Status, actor.Active) + + ver := checkpoint.NewSingleSigner() + addr := tutil.NewIDAddr(t, 100) + shid := address.NewSubnetID(address.RootSubnet, addr) + + t.Log("checkpoint in first and second epoch from three miners") + h.fullSignCheckpoint(t, rt, miners, w, st.CheckPeriod) + h.fullSignCheckpoint(t, rt, miners, w, 2*st.CheckPeriod) + + t.Log("submit in next epoch") + st = getState(rt) + // Submit in the next epoch + epoch := 3 * st.CheckPeriod + ch := schema.NewRawCheckpoint(shid, epoch) + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + // Sign + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + + // Submit checkpoint from first miner in second period + rt.SetCaller(miners[0], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 20)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + b, err := ch.MarshalBinary() + require.NoError(t, err) + // The previous checkpoint fails + params := &sca.CheckpointParams{Checkpoint: b} + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + }) + // Set the right previous checkpoint and send + // without re-signing so it will fail + prevcid, err := st.PrevCheckCid(adt.AsStore(rt), epoch) + require.NoError(t, err) + ch.SetPrevious(prevcid) + b, err = ch.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + }) + // Now sign and send and it should be correct + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + b, err = ch.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + rt.Verify() + st = getState(rt) + chcid, err := 
ch.Cid() + require.NoError(t, err) + wch, found, err := st.GetWindowChecks(adt.AsStore(rt), chcid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, len(wch.Miners), 1) + // No checkpoint committed for that epoch + _, found, err = st.GetCheckpoint(adt.AsStore(rt), epoch) + require.NoError(t, err) + require.False(t, found) + + t.Log("submit next epoch when previous was not committed") + // Submit in the next epoch + epoch = 4 * st.CheckPeriod + ch = schema.NewRawCheckpoint(shid, epoch) + prevcid, err = st.PrevCheckCid(adt.AsStore(rt), epoch) + require.NoError(t, err) + ch.SetPrevious(prevcid) + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + // Sign + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + + // Submit checkpoint from first miner in third period + rt.SetCaller(miners[0], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 20)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + // Now sign and send and it should be correct + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + b, err = ch.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + rt.Verify() + st = getState(rt) + chcid, err = ch.Cid() + require.NoError(t, err) + wch, found, err = st.GetWindowChecks(adt.AsStore(rt), chcid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, len(wch.Miners), 1) + // No checkpoint committed for that epoch + _, found, err = st.GetCheckpoint(adt.AsStore(rt), epoch) + require.NoError(t, err) + require.False(t, found) + + // Submit checkpoint from second miner + rt.SetCaller(miners[1], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 22)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + err = ver.Sign(ctx, w, miners[1], ch) + require.NoError(t, err) + b, err = ch.MarshalBinary() + 
require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.CommitChildCheckpoint, params, big.Zero(), nil, exitcode.Ok) + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + rt.Verify() + st = getState(rt) + chcid, err = ch.Cid() + require.NoError(t, err) + // WindowChecks cleaned + _, found, err = st.GetWindowChecks(adt.AsStore(rt), chcid) + require.NoError(t, err) + require.False(t, found) + ch, found, err = st.GetCheckpoint(adt.AsStore(rt), epoch) + require.NoError(t, err) + require.True(t, found) + comcid, err := ch.Cid() + require.NoError(t, err) + require.Equal(t, comcid, chcid) +} + +func TestZeroCheckPeriod(t *testing.T) { + h := newHarness(t) + rt := getRuntime(t) + h.constructAndVerifyZeroCheck(t, rt) } type shActorHarness struct { @@ -277,11 +430,12 @@ func (h *shActorHarness) constructAndVerify(t *testing.T, rt *mock.Runtime) { rt.ExpectValidateCallerType(builtin.InitActorCodeID) ret := rt.Call(h.SubnetActor.Constructor, &actor.ConstructParams{ - NetworkName: hierarchical.RootSubnet.String(), + NetworkName: address.RootSubnet.String(), Name: "myTestSubnet", - Consensus: actor.PoW, + Consensus: hierarchical.PoW, MinMinerStake: actor.MinMinerStake, DelegMiner: tutil.NewIDAddr(t, 101), + CheckPeriod: abi.ChainEpoch(100), }) assert.Nil(h.t, ret) rt.Verify() @@ -289,17 +443,38 @@ func (h *shActorHarness) constructAndVerify(t *testing.T, rt *mock.Runtime) { var st actor.SubnetState rt.GetState(&st) - parentcid, err := hierarchical.RootSubnet.Cid() - require.NoError(h.t, err) - assert.Equal(h.t, st.ParentID, hierarchical.RootSubnet) - assert.Equal(h.t, st.ParentCid, parentcid) - assert.Equal(h.t, st.Consensus, actor.PoW) + assert.Equal(h.t, st.ParentID, address.RootSubnet) + assert.Equal(h.t, st.Consensus, hierarchical.PoW) assert.Equal(h.t, st.MinMinerStake, actor.MinMinerStake) assert.Equal(h.t, st.Status, actor.Instantiated) + assert.Equal(h.t, st.CheckPeriod, abi.ChainEpoch(100)) // 
Verify that the genesis for the subnet has been generated. // TODO: Consider making some test verifications over genesis. assert.NotEqual(h.t, len(st.Genesis), 0) verifyEmptyMap(h.t, rt, st.Stake) + verifyEmptyMap(h.t, rt, st.Checkpoints) + verifyEmptyMap(h.t, rt, st.WindowChecks) +} + +// Check what happens if we set a check period equal to zero. +// We should be assigning the defualt period. +func (h *shActorHarness) constructAndVerifyZeroCheck(t *testing.T, rt *mock.Runtime) { + rt.ExpectValidateCallerType(builtin.InitActorCodeID) + ret := rt.Call(h.SubnetActor.Constructor, + &actor.ConstructParams{ + NetworkName: address.RootSubnet.String(), + Name: "myTestSubnet", + Consensus: hierarchical.PoW, + MinMinerStake: actor.MinMinerStake, + DelegMiner: tutil.NewIDAddr(t, 101), + }) + assert.Nil(h.t, ret) + rt.Verify() + + var st actor.SubnetState + + rt.GetState(&st) + assert.Equal(h.t, st.CheckPeriod, sca.DefaultCheckpointPeriod) } func verifyEmptyMap(t testing.TB, rt *mock.Runtime, cid cid.Cid) { @@ -331,3 +506,103 @@ func getStake(t *testing.T, rt *mock.Runtime, addr address.Address) abi.TokenAmo require.NoError(t, err) return out } + +func (h *shActorHarness) fullSignCheckpoint(t *testing.T, rt *mock.Runtime, miners []address.Address, w api.Wallet, epoch abi.ChainEpoch) { + st := getState(rt) + ctx := context.Background() + var err error + ver := checkpoint.NewSingleSigner() + addr := tutil.NewIDAddr(t, 100) + shid := address.NewSubnetID(address.RootSubnet, addr) + ch := schema.NewRawCheckpoint(shid, epoch) + prevcid, err := st.PrevCheckCid(adt.AsStore(rt), epoch) + require.NoError(t, err) + ch.SetPrevious(prevcid) + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + // Sign + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + // Submit checkpoint from first miner + rt.SetCaller(miners[0], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 20)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + b, err := 
ch.MarshalBinary() + require.NoError(t, err) + params := &sca.CheckpointParams{Checkpoint: b} + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + st = getState(rt) + chcid, err := ch.Cid() + require.NoError(t, err) + wch, found, err := st.GetWindowChecks(adt.AsStore(rt), chcid) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, len(wch.Miners), 1) + // No checkpoint committed for that epoch + _, found, err = st.GetCheckpoint(adt.AsStore(rt), epoch) + require.NoError(t, err) + require.False(t, found) + + // Can't send checkpoint for the same miner twice + rt.SetCaller(miners[0], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 21)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + }) + + // Check if the epoch is wrong. + chbad := schema.NewRawCheckpoint(shid, epoch+1) + b, err = chbad.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + err = ver.Sign(ctx, w, miners[0], ch) + require.NoError(t, err) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + }) + + // Check if the miner is wrong. 
+ nonminer, err := w.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + rt.SetCaller(nonminer, builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 22)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + err = ver.Sign(ctx, w, nonminer, ch) + require.NoError(t, err) + b, err = ch.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectAbort(exitcode.ErrIllegalArgument, func() { + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + }) + + // Submit checkpoint from second miner + rt.SetCaller(miners[1], builtin.AccountActorCodeID) + rt.SetEpoch(abi.ChainEpoch(epoch + 22)) + rt.ExpectValidateCallerType(builtin.AccountActorCodeID) + err = ver.Sign(ctx, w, miners[1], ch) + require.NoError(t, err) + b, err = ch.MarshalBinary() + require.NoError(t, err) + params = &sca.CheckpointParams{Checkpoint: b} + rt.ExpectSend(hierarchical.SubnetCoordActorAddr, sca.Methods.CommitChildCheckpoint, params, big.Zero(), nil, exitcode.Ok) + rt.Call(h.SubnetActor.SubmitCheckpoint, params) + st = getState(rt) + chcid, err = ch.Cid() + require.NoError(t, err) + // WindowChecks cleaned + _, found, err = st.GetWindowChecks(adt.AsStore(rt), chcid) + require.NoError(t, err) + require.False(t, found) + // WindowChecks cleaned + ch, found, err = st.GetCheckpoint(adt.AsStore(rt), epoch) + require.NoError(t, err) + require.True(t, found) + comcid, err := ch.Cid() + require.NoError(t, err) + require.Equal(t, comcid, chcid) +} diff --git a/chain/consensus/hierarchical/actors/subnet/tspow.go b/chain/consensus/hierarchical/actors/subnet/tspow.go index 46c15ae3a..c80638b2f 100644 --- a/chain/consensus/hierarchical/actors/subnet/tspow.go +++ b/chain/consensus/hierarchical/actors/subnet/tspow.go @@ -10,7 +10,7 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin/system" - param 
"github.com/filecoin-project/lotus/chain/consensus/params" + param "github.com/filecoin-project/lotus/chain/consensus/common/params" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/genesis" @@ -20,8 +20,8 @@ import ( xerrors "golang.org/x/xerrors" ) -func makePoWGenesisBlock(ctx context.Context, bs bstore.Blockstore, template genesis.Template) (*genesis2.GenesisBootstrap, error) { - st, _, err := MakeInitialStateTree(ctx, bs, template) +func makePoWGenesisBlock(ctx context.Context, bs bstore.Blockstore, template genesis.Template, checkPeriod abi.ChainEpoch) (*genesis2.GenesisBootstrap, error) { + st, _, err := MakeInitialStateTree(ctx, bs, template, checkPeriod) if err != nil { return nil, xerrors.Errorf("make initial state tree failed: %w", err) } @@ -54,12 +54,13 @@ func makePoWGenesisBlock(ctx context.Context, bs bstore.Blockstore, template gen mm := &types.MsgMeta{ BlsMessages: emptyroot, SecpkMessages: emptyroot, + CrossMessages: emptyroot, } mmb, err := mm.ToStorageBlock() if err != nil { return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) } - if err := bs.Put(mmb); err != nil { + if err := bs.Put(ctx, mmb); err != nil { return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) } @@ -101,7 +102,7 @@ func makePoWGenesisBlock(ctx context.Context, bs bstore.Blockstore, template gen return nil, xerrors.Errorf("serializing block header failed: %w", err) } - if err := bs.Put(sb); err != nil { + if err := bs.Put(ctx, sb); err != nil { return nil, xerrors.Errorf("putting header to blockstore: %w", err) } diff --git a/chain/consensus/hierarchical/atomic/cbor_gen.go b/chain/consensus/hierarchical/atomic/cbor_gen.go new file mode 100644 index 000000000..acd4e6df6 --- /dev/null +++ b/chain/consensus/hierarchical/atomic/cbor_gen.go @@ -0,0 +1,421 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package atomic + +import ( + "fmt" + "io" + "math" + "sort" + + abi "github.com/filecoin-project/go-state-types/abi" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufMergeParams = []byte{129} + +func (t *MergeParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufMergeParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.State ([]uint8) (slice) + if len(t.State) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.State was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.State))); err != nil { + return err + } + + if _, err := w.Write(t.State[:]); err != nil { + return err + } + return nil +} + +func (t *MergeParams) UnmarshalCBOR(r io.Reader) error { + *t = MergeParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.State ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.State: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.State = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.State[:]); err != nil { + return err + } + return nil +} + +var lengthBufUnlockParams = []byte{130} + +func (t *UnlockParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := 
w.Write(lengthBufUnlockParams); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Params (atomic.LockParams) (struct) + if err := t.Params.MarshalCBOR(w); err != nil { + return err + } + + // t.State ([]uint8) (slice) + if len(t.State) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.State was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.State))); err != nil { + return err + } + + if _, err := w.Write(t.State[:]); err != nil { + return err + } + return nil +} + +func (t *UnlockParams) UnmarshalCBOR(r io.Reader) error { + *t = UnlockParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Params (atomic.LockParams) (struct) + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + t.Params = new(LockParams) + if err := t.Params.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.Params pointer: %w", err) + } + } + + } + // t.State ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.State: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.State = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.State[:]); err != nil { + return err + } + return nil +} + +var lengthBufLockParams = []byte{130} + +func (t *LockParams) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufLockParams); 
err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Method (abi.MethodNum) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Method)); err != nil { + return err + } + + // t.Params ([]uint8) (slice) + if len(t.Params) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.Params was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.Params))); err != nil { + return err + } + + if _, err := w.Write(t.Params[:]); err != nil { + return err + } + return nil +} + +func (t *LockParams) UnmarshalCBOR(r io.Reader) error { + *t = LockParams{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Method (abi.MethodNum) (uint64) + + { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Method = abi.MethodNum(extra) + + } + // t.Params ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.Params: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.Params = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.Params[:]); err != nil { + return err + } + return nil +} + +var lengthBufLockedOutput = []byte{129} + +func (t *LockedOutput) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufLockedOutput); err != nil { + return err + } + + scratch := make([]byte, 9) + + // 
t.Cid (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Cid); err != nil { + return xerrors.Errorf("failed to write cid field t.Cid: %w", err) + } + + return nil +} + +func (t *LockedOutput) UnmarshalCBOR(r io.Reader) error { + *t = LockedOutput{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 1 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Cid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Cid: %w", err) + } + + t.Cid = c + + } + return nil +} + +var lengthBufLockedState = []byte{130} + +func (t *LockedState) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufLockedState); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.Lock (bool) (bool) + if err := cbg.WriteBool(w, t.Lock); err != nil { + return err + } + + // t.S ([]uint8) (slice) + if len(t.S) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.S was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.S))); err != nil { + return err + } + + if _, err := w.Write(t.S[:]); err != nil { + return err + } + return nil +} + +func (t *LockedState) UnmarshalCBOR(r io.Reader) error { + *t = LockedState{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 2 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.Lock (bool) (bool) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return 
err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.Lock = false + case 21: + t.Lock = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.S ([]uint8) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.S: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.S = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.S[:]); err != nil { + return err + } + return nil +} diff --git a/chain/consensus/hierarchical/atomic/exec/exec.go b/chain/consensus/hierarchical/atomic/exec/exec.go new file mode 100644 index 000000000..44a71ce58 --- /dev/null +++ b/chain/consensus/hierarchical/atomic/exec/exec.go @@ -0,0 +1,223 @@ +package exec + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/filecoin-project/lotus/chain/consensus/actors/registry" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/filecoin-project/lotus/chain/rand" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + logging "github.com/ipfs/go-log/v2" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var log = logging.Logger("atomic-exec") + +// ComputeAtomicOutput receives as input a list of locked states from other subnets, and a 
list of +// messages to execute atomically in an actor, and output the final state for the actor after the execution +// in actorState. This output needs to be committed to the SCA in the parent chain to finalize the execution. +func ComputeAtomicOutput(ctx context.Context, sm *stmgr.StateManager, to address.Address, actorState interface{}, locked []atomic.LockableState, msgs []*types.Message) error { + log.Info("triggering off-chain execution for locked state") + // Get heaviest tipset + ts := sm.ChainStore().GetHeaviestTipSet() + // Search back till we find a height with no fork, or we reach the beginning. + for ts.Height() > 0 { + pts, err := sm.ChainStore().GetTipSetFromKey(ctx, ts.Parents()) + if err != nil { + return xerrors.Errorf("failed to find a non-forking epoch: %w", err) + } + ts = pts + } + + // Get base state parameters + pheight := ts.Height() + bstate := ts.ParentState() + tst, err := sm.StateTree(bstate) + if err != nil { + return err + } + // transplant actor state and state tree to temporary blockstore for off-chain computation + tmpbs, err := tmpState(ctx, sm.ChainStore().StateBlockstore(), tst, []address.Address{to}) + if err != nil { + return err + } + if err := vm.Copy(ctx, sm.ChainStore().StateBlockstore(), tmpbs, bstate); err != nil { + return err + } + + // vm init + vmopt := &vm.VMOpts{ + StateBase: bstate, + Epoch: pheight + 1, + Rand: rand.NewStateRand(sm.ChainStore(), ts.Cids(), sm.Beacon(), sm.GetNetworkVersion), + // Bstore: sm.ChainStore().StateBlockstore(), + Bstore: tmpbs, + Actors: registry.NewActorRegistry(), + Syscalls: sm.Syscalls, + CircSupplyCalc: sm.GetCirculatingSupply, + NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1), + BaseFee: types.NewInt(0), + LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), + } + vmi, err := sm.VMConstructor()(ctx, vmopt) + if err != nil { + return xerrors.Errorf("failed to set up vm: %w", err) + } + + // Merge locked state to actor state. 
+ for _, l := range locked { + mparams, err := atomic.WrapMergeParams(l) + if err != nil { + return xerrors.Errorf("error wrapping merge params: %w", err) + } + lmsg, err := mergeMsg(to, mparams) + if err != nil { + return xerrors.Errorf("error creating merge msg: %w", err) + } + err = computeMsg(ctx, vmi, lmsg) + if err != nil { + return xerrors.Errorf("error merging locked states: %w", err) + } + } + + // execute messages + for _, m := range msgs { + if m.GasLimit == 0 { + m.GasLimit = build.BlockGasLimit + } + if m.GasFeeCap == types.EmptyInt { + m.GasFeeCap = types.NewInt(0) + } + if m.GasPremium == types.EmptyInt { + m.GasPremium = types.NewInt(0) + } + + if m.Value == types.EmptyInt { + m.Value = types.NewInt(0) + } + + fromActor, err := vmi.StateTree().GetActor(m.From) + if err != nil { + return xerrors.Errorf("call raw get actor: %w", err) + } + + m.Nonce = fromActor.Nonce + err = computeMsg(ctx, vmi, m) + if err != nil { + return xerrors.Errorf("error executing atomic msg: %w", err) + } + } + + // flush state to process it. 
+ _, err = vmi.Flush(ctx) + if err != nil { + return err + } + + // output state from actor in actorState + toActor, err := vmi.StateTree().GetActor(to) + if err != nil { + return xerrors.Errorf("call raw get actor: %s", err) + } + cst := cbor.NewCborStore(tmpbs) + if err := cst.Get(ctx, toActor.Head, actorState); err != nil { + return err + } + + return nil +} + +func computeMsg(ctx context.Context, vmi *vm.VM, m *types.Message) error { + // apply msg implicitly to execute new state + ret, err := vmi.ApplyImplicitMessage(ctx, m) + if err != nil { + return xerrors.Errorf("apply message failed: %w", err) + } + + if err := ret.ActorErr; err != nil { + return err + } + return nil +} + +func mergeMsg(to address.Address, mparams *atomic.MergeParams) (*types.Message, error) { + enc, err := actors.SerializeParams(mparams) + if err != nil { + return nil, err + } + m := &types.Message{ + From: builtin.SystemActorAddr, + To: to, + Value: abi.NewTokenAmount(0), + Method: atomic.MethodMerge, + Params: enc, + } + m.GasLimit = build.BlockGasLimit + return m, nil +} + +// tmpState creates a temporary blockstore with all the state required to perform +// the off-chain execution. +func tmpState(ctx context.Context, frombs blockstore.Blockstore, src *state.StateTree, pluck []address.Address) (blockstore.Blockstore, error) { + + tmpbs := blockstore.NewMemory() + cstore := cbor.NewCborStore(tmpbs) + dst, err := state.NewStateTree(cstore, src.Version()) + if err != nil { + return nil, err + } + for _, a := range pluck { + actor, err := src.GetActor(a) + if err != nil { + return nil, xerrors.Errorf("get actor %s failed: %w", a, err) + } + + err = dst.SetActor(a, actor) + if err != nil { + return nil, err + } + + // recursive copy of the actor state. 
+ err = vm.Copy(context.TODO(), frombs, tmpbs, actor.Head) + if err != nil { + return nil, err + } + + actorState, err := chainReadObj(ctx, frombs, actor.Head) + if err != nil { + return nil, err + } + + cid, err := cstore.Put(ctx, &cbg.Deferred{Raw: actorState}) + if err != nil { + return nil, err + } + + if cid != actor.Head { + return nil, xerrors.Errorf("mismatch in head cid after actor transplant") + } + } + + return tmpbs, nil +} + +func chainReadObj(ctx context.Context, bs blockstore.Blockstore, obj cid.Cid) ([]byte, error) { + blk, err := bs.Get(ctx, obj) + if err != nil { + return nil, xerrors.Errorf("blockstore get: %w", err) + } + + return blk.RawData(), nil +} diff --git a/chain/consensus/hierarchical/atomic/exec/exec_test.go b/chain/consensus/hierarchical/atomic/exec/exec_test.go new file mode 100644 index 000000000..5cb08fbc3 --- /dev/null +++ b/chain/consensus/hierarchical/atomic/exec/exec_test.go @@ -0,0 +1,789 @@ +package exec_test + +import ( + "context" + "encoding/json" + "fmt" + "io" + "testing" + "time" + + "github.com/google/uuid" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" + xerrors "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/adt" + actbuiltin "github.com/filecoin-project/lotus/chain/actors/builtin" + init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/system" + "github.com/filecoin-project/lotus/chain/beacon" + actor 
"github.com/filecoin-project/lotus/chain/consensus/actors" + replace "github.com/filecoin-project/lotus/chain/consensus/actors/atomic-replace" + "github.com/filecoin-project/lotus/chain/consensus/actors/reward" + "github.com/filecoin-project/lotus/chain/consensus/common" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + atom "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic/exec" + genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/genesis" + "github.com/filecoin-project/lotus/journal" + _ "github.com/filecoin-project/lotus/lib/sigs/bls" + _ "github.com/filecoin-project/lotus/lib/sigs/secp" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/specs-actors/actors/builtin" + adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + cbor "github.com/ipfs/go-ipld-cbor" +) + +var cidUndef, _ = abi.CidBuilder.Sum([]byte("test")) + +var ReplaceActorAddr = func() address.Address { + a, err := address.NewIDAddress(999) + if err != nil { + panic(err) + } + return a +}() + +func TestComputeState(t *testing.T) { + ctx := context.TODO() + + // cg, err := gen.NewGenerator() + cg, err := NewGenerator(t) + if err != nil { + t.Fatal(err) + } + + t.Log("Execute messages atomically from cg.Banker()") + 
target, err := address.NewIDAddress(101) + if err != nil { + t.Fatal(err) + } + msgs := []*types.Message{} + enc, err := actors.SerializeParams(&replace.OwnParams{Seed: "testSeed"}) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, &types.Message{ + From: cg.Banker(), + To: ReplaceActorAddr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodOwn, + Params: enc, + }) + enc, err = actors.SerializeParams(&replace.ReplaceParams{Addr: target}) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, &types.Message{ + From: cg.Banker(), + To: ReplaceActorAddr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodReplace, + Params: enc, + }) + own1 := &replace.Owners{M: map[string]cid.Cid{target.String(): cidUndef}} + var st replace.ReplaceState + err = exec.ComputeAtomicOutput(ctx, cg.StateManager(), msgs[0].To, &st, []atom.LockableState{own1}, msgs) + require.NoError(t, err) + owners, err := st.UnwrapOwners() + require.NoError(t, err) + + // predicting the address here... may break if other assumptions change + // FIXME: we could resolve the address to have the right ID always + // (but feeling lazy now, want to test fast) + taddr, err := address.NewIDAddress(100) + if err != nil { + t.Fatal(err) + } + // Check that the atomic replace happened. 
+ c, ok := owners.M[taddr.String()] + require.True(t, ok) + require.Equal(t, c, cidUndef) + c, ok = owners.M[target.String()] + require.True(t, ok) + exp, _ := abi.CidBuilder.Sum([]byte("testSeed")) + require.Equal(t, c, exp) + + t.Log("Execute messages atomically from target's view") + // Compute the opposite and compare output CID + msgs = []*types.Message{} + enc, err = actors.SerializeParams(&replace.OwnParams{Seed: "test"}) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, &types.Message{ + From: target, + To: ReplaceActorAddr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodOwn, + Params: enc, + }) + enc, err = actors.SerializeParams(&replace.ReplaceParams{Addr: taddr}) + if err != nil { + t.Fatal(err) + } + msgs = append(msgs, &types.Message{ + From: target, + To: ReplaceActorAddr, + Value: abi.NewTokenAmount(0), + Method: replace.MethodReplace, + Params: enc, + }) + own1 = &replace.Owners{M: map[string]cid.Cid{taddr.String(): exp}} + var st2 replace.ReplaceState + err = exec.ComputeAtomicOutput(ctx, cg.StateManager(), msgs[0].To, &st2, []atom.LockableState{own1}, msgs) + require.NoError(t, err) + + // Check that the atomic replace happened. + owners, err = st.UnwrapOwners() + require.NoError(t, err) + c, ok = owners.M[taddr.String()] + require.True(t, ok) + require.Equal(t, c, cidUndef) + c, ok = owners.M[target.String()] + require.True(t, ok) + exp, _ = abi.CidBuilder.Sum([]byte("testSeed")) + require.Equal(t, c, exp) + + t.Log("Comparing outputs of independent off-chain execution through CID") + // Compare output cids. 
+ oc1, err := st.Owners.Cid() + require.NoError(t, err) + oc2, err := st2.Owners.Cid() + require.NoError(t, err) + require.Equal(t, oc1, oc2) + +} + +var rootkeyMultisig = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, + VestingDuration: 0, + VestingStart: 0, +} + +var DefaultVerifregRootkeyActor = genesis.Actor{ + Type: genesis.TMultisig, + Balance: big.NewInt(0), + Meta: rootkeyMultisig.ActorMeta(), +} + +var remAccTestKey, _ = address.NewFromString("t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy") +var remAccMeta = genesis.MultisigMeta{ + Signers: []address.Address{remAccTestKey}, + Threshold: 1, +} + +var DefaultRemainderAccountActor = genesis.Actor{ + Type: genesis.TMultisig, + Balance: big.NewInt(0), + Meta: remAccMeta.ActorMeta(), +} + +type ChainGen struct { + msgsPerBlock int + + bs blockstore.Blockstore + + cs *store.ChainStore + + beacon beacon.Schedule + + sm *stmgr.StateManager + + genesis *types.BlockHeader + CurTipset *store.FullTipSet + + w *wallet.LocalWallet + + Miners []address.Address + receivers []address.Address + // a SecP address + banker address.Address + + r repo.Repo + lr repo.LockedRepo +} + +const msgsPerBlock = 20 + +func NewGenerator(t *testing.T) (*ChainGen, error) { + j := journal.NilJournal() + + mr := repo.NewMemory(nil) + lr, err := mr.Lock(repo.StorageMiner) + if err != nil { + return nil, xerrors.Errorf("taking mem-repo lock failed: %w", err) + } + + ds, err := lr.Datastore(context.TODO(), "/metadata") + if err != nil { + return nil, xerrors.Errorf("failed to get metadata datastore: %w", err) + } + + bs, err := lr.Blockstore(context.TODO(), repo.UniversalBlockstore) + if err != nil { + return nil, err + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + fmt.Printf("WARN: failed to close blockstore: %s\n", err) + } + } + }() + + ks, err := lr.KeyStore() + if err != nil { + return nil, xerrors.Errorf("getting repo keystore failed: %w", err) + } + + w, err 
:= wallet.NewWallet(ks) + if err != nil { + return nil, xerrors.Errorf("creating memrepo wallet failed: %w", err) + } + + banker, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + return nil, xerrors.Errorf("failed to generate banker key: %w", err) + } + + receievers := make([]address.Address, msgsPerBlock) + for r := range receievers { + receievers[r], err = w.WalletNew(context.Background(), types.KTBLS) + if err != nil { + return nil, xerrors.Errorf("failed to generate receiver key: %w", err) + } + } + + template := genesis.Template{ + NetworkVersion: network.Version15, + Accounts: []genesis.Actor{ + { + Type: genesis.TAccount, + Balance: types.FromFil(50000), + Meta: (&genesis.AccountMeta{Owner: banker}).ActorMeta(), + }, + }, + VerifregRootKey: DefaultVerifregRootkeyActor, + RemainderAccount: DefaultRemainderAccountActor, + NetworkName: uuid.New().String(), + Timestamp: uint64(build.Clock.Now().Add(-500 * time.Duration(build.BlockDelaySecs) * time.Second).Unix()), + } + + genb, err := makeDelegatedGenesisBlock(context.TODO(), bs, template, abi.ChainEpoch(100)) + require.NoError(t, err) + weight := func(ctx context.Context, stateBs blockstore.Blockstore, ts *types.TipSet) (types.BigInt, error) { + if ts == nil { + return types.NewInt(0), nil + } + + return big.NewInt(int64(ts.Height() + 1)), nil + } + cs := store.NewChainStore(bs, bs, ds, weight, j) + + genfb := &types.FullBlock{Header: genb.Genesis} + gents := store.NewFullTipSet([]*types.FullBlock{genfb}) + + if err := cs.SetGenesis(context.TODO(), genb.Genesis); err != nil { + return nil, xerrors.Errorf("set genesis failed: %w", err) + } + + miners := []address.Address{} + + beac := beacon.Schedule{{Start: 0, Beacon: beacon.NewMockBeacon(time.Second)}} + + sys := vm.Syscalls(&genFakeVerifier{}) + sm, err := stmgr.NewStateManager(cs, common.RootTipSetExecutor(), nil, sys, common.DefaultUpgradeSchedule(), beac) + if err != nil { + return nil, xerrors.Errorf("initing stmgr: %w", 
err) + } + + gen := &ChainGen{ + bs: bs, + cs: cs, + sm: sm, + msgsPerBlock: msgsPerBlock, + genesis: genb.Genesis, + beacon: beac, + w: w, + + Miners: miners, + banker: banker, + receivers: receievers, + + CurTipset: gents, + + r: mr, + lr: lr, + } + + return gen, nil +} + +func makeDelegatedGenesisBlock(ctx context.Context, bs blockstore.Blockstore, template genesis.Template, checkPeriod abi.ChainEpoch) (*genesis2.GenesisBootstrap, error) { + st, _, err := MakeInitialStateTree(ctx, bs, template, checkPeriod) + if err != nil { + return nil, xerrors.Errorf("make initial state tree failed: %w", err) + } + + stateroot, err := st.Flush(ctx) + if err != nil { + return nil, xerrors.Errorf("flush state tree failed: %w", err) + } + + // temp chainstore + //cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), j) + + /* // Verify PreSealed Data + stateroot, err = VerifyPreSealedData(ctx, cs, sys, stateroot, template, keyIDs, template.NetworkVersion) + if err != nil { + return nil, xerrors.Errorf("failed to verify presealed data: %w", err) + } + + stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion) + if err != nil { + return nil, xerrors.Errorf("setup miners failed: %w", err) + }*/ + + store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) + emptyroot, err := adt0.MakeEmptyArray(store).Root() + if err != nil { + return nil, xerrors.Errorf("amt build failed: %w", err) + } + + mm := &types.MsgMeta{ + BlsMessages: emptyroot, + SecpkMessages: emptyroot, + CrossMessages: emptyroot, + } + mmb, err := mm.ToStorageBlock() + if err != nil { + return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) + } + if err := bs.Put(ctx, mmb); err != nil { + return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) + } + + tickBuf := make([]byte, 32) + // TODO: We can't use randomness in genesis block + // if want to make it deterministic. Consider using + // a seed to for the ticket generation? 
+ // _, _ = rand.Read(tickBuf) + genesisticket := &types.Ticket{ + VRFProof: tickBuf, + } + + b := &types.BlockHeader{ + Miner: system.Address, + Ticket: genesisticket, + Parents: []cid.Cid{}, + Height: 0, + ParentWeight: types.NewInt(0), + ParentStateRoot: stateroot, + Messages: mmb.Cid(), + ParentMessageReceipts: emptyroot, + BLSAggregate: nil, + BlockSig: nil, + Timestamp: template.Timestamp, + ElectionProof: new(types.ElectionProof), + BeaconEntries: []types.BeaconEntry{ + { + Round: 0, + Data: make([]byte, 32), + }, + }, + ParentBaseFee: abi.NewTokenAmount(build.InitialBaseFee), + } + + sb, err := b.ToStorageBlock() + if err != nil { + return nil, xerrors.Errorf("serializing block header failed: %w", err) + } + + if err := bs.Put(ctx, sb); err != nil { + return nil, xerrors.Errorf("putting header to blockstore: %w", err) + } + + return &genesis2.GenesisBootstrap{ + Genesis: b, + }, nil +} + +func MakeInitialStateTree(ctx context.Context, bs blockstore.Blockstore, template genesis.Template, checkPeriod abi.ChainEpoch) (*state.StateTree, map[address.Address]address.Address, error) { + // Create empty state tree + + cst := cbor.NewCborStore(bs) + _, err := cst.Put(context.TODO(), []struct{}{}) + if err != nil { + return nil, nil, xerrors.Errorf("putting empty object: %w", err) + } + + sv, err := state.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, xerrors.Errorf("getting state tree version: %w", err) + } + + state, err := state.NewStateTree(cst, sv) + if err != nil { + return nil, nil, xerrors.Errorf("making new state tree: %w", err) + } + + av, err := actors.VersionForNetwork(template.NetworkVersion) + if err != nil { + return nil, nil, xerrors.Errorf("getting network version: %w", err) + } + + // Create system actor + + sysact, err := genesis2.SetupSystemActor(ctx, bs, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup system actor: %w", err) + } + if err := state.SetActor(system.Address, sysact); err != nil { + 
return nil, nil, xerrors.Errorf("set system actor: %w", err) + } + + // Create empty power actor + spact, err := SetupStoragePowerActor(ctx, bs, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage power actor: %w", err) + } + if err := state.SetActor(power.Address, spact); err != nil { + return nil, nil, xerrors.Errorf("set storage power actor: %w", err) + } + + // Create init actor + + idStart, initact, keyIDs, err := genesis2.SetupInitActor(ctx, bs, template.NetworkName, template.Accounts, template.VerifregRootKey, template.RemainderAccount, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup init actor: %w", err) + } + if err := state.SetActor(init_.Address, initact); err != nil { + return nil, nil, xerrors.Errorf("set init actor: %w", err) + } + + // Setup sca actor + params := &sca.ConstructorParams{ + NetworkName: template.NetworkName, + CheckpointPeriod: uint64(checkPeriod), + } + scaact, err := SetupSCAActor(ctx, bs, params) + if err != nil { + return nil, nil, err + } + err = state.SetActor(hierarchical.SubnetCoordActorAddr, scaact) + if err != nil { + return nil, nil, xerrors.Errorf("set SCA actor: %w", err) + } + + // NOTE: Setting a replace actor at the beginning so we don't have + // to initialize it for testing. + ract, err := SetupReplaceActor(ctx, bs) + if err != nil { + return nil, nil, err + } + err = state.SetActor(ReplaceActorAddr, ract) + if err != nil { + return nil, nil, xerrors.Errorf("set Replace actor: %w", err) + } + + // Create empty market actor + marketact, err := SetupStorageMarketActor(ctx, bs, av) + if err != nil { + return nil, nil, xerrors.Errorf("setup storage market actor: %w", err) + } + if err := state.SetActor(market.Address, marketact); err != nil { + return nil, nil, xerrors.Errorf("set storage market actor: %w", err) + } + // Setup reward actor + // This is a modified reward actor to support the needs of hierarchical consensus + // protocol. 
+ rewact, err := SetupRewardActor(ctx, bs, big.Zero(), av) + if err != nil { + return nil, nil, xerrors.Errorf("setup reward actor: %w", err) + } + + err = state.SetActor(reward.RewardActorAddr, rewact) + if err != nil { + return nil, nil, xerrors.Errorf("set reward actor: %w", err) + } + + bact, err := genesis2.MakeAccountActor(ctx, cst, av, builtin.BurntFundsActorAddr, big.Zero()) + if err != nil { + return nil, nil, xerrors.Errorf("setup burnt funds actor state: %w", err) + } + if err := state.SetActor(builtin.BurntFundsActorAddr, bact); err != nil { + return nil, nil, xerrors.Errorf("set burnt funds actor: %w", err) + } + + // Create accounts + for _, info := range template.Accounts { + + switch info.Type { + case genesis.TAccount: + if err := genesis2.CreateAccountActor(ctx, cst, state, info, keyIDs, av); err != nil { + return nil, nil, xerrors.Errorf("failed to create account actor: %w", err) + } + + case genesis.TMultisig: + + ida, err := address.NewIDAddress(uint64(idStart)) + if err != nil { + return nil, nil, err + } + idStart++ + + if err := genesis2.CreateMultisigAccount(ctx, cst, state, ida, info, keyIDs, av); err != nil { + return nil, nil, err + } + default: + return nil, nil, xerrors.New("unsupported account type") + } + + } + + totalFilAllocated := big.Zero() + + err = state.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Balance.Nil() { + panic(fmt.Sprintf("actor %s (%s) has nil balance", addr, builtin.ActorNameByCode(act.Code))) + } + totalFilAllocated = big.Add(totalFilAllocated, act.Balance) + return nil + }) + if err != nil { + return nil, nil, xerrors.Errorf("summing account balances in state tree: %w", err) + } + + totalFil := big.Mul(big.NewInt(int64(build.FilBase)), big.NewInt(int64(build.FilecoinPrecision))) + remainingFil := big.Sub(totalFil, totalFilAllocated) + if remainingFil.Sign() < 0 { + return nil, nil, xerrors.Errorf("somehow overallocated filecoin (allocated = %s)", types.FIL(totalFilAllocated)) + } + + 
template.RemainderAccount.Balance = remainingFil + + switch template.RemainderAccount.Type { + case genesis.TAccount: + var ainfo genesis.AccountMeta + if err := json.Unmarshal(template.RemainderAccount.Meta, &ainfo); err != nil { + return nil, nil, xerrors.Errorf("unmarshaling account meta: %w", err) + } + + _, ok := keyIDs[ainfo.Owner] + if ok { + return nil, nil, fmt.Errorf("remainder account has already been declared, cannot be assigned 90: %s", ainfo.Owner) + } + + keyIDs[ainfo.Owner] = actbuiltin.ReserveAddress + err = genesis2.CreateAccountActor(ctx, cst, state, template.RemainderAccount, keyIDs, av) + if err != nil { + return nil, nil, xerrors.Errorf("creating remainder acct: %w", err) + } + + case genesis.TMultisig: + if err = genesis2.CreateMultisigAccount(ctx, cst, state, actbuiltin.ReserveAddress, template.RemainderAccount, keyIDs, av); err != nil { + return nil, nil, xerrors.Errorf("failed to set up remainder: %w", err) + } + default: + return nil, nil, xerrors.Errorf("unknown account type for remainder: %w", err) + } + + return state, keyIDs, nil +} + +func SetupSCAActor(ctx context.Context, bs blockstore.Blockstore, params *sca.ConstructorParams) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + st, err := sca.ConstructSCAState(adt.WrapStore(ctx, cst), params) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, st) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actor.SubnetCoordActorCodeID, + Balance: big.Zero(), + Head: statecid, + } + + return act, nil +} + +func SetupReplaceActor(ctx context.Context, bs blockstore.Blockstore) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + st, err := replace.ConstructState() + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, st) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actor.ReplaceActorCodeID, + Balance: big.Zero(), + Head: statecid, + } + + return act, nil +} + +func SetupRewardActor(ctx 
context.Context, bs blockstore.Blockstore, qaPower big.Int, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + rst := reward.ConstructState(qaPower) + + statecid, err := cst.Put(ctx, rst) + if err != nil { + return nil, err + } + + // NOTE: For now, everything in the reward actor is the same except the code, + // where we included an additional method to fund accounts. This may change + // in the future when we design specific reward system for subnets. + act := &types.Actor{ + Code: actor.RewardActorCodeID, + // NOTE: This sets up the initial balance of the reward actor. + Balance: types.BigInt{Int: build.InitialRewardBalance}, + Head: statecid, + } + + return act, nil +} + +func SetupStorageMarketActor(ctx context.Context, bs blockstore.Blockstore, av actors.Version) (*types.Actor, error) { + cst := cbor.NewCborStore(bs) + mst, err := market.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, mst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := market.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} + +func SetupStoragePowerActor(ctx context.Context, bs blockstore.Blockstore, av actors.Version) (*types.Actor, error) { + + cst := cbor.NewCborStore(bs) + pst, err := power.MakeState(adt.WrapStore(ctx, cbor.NewCborStore(bs)), av) + if err != nil { + return nil, err + } + + statecid, err := cst.Put(ctx, pst.GetState()) + if err != nil { + return nil, err + } + + actcid, err := power.GetActorCodeID(av) + if err != nil { + return nil, err + } + + act := &types.Actor{ + Code: actcid, + Head: statecid, + Balance: big.Zero(), + } + + return act, nil +} + +type genFakeVerifier struct{} + +var _ ffiwrapper.Verifier = (*genFakeVerifier)(nil) + +func (m genFakeVerifier) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { + return 
true, nil +} + +func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) GenerateWinningPoStSectorChallenge(ctx context.Context, proof abi.RegisteredPoStProof, id abi.ActorID, randomness abi.PoStRandomness, u uint64) ([]uint64, error) { + panic("not supported") +} +func (cg *ChainGen) Blockstore() blockstore.Blockstore { + return cg.bs +} + +func (cg *ChainGen) StateManager() *stmgr.StateManager { + return cg.sm +} + +func (cg *ChainGen) SetStateManager(sm *stmgr.StateManager) { + cg.sm = sm +} + +func (cg *ChainGen) ChainStore() *store.ChainStore { + return cg.cs +} + +func (cg *ChainGen) BeaconSchedule() beacon.Schedule { + return cg.beacon +} + +func (cg *ChainGen) Genesis() *types.BlockHeader { + return cg.genesis +} + +func (cg *ChainGen) Banker() address.Address { + return cg.banker +} diff --git a/chain/consensus/hierarchical/atomic/gen/gen.go b/chain/consensus/hierarchical/atomic/gen/gen.go new file mode 100644 index 000000000..c7f52f7f1 --- /dev/null +++ b/chain/consensus/hierarchical/atomic/gen/gen.go @@ -0,0 +1,18 @@ +package main + +import ( + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "atomic", + atomic.MergeParams{}, + atomic.UnlockParams{}, + atomic.LockParams{}, + atomic.LockedOutput{}, + atomic.LockedState{}, + ); err != nil { + panic(err) + } +} diff --git 
a/chain/consensus/hierarchical/atomic/lock.go b/chain/consensus/hierarchical/atomic/lock.go new file mode 100644 index 000000000..edb8c444a --- /dev/null +++ b/chain/consensus/hierarchical/atomic/lock.go @@ -0,0 +1,190 @@ +package atomic + +//go:generate go run ./gen/gen.go + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/cbor" + "github.com/filecoin-project/specs-actors/v7/actors/runtime" + "github.com/ipfs/go-cid" + xerrors "golang.org/x/xerrors" +) + +const ( + MethodLock abi.MethodNum = 2 + MethodMerge abi.MethodNum = 3 + MethodAbort abi.MethodNum = 4 + MethodUnlock abi.MethodNum = 5 +) + +type Marshalable interface { + cbor.Marshaler + cbor.Unmarshaler +} + +// LockableState defines the interface required for states +// that needs to be lockable. +type LockableState interface { + Marshalable + Merge(other LockableState) error +} + +type LockedOutput struct { + Cid cid.Cid +} + +// LockableActor defines the interface that needs to be implemented by actors +// that want to support the atomic execution of some (or all) of their functions. +type LockableActor interface { + // Lock defines how to lock the state in the actor. + Lock(rt runtime.Runtime, params *LockParams) *LockedOutput + // Merge takes external locked state and merges it to the current actors state. + Merge(rt runtime.Runtime, params *MergeParams) *abi.EmptyValue + // Unlock merges the output of an execution and unlocks the state. + Unlock(rt runtime.Runtime, params *UnlockParams) *abi.EmptyValue + // Abort unlocks the state and aborts the atomic execution. + Abort(rt runtime.Runtime, params *LockParams) *abi.EmptyValue +} + +// LockParams wraps serialized params from a message with the requested methodnum. 
+type LockParams struct {
+	Method abi.MethodNum
+	Params []byte
+}
+
+func WrapLockParams(m abi.MethodNum, params Marshalable) (*LockParams, error) {
+	var buf bytes.Buffer
+	if err := params.MarshalCBOR(&buf); err != nil {
+		return nil, err
+	}
+	return &LockParams{m, buf.Bytes()}, nil
+}
+
+func WrapSerializedParams(m abi.MethodNum, params []byte) (*LockParams, error) {
+	return &LockParams{m, params}, nil
+}
+
+func UnwrapLockParams(params *LockParams, out Marshalable) error {
+	return out.UnmarshalCBOR(bytes.NewReader(params.Params))
+}
+
+// UnlockParams identifies the input params of a message
+// along with the output state to merge.
+type UnlockParams struct {
+	Params *LockParams
+	State  []byte
+}
+
+func WrapUnlockParams(params *LockParams, out LockableState) (*UnlockParams, error) {
+	var buf bytes.Buffer
+	if err := out.MarshalCBOR(&buf); err != nil {
+		return nil, err
+	}
+	return &UnlockParams{params, buf.Bytes()}, nil
+}
+
+func WrapSerializedUnlockParams(params *LockParams, out []byte) (*UnlockParams, error) {
+	return &UnlockParams{params, out}, nil
+}
+
+func UnwrapUnlockParams(params *UnlockParams, out LockableState) error {
+	return out.UnmarshalCBOR(bytes.NewReader(params.State))
+}
+
+// MergeParams wraps locked state to merge in params.
+type MergeParams struct {
+	State []byte
+}
+
+func WrapMergeParams(out LockableState) (*MergeParams, error) {
+	var buf bytes.Buffer
+	if err := out.MarshalCBOR(&buf); err != nil {
+		return nil, err
+	}
+	return &MergeParams{buf.Bytes()}, nil
+}
+
+func UnwrapMergeParams(params *MergeParams, out LockableState) error {
+	return out.UnmarshalCBOR(bytes.NewReader(params.State))
+}
+
+// ValidateIfLocked checks if certain state is locked and thus can be
+// modified.
+func ValidateIfLocked(states ...*LockedState) error {
+	for _, s := range states {
+		if s.IsLocked() {
+			return xerrors.Errorf("abort. One of the state or more are locked")
+		}
+	}
+	return nil
+}
+
+// Cid to identify uniquely locked state.
+func (s *LockedState) Cid() (cid.Cid, error) { + var buf bytes.Buffer + err := s.MarshalCBOR(&buf) + if err != nil { + return cid.Undef, err + } + return abi.CidBuilder.Sum(buf.Bytes()) +} + +// LockState locks the state from being written. +func (s *LockedState) LockState() error { + if s.Lock { + return xerrors.Errorf("state already locked") + } + s.Lock = true + return nil +} + +// UnlockState frees the lock. +func (s *LockedState) UnlockState() error { + if !s.Lock { + return xerrors.Errorf("state already unlocked") + } + s.Lock = false + return nil +} + +// LockedState includes a lock in some state. +type LockedState struct { + Lock bool + S []byte +} + +func WrapLockableState(s LockableState) (*LockedState, error) { + var buf bytes.Buffer + if err := s.MarshalCBOR(&buf); err != nil { + return nil, err + } + return &LockedState{S: buf.Bytes()}, nil +} + +func (l *LockedState) SetState(s LockableState) error { + var buf bytes.Buffer + if err := s.MarshalCBOR(&buf); err != nil { + return err + } + l.S = buf.Bytes() + return nil +} + +func UnwrapLockableState(s *LockedState, out LockableState) error { + return out.UnmarshalCBOR(bytes.NewReader(s.S)) +} + +func (s *LockedState) IsLocked() bool { + return s.Lock +} + +func CidFromOutput(s LockableState) (cid.Cid, error) { + var buf bytes.Buffer + err := s.MarshalCBOR(&buf) + if err != nil { + return cid.Undef, err + } + return abi.CidBuilder.Sum(buf.Bytes()) +} diff --git a/chain/consensus/hierarchical/atomic/lock_test.go b/chain/consensus/hierarchical/atomic/lock_test.go new file mode 100644 index 000000000..f0b9d5530 --- /dev/null +++ b/chain/consensus/hierarchical/atomic/lock_test.go @@ -0,0 +1,171 @@ +package atomic_test + +import ( + "bytes" + "fmt" + "io" + "reflect" + "testing" + + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/atomic" + "github.com/stretchr/testify/require" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +func TestMarshal(t *testing.T) { + s 
:= &SampleState{S: "something to test"} + l, err := atomic.WrapLockableState(s) + require.NoError(t, err) + var buf bytes.Buffer + + err = l.LockState() + require.NoError(t, err) + err = l.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + l2, err := atomic.WrapLockableState(&SampleState{}) + require.NoError(t, err) + err = l2.UnmarshalCBOR(&buf) + require.NoError(t, err) + require.True(t, reflect.DeepEqual(l2, l)) + sm := &SampleState{} + err = atomic.UnwrapLockableState(l2, sm) + require.NoError(t, err) + require.Equal(t, s, sm) + + p, err := atomic.WrapLockParams(12, s) + require.NoError(t, err) + err = p.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + p2 := &atomic.LockParams{} + err = p2.UnmarshalCBOR(&buf) + require.NoError(t, err) + require.Equal(t, p, p2) + out := &SampleState{} + err = atomic.UnwrapLockParams(p2, out) + require.NoError(t, err) + require.Equal(t, out, s) + + so := &SampleState{S: "some output"} + u, err := atomic.WrapUnlockParams(p, so) + require.NoError(t, err) + err = u.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + u2 := &atomic.UnlockParams{} + err = u2.UnmarshalCBOR(&buf) + require.NoError(t, err) + require.Equal(t, p, p2) + err = atomic.UnwrapUnlockParams(u2, out) + require.NoError(t, err) + require.Equal(t, out, so) + + m, err := atomic.WrapMergeParams(so) + require.NoError(t, err) + err = m.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + m2 := &atomic.MergeParams{} + err = m2.UnmarshalCBOR(&buf) + require.NoError(t, err) + err = atomic.UnwrapMergeParams(m2, out) + require.NoError(t, err) + require.Equal(t, out, so) + // TODO: Marshal wrapping the wrong type. 
+}
+
+func TestLock(t *testing.T) {
+	s := &SampleState{S: "something to test"}
+	l, err := atomic.WrapLockableState(s)
+	require.NoError(t, err)
+	err = l.LockState()
+	require.NoError(t, err)
+	err = l.LockState()
+	require.Error(t, err)
+	err = l.UnlockState()
+	require.NoError(t, err)
+	err = l.UnlockState()
+	require.Error(t, err)
+}
+
+type SampleState struct {
+	S string
+}
+
+var _ atomic.LockableState = &SampleState{}
+
+var lengthBufSampleState = []byte{129}
+
+func (t *SampleState) Merge(other atomic.LockableState) error {
+	// Naïve merging with the other value.
+	// It's up to the developer to choose the best way
+	// to merge
+	tt, ok := other.(*SampleState)
+	if !ok {
+		return xerrors.Errorf("type of LokableState not SampleState")
+	}
+	t.S = tt.S
+	return nil
+}
+
+func (t *SampleState) MarshalCBOR(w io.Writer) error {
+	if t == nil {
+		_, err := w.Write(cbg.CborNull)
+		return err
+	}
+	if _, err := w.Write(lengthBufSampleState); err != nil {
+		return err
+	}
+
+	scratch := make([]byte, 9)
+
+	// t.S (string) (string)
+	if len(t.S) > cbg.MaxLength {
+		return xerrors.Errorf("Value in field t.S was too long")
+	}
+
+	if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.S))); err != nil {
+		return err
+	}
+	if _, err := io.WriteString(w, string(t.S)); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (t *SampleState) UnmarshalCBOR(r io.Reader) error {
+	*t = SampleState{}
+
+	br := cbg.GetPeeker(r)
+	scratch := make([]byte, 8)
+
+	maj, extra, err := cbg.CborReadHeaderBuf(br, scratch)
+	if err != nil {
+		return err
+	}
+	if maj != cbg.MajArray {
+		return fmt.Errorf("cbor input should be of type array")
+	}
+
+	if extra != 1 {
+		return fmt.Errorf("cbor input had wrong number of fields")
+	}
+
+	// t.S (string) (string)
+
+	{
+		sval, err := cbg.ReadStringBuf(br, scratch)
+		if err != nil {
+			return err
+		}
+
+		t.S = string(sval)
+	}
+	return nil
+}
diff --git a/chain/consensus/hierarchical/checkpoints/schema/checkpoint.go
b/chain/consensus/hierarchical/checkpoints/schema/checkpoint.go new file mode 100644 index 000000000..dcfbf27bf --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/schema/checkpoint.go @@ -0,0 +1,542 @@ +package schema + +import ( + "bytes" + "io" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/lotus/chain/types" + "github.com/ipfs/go-cid" + ipld "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/codec/dagjson" + cidlink "github.com/ipld/go-ipld-prime/linking/cid" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" + "golang.org/x/xerrors" +) + +// Linkproto is the default link prototype used for Checkpoints +// It uses the default CidBuilder for Filecoin (see abi) +// +// NOTE: Maybe we should consider using another CID proto +// for checkpoints so they can be identified uniquely. +var Linkproto = cidlink.LinkPrototype{ + Prefix: cid.Prefix{ + Version: 1, + Codec: abi.CidBuilder.GetCodec(), + MhType: abi.HashFunction, + MhLength: 16, + }, +} + +var ( + CheckpointSchema schema.Type + MsgMetaSchema schema.Type + + // NoPreviousCheck is a work-around to avoid undefined CIDs, + // that results in unexpected errors when marshalling. 
+ NoPreviousCheck cid.Cid + + // EmptyCheckpoint is an empty checkpoint that can be Marshalled + EmptyCheckpoint *Checkpoint +) + +func init() { + CheckpointSchema = initCheckpointSchema() + MsgMetaSchema = initCrossMsgMetaSchema() + var err error + NoPreviousCheck, err = Linkproto.Sum([]byte("nil")) + if err != nil { + panic(err) + } + + EmptyCheckpoint = &Checkpoint{ + Data: CheckData{ + Source: "", + Epoch: 0, + PrevCheckCid: NoPreviousCheck.Bytes(), + }, + } +} + +// ChildCheck +type ChildCheck struct { + Source string + // NOTE: Same problem as below, checks is + // []cid.Cid, but we are hiding it behind a bunch + // of bytes to prevent the VM from trying to fetch the + // cid from the state tree. We still want to use IPLD + // for now. We could fix this by setting an empty AMT + // with the list of Cids, but it may be too complex just + // for the sake of using CBOR. + Checks [][]byte //[]cid.Cid +} + +// CrossMsgMeta includes information about the messages being propagated from and to +// a subnet. +// +// MsgsCid is the cid of the list of cids of the mesasges propagated +// for a specific subnet in that checkpoint +type CrossMsgMeta struct { + From string // Determines the source of the messages being propagated in MsgsCid + To string // Determines the destination of the messages included in MsgsCid + MsgsCid []byte // cid.Cid of the msgMeta with the list of msgs. + Nonce int // Nonce of the msgMeta + Value string // Token amount being propagated in MsgMeta +} + +// CheckData is the data included in a Checkpoint. +type CheckData struct { + Source string + TipSet []byte // NOTE: For simplicity we add TipSetKey. We could include full TipSet + Epoch int + // FIXME: Under these bytes there's a cid.Cid. The reason for doing this is + // to prevent the VM from interpreting it as a CID from the state + // tree trying to fetch it and failing because it can't find anything, so we + // are "hiding" them behing a byte type. 
We could choose to use an EmptyCid + // and use cbor-gen. + PrevCheckCid []byte + Childs []ChildCheck // List of child checks + CrossMsgs []CrossMsgMeta // List with meta of msgs being propagated. +} + +// Checkpoint data structure +// +// - Data includes all the data for the checkpoint. The Cid of Data +// is what identifies a checkpoint uniquely. +// - Signature adds the signature from a miner. According to the verifier +// used for checkpoint this may be different things. +type Checkpoint struct { + Data CheckData + Signature []byte +} + +// initCheckpointType initializes the Checkpoint schema +func initCrossMsgMetaSchema() schema.Type { + ts := schema.TypeSystem{} + ts.Init() + ts.Accumulate(schema.SpawnString("String")) + ts.Accumulate(schema.SpawnInt("Int")) + ts.Accumulate(schema.SpawnLink("Link")) + ts.Accumulate(schema.SpawnBytes("Bytes")) + + ts.Accumulate(schema.SpawnStruct("CrossMsgMeta", + []schema.StructField{ + schema.SpawnStructField("From", "String", false, false), + schema.SpawnStructField("To", "String", false, false), + schema.SpawnStructField("MsgsCid", "Bytes", false, false), + schema.SpawnStructField("Nonce", "Int", false, false), + schema.SpawnStructField("Value", "String", false, false), + }, + schema.SpawnStructRepresentationMap(map[string]string{}), + )) + + return ts.TypeByName("CrossMsgMeta") +} + +// initCheckpointType initializes the Checkpoint schema +func initCheckpointSchema() schema.Type { + ts := schema.TypeSystem{} + ts.Init() + ts.Accumulate(schema.SpawnString("String")) + ts.Accumulate(schema.SpawnInt("Int")) + ts.Accumulate(schema.SpawnLink("Link")) + ts.Accumulate(schema.SpawnBytes("Bytes")) + + ts.Accumulate(schema.SpawnStruct("ChildCheck", + []schema.StructField{ + schema.SpawnStructField("Source", "String", false, false), + schema.SpawnStructField("Checks", "List_Bytes", false, false), + }, + schema.SpawnStructRepresentationMap(map[string]string{}), + )) + ts.Accumulate(initCrossMsgMetaSchema()) + + 
ts.Accumulate(schema.SpawnStruct("CheckData", + []schema.StructField{ + schema.SpawnStructField("Source", "String", false, false), + schema.SpawnStructField("TipSet", "Bytes", false, false), + schema.SpawnStructField("Epoch", "Int", false, false), + schema.SpawnStructField("PrevCheckCid", "Bytes", false, false), + schema.SpawnStructField("Childs", "List_ChildCheck", false, false), + schema.SpawnStructField("CrossMsgs", "List_CrossMsgMeta", false, false), + }, + schema.SpawnStructRepresentationMap(nil), + )) + ts.Accumulate(schema.SpawnStruct("Checkpoint", + []schema.StructField{ + schema.SpawnStructField("Data", "CheckData", false, false), + schema.SpawnStructField("Signature", "Bytes", false, false), + }, + schema.SpawnStructRepresentationMap(nil), + )) + ts.Accumulate(schema.SpawnList("List_String", "String", false)) + ts.Accumulate(schema.SpawnList("List_Link", "Link", false)) + ts.Accumulate(schema.SpawnList("List_Bytes", "Bytes", false)) + ts.Accumulate(schema.SpawnList("List_ChildCheck", "ChildCheck", false)) + ts.Accumulate(schema.SpawnList("List_CrossMsgMeta", "CrossMsgMeta", false)) + + return ts.TypeByName("Checkpoint") +} + +// Dumb linksystem used to generate links +// +// This linksystem doesn't store anything, just computes the Cid +// for a node. +func noStoreLinkSystem() ipld.LinkSystem { + lsys := cidlink.DefaultLinkSystem() + lsys.StorageWriteOpener = func(lctx ipld.LinkContext) (io.Writer, ipld.BlockWriteCommitter, error) { + buf := bytes.NewBuffer(nil) + return buf, func(lnk ipld.Link) error { + return nil + }, nil + } + return lsys +} + +// NewRawCheckpoint creates a checkpoint template to populate by the user. +// +// This is the template returned by the SCA actor for the miners to include +// the corresponding information and sign before commitment. 
+func NewRawCheckpoint(source address.SubnetID, epoch abi.ChainEpoch) *Checkpoint { + return &Checkpoint{ + Data: CheckData{ + Source: source.String(), + Epoch: int(epoch), + PrevCheckCid: NoPreviousCheck.Bytes(), + }, + } + +} + +func NewCrossMsgMeta(from, to address.SubnetID) *CrossMsgMeta { + return &CrossMsgMeta{ + From: from.String(), + To: to.String(), + Nonce: -1, + Value: "0", + } +} + +func (c *Checkpoint) IsEmpty() (bool, error) { + return c.Equals(EmptyCheckpoint) +} + +func (c *Checkpoint) SetPrevious(cid cid.Cid) { + c.Data.PrevCheckCid = cid.Bytes() +} + +func (c *Checkpoint) SetTipsetKey(ts types.TipSetKey) { + c.Data.TipSet = ts.Bytes() +} + +func (c *Checkpoint) SetEpoch(ep abi.ChainEpoch) { + c.Data.Epoch = int(ep) +} + +func (c *Checkpoint) PreviousCheck() (cid.Cid, error) { + _, cid, err := cid.CidFromBytes(c.Data.PrevCheckCid) + return cid, err +} + +func (c *Checkpoint) Source() address.SubnetID { + return address.SubnetID(c.Data.Source) +} + +func (c *Checkpoint) MarshalBinary() ([]byte, error) { + node := bindnode.Wrap(c, CheckpointSchema) + nodeRepr := node.Representation() + var buf bytes.Buffer + err := dagjson.Encode(nodeRepr, &buf) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (c *Checkpoint) UnmarshalBinary(b []byte) error { + // TODO: This could fix the need of NoPrevCheckpoint but it hasn't been implemented yet. 
+ // This returns `panic: TODO: schema.StructRepresentation_Map` + // nb := bindnode.Prototype(c, CheckpointSchema).Representation().NewBuilder() + nb := bindnode.Prototype(c, CheckpointSchema).NewBuilder() + err := dagjson.Decode(nb, bytes.NewReader(b)) + if err != nil { + return err + } + n := bindnode.Unwrap(nb.Build()) + + ch, ok := n.(*Checkpoint) + if !ok { + return xerrors.Errorf("Unmarshalled node not of type Checkpoint") + } + *c = *ch + return nil +} + +func (c *Checkpoint) MarshalCBOR(w io.Writer) error { + node := bindnode.Wrap(c, CheckpointSchema) + nodeRepr := node.Representation() + err := dagcbor.Encode(nodeRepr, w) + if err != nil { + return err + } + return nil +} + +func (c *Checkpoint) UnmarshalCBOR(r io.Reader) error { + nb := bindnode.Prototype(c, CheckpointSchema).NewBuilder() + err := dagcbor.Decode(nb, r) + if err != nil { + return err + } + n := bindnode.Unwrap(nb.Build()) + + ch, ok := n.(*Checkpoint) + if !ok { + return xerrors.Errorf("Unmarshalled node not of type CheckData") + } + *c = *ch + return nil +} + +func (cm *CrossMsgMeta) Cid() (cid.Cid, error) { + _, c, err := cid.CidFromBytes(cm.MsgsCid) + return c, err +} + +func (cm *CrossMsgMeta) GetFrom() address.SubnetID { + return address.SubnetID(cm.From) +} + +func (cm *CrossMsgMeta) GetTo() address.SubnetID { + return address.SubnetID(cm.To) +} + +func (cm *CrossMsgMeta) SetCid(c cid.Cid) { + cm.MsgsCid = c.Bytes() +} + +func (cm *CrossMsgMeta) Equal(other *CrossMsgMeta) bool { + return cm.From == other.From && cm.To == other.To && bytes.Equal(cm.MsgsCid, other.MsgsCid) +} + +func (cm *CrossMsgMeta) MarshalCBOR(w io.Writer) error { + node := bindnode.Wrap(cm, MsgMetaSchema) + nodeRepr := node.Representation() + err := dagcbor.Encode(nodeRepr, w) + if err != nil { + return err + } + return nil +} + +func (cm *CrossMsgMeta) UnmarshalCBOR(r io.Reader) error { + nb := bindnode.Prototype(cm, MsgMetaSchema).NewBuilder() + err := dagcbor.Decode(nb, r) + if err != nil { + return err + } + 
n := bindnode.Unwrap(nb.Build()) + + ch, ok := n.(*CrossMsgMeta) + if !ok { + return xerrors.Errorf("Unmarshalled node not of type CheckData") + } + *cm = *ch + return nil +} + +func (cm *CrossMsgMeta) GetValue() (abi.TokenAmount, error) { + return big.FromString(cm.Value) +} + +func (cm *CrossMsgMeta) AddValue(x abi.TokenAmount) error { + v, err := cm.GetValue() + if err != nil { + return err + } + cm.Value = big.Add(v, x).String() + return nil +} + +func (cm *CrossMsgMeta) SubValue(x abi.TokenAmount) error { + v, err := cm.GetValue() + if err != nil { + return err + } + cm.Value = big.Sub(v, x).String() + return nil +} + +func (c *Checkpoint) Equals(ch *Checkpoint) (bool, error) { + c1, err := c.Cid() + if err != nil { + return false, err + } + c2, err := ch.Cid() + if err != nil { + return false, err + } + return c1 == c2, nil + +} + +// Cid returns the unique identifier for a checkpoint. +// +// It is computed by removing the signature from the checkpoint. +// The checkpoints are unique but miners need to include additional +// signature information. +func (c *Checkpoint) Cid() (cid.Cid, error) { + // The Cid of a checkpoint is computed from the data. + // The signature may differ according to the verifier used. + ch := &Checkpoint{Data: c.Data} + lsys := noStoreLinkSystem() + lnk, err := lsys.ComputeLink(Linkproto, bindnode.Wrap(ch, CheckpointSchema)) + if err != nil { + return cid.Undef, err + } + return lnk.(cidlink.Link).Cid, nil +} + +// AddListChilds adds a list of child checkpoints into the checkpoint. +func (c *Checkpoint) AddListChilds(childs []*Checkpoint) { + for _, ch := range childs { + c.AddChild(ch) + } +} + +// AddChild adds a single child to the checkpoint +// +// If a child with the same Cid or the same epoch already +// exists, nothing is added. 
+func (c *Checkpoint) AddChild(ch *Checkpoint) error { + chcid, err := ch.Cid() + if err != nil { + return err + } + ind := c.HasChildSource(ch.Source()) + if ind >= 0 { + if ci := c.Data.Childs[ind].hasCheck(chcid); ci >= 0 { + return xerrors.Errorf("source already has a checkpoint with that Cid") + } + c.Data.Childs[ind].Checks = append(c.Data.Childs[ind].Checks, chcid.Bytes()) + return nil + } + chcc := ChildCheck{ch.Data.Source, [][]byte{chcid.Bytes()}} + c.Data.Childs = append(c.Data.Childs, chcc) + return nil +} + +func (c *ChildCheck) hasCheck(cid cid.Cid) int { + for i, ch := range c.Checks { + if bytes.Equal(ch, cid.Bytes()) { + return i + } + } + return -1 +} + +func (c *Checkpoint) HasChildSource(source address.SubnetID) int { + for i, ch := range c.Data.Childs { + if ch.Source == source.String() { + return i + } + } + return -1 +} + +func (c *Checkpoint) LenChilds() int { + return len(c.Data.Childs) +} + +func (c *Checkpoint) GetSourceChilds(source address.SubnetID) ChildCheck { + i := c.HasChildSource(source) + return c.GetChilds()[i] +} + +func (c *Checkpoint) GetChilds() []ChildCheck { + return c.Data.Childs +} + +func (c *Checkpoint) Epoch() abi.ChainEpoch { + return abi.ChainEpoch(c.Data.Epoch) +} + +func (c *Checkpoint) TipSet() (types.TipSetKey, error) { + return types.TipSetKeyFromBytes(c.Data.TipSet) +} + +func (c *Checkpoint) EqualTipSet(tsk types.TipSetKey) bool { + return bytes.Equal(tsk.Bytes(), c.Data.TipSet) +} + +// CrossMsgs returns crossMsgs data included in checkpoint +func (c *Checkpoint) CrossMsgs() []CrossMsgMeta { + return c.Data.CrossMsgs +} + +// CrossMsgMeta returns the MsgMeta from and to a subnet from a checkpoint +// and the index the crossMsgMeta is in the slice +func (c *Checkpoint) CrossMsgMeta(from, to address.SubnetID) (int, *CrossMsgMeta) { + for i, m := range c.Data.CrossMsgs { + if m.From == from.String() && m.To == to.String() { + return i, &m + } + } + return -1, nil +} + +func (c *Checkpoint) AppendMsgMeta(meta 
*CrossMsgMeta) { + _, has := c.CrossMsgMeta(meta.GetFrom(), meta.GetTo()) + // If no previous, append right away + if has == nil { + c.Data.CrossMsgs = append(c.Data.CrossMsgs, *meta) + return + } + + // If not equal Cids + if !bytes.Equal(has.MsgsCid, meta.MsgsCid) { + c.Data.CrossMsgs = append(c.Data.CrossMsgs, *meta) + return + } + + // Do nothing in the rest of the cases +} + +func (c *Checkpoint) SetMsgMetaCid(i int, cd cid.Cid) { + c.Data.CrossMsgs[i].MsgsCid = cd.Bytes() +} + +func (c *Checkpoint) AddValueMetaCid(i int, x abi.TokenAmount) error { + return c.Data.CrossMsgs[i].AddValue(x) +} + +func (c *Checkpoint) SubValueMetaCid(i int, x abi.TokenAmount) error { + return c.Data.CrossMsgs[i].SubValue(x) +} + +// CrossMsgsTo returns the crossMsgsMeta directed to a specific subnet +func (c *Checkpoint) CrossMsgsTo(to address.SubnetID) []CrossMsgMeta { + out := make([]CrossMsgMeta, 0) + for _, m := range c.Data.CrossMsgs { + if m.To == to.String() { + out = append(out, m) + } + } + return out +} + +func ByteSliceToCidList(l [][]byte) ([]cid.Cid, error) { + out := make([]cid.Cid, len(l)) + for i, x := range l { + _, c, err := cid.CidFromBytes(x) + if err != nil { + return nil, err + } + out[i] = c + } + return out, nil +} diff --git a/chain/consensus/hierarchical/checkpoints/schema/envelope.go b/chain/consensus/hierarchical/checkpoints/schema/envelope.go new file mode 100644 index 000000000..3aab0d55b --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/schema/envelope.go @@ -0,0 +1,90 @@ +package schema + +import ( + "bytes" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/ipld/go-ipld-prime/codec/dagcbor" + "github.com/ipld/go-ipld-prime/node/bindnode" + "github.com/ipld/go-ipld-prime/schema" + "golang.org/x/xerrors" +) + +type SigEnvelope interface { + Type() types.EnvelopeType + MarshalCBOR() ([]byte, error) + UnmarshalCBOR(b []byte) error +} + +var 
SingleSignEnvSchema schema.Type + +type SingleSignEnvelope struct { + Address string + IDAddress string + Signature []byte +} + +func init() { + SingleSignEnvSchema = initSingleSignEnvSchema() +} + +var _ SigEnvelope = &SingleSignEnvelope{} + +func initSingleSignEnvSchema() schema.Type { + ts := schema.TypeSystem{} + ts.Init() + ts.Accumulate(schema.SpawnString("String")) + ts.Accumulate(schema.SpawnBytes("Bytes")) + + ts.Accumulate(schema.SpawnStruct("SingleSignEnvelope", + []schema.StructField{ + schema.SpawnStructField("Address", "String", false, false), + schema.SpawnStructField("IDAddress", "String", false, false), + schema.SpawnStructField("Signature", "Bytes", false, false), + }, + schema.SpawnStructRepresentationMap(map[string]string{}), + )) + + return ts.TypeByName("SingleSignEnvelope") +} + +func NewSingleSignEnvelope(addr address.Address, idAddr address.Address, sig []byte) *SingleSignEnvelope { + return &SingleSignEnvelope{addr.String(), idAddr.String(), sig} +} + +func (s *SingleSignEnvelope) Type() types.EnvelopeType { + return types.SingleSignature +} + +// MarshalCBOR the envelope +func (s *SingleSignEnvelope) MarshalCBOR() ([]byte, error) { + node := bindnode.Wrap(s, SingleSignEnvSchema) + nodeRepr := node.Representation() + var buf bytes.Buffer + err := dagcbor.Encode(nodeRepr, &buf) + if err != nil { + return nil, err + } + // TODO: Consider returning io.Writer + return buf.Bytes(), nil +} + +// UnmarshalCBOR the envelope +// TODO: Consider accepting io.Reader as input +func (s *SingleSignEnvelope) UnmarshalCBOR(b []byte) error { + nb := bindnode.Prototype(s, SingleSignEnvSchema).NewBuilder() + err := dagcbor.Decode(nb, bytes.NewReader(b)) + if err != nil { + return err + } + n := bindnode.Unwrap(nb.Build()) + + sg, ok := n.(*SingleSignEnvelope) + if !ok { + return xerrors.Errorf("Unmarshalled node not of type SingleSignEnvelope") + } + *s = *sg + + return nil +} diff --git a/chain/consensus/hierarchical/checkpoints/schema/schema_test.go 
b/chain/consensus/hierarchical/checkpoints/schema/schema_test.go new file mode 100644 index 000000000..eafd9f5ff --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/schema/schema_test.go @@ -0,0 +1,196 @@ +package schema_test + +import ( + "bytes" + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + checkTypes "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/utils" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/stretchr/testify/require" +) + +func TestMarshalCheckpoint(t *testing.T) { + c1, _ := schema.Linkproto.Sum([]byte("a")) + epoch := abi.ChainEpoch(1000) + ch := schema.NewRawCheckpoint(address.RootSubnet, epoch) + ch.SetPrevious(c1) + + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + require.Equal(t, len(ch.GetChilds()), 3) + + // Marshal + var buf bytes.Buffer + err := ch.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + ch2 := &schema.Checkpoint{} + err = ch2.UnmarshalCBOR(&buf) + require.NoError(t, err) + eq, err := ch.Equals(ch2) + require.NoError(t, err) + require.True(t, eq) + + // Same for marshal binary + b, err := ch.MarshalBinary() + require.NoError(t, err) + + // Unmarshal and check equal + ch2 = &schema.Checkpoint{} + err = ch2.UnmarshalBinary(b) + require.NoError(t, err) + eq, err = ch.Equals(ch2) + require.NoError(t, err) + require.True(t, eq) + + // Check that Equals works. 
+ c1, _ = schema.Linkproto.Sum([]byte("b")) + epoch = abi.ChainEpoch(1001) + ch = schema.NewRawCheckpoint(address.RootSubnet, epoch) + ch.SetPrevious(c1) + eq, err = ch.Equals(ch2) + require.NoError(t, err) + require.False(t, eq) + +} + +func TestMarshalMsgMeta(t *testing.T) { + ch := &schema.CrossMsgMeta{ + From: "asedf", + To: "sfg", + MsgsCid: []byte("asdg"), + Value: "10", + } + // Marshal + var buf bytes.Buffer + err := ch.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + ch2 := &schema.CrossMsgMeta{} + err = ch2.UnmarshalCBOR(&buf) + require.NoError(t, err) + require.NoError(t, err) + require.Equal(t, ch, ch2) + +} + +func TestMarshalEmptyPrevious(t *testing.T) { + epoch := abi.ChainEpoch(1000) + ch := schema.NewRawCheckpoint(address.RootSubnet, epoch) + pr, _ := ch.PreviousCheck() + require.Equal(t, pr, schema.NoPreviousCheck) + + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + + // Marshal + var buf bytes.Buffer + err := ch.MarshalCBOR(&buf) + require.NoError(t, err) + + // Unmarshal and check equal + ch2 := &schema.Checkpoint{} + err = ch2.UnmarshalCBOR(&buf) + require.NoError(t, err) + eq, err := ch.Equals(ch2) + require.NoError(t, err) + require.True(t, eq) + + // Same for marshal binary + b, err := ch.MarshalBinary() + require.NoError(t, err) + + // Unmarshal and check equal + ch2 = &schema.Checkpoint{} + err = ch2.UnmarshalBinary(b) + require.NoError(t, err) + eq, err = ch.Equals(ch2) + require.NoError(t, err) + require.True(t, eq) +} + +func TestSignature(t *testing.T) { + ctx := context.Background() + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + idaddr := tutil.NewIDAddr(t, 103) + addr, err := w.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + env := &schema.SingleSignEnvelope{addr.String(), idaddr.String(), []byte("test")} + sig, err := schema.NewSignature(env, checkTypes.SingleSignature) + require.NoError(t, err) + b, err := 
sig.MarshalBinary() + require.NoError(t, err) + sig2 := &schema.Signature{} + err = sig2.UnmarshalBinary(b) + require.NoError(t, err) + require.True(t, sig.Equal(*sig2)) + sig3 := &schema.Signature{} + require.False(t, sig.Equal(*sig3)) +} + +func TestEncodeDecodeSignature(t *testing.T) { + origsig := schema.Signature{ + SignatureID: 3, + Sig: []byte("test-data"), + } + sigBytes, err := origsig.MarshalBinary() + if err != nil { + t.Fatal(err) + } + + if len(sigBytes) == 0 { + t.Fatal("did not encode sig") + } + + var sig schema.Signature + if err := sig.UnmarshalBinary(sigBytes); err != nil { + t.Fatal(err) + } + if sig.SignatureID != origsig.SignatureID { + t.Fatal("got wrong protocol ID") + } + if !bytes.Equal(sig.Sig, origsig.Sig) { + t.Fatal("did not get expected data") + } + if !sig.Equal(origsig) { + t.Fatal("sig no equal after decode") + } + + // Zero the bytes and ensure the decoded struct still works. + // This will fail if UnmarshalBinary did not copy the inner data bytes. + copy(sigBytes, make([]byte, 1024)) + if !sig.Equal(origsig) { + t.Fatal("sig no equal after buffer zeroing") + } + + sig.SignatureID = origsig.SignatureID + 1 + if sig.Equal(origsig) { + t.Fatal("sig should not be equal") + } +} + +func TestCrossMsgMetaValue(t *testing.T) { + mt := schema.NewCrossMsgMeta(address.SubnetID("from"), address.SubnetID("to")) + err := mt.AddValue(abi.NewTokenAmount(30)) + require.NoError(t, err) + v, err := mt.GetValue() + require.NoError(t, err) + require.Equal(t, v, abi.NewTokenAmount(30)) + err = mt.SubValue(abi.NewTokenAmount(20)) + require.NoError(t, err) + v, err = mt.GetValue() + require.NoError(t, err) + require.Equal(t, v, abi.NewTokenAmount(10)) +} diff --git a/chain/consensus/hierarchical/checkpoints/schema/signature.go b/chain/consensus/hierarchical/checkpoints/schema/signature.go new file mode 100644 index 000000000..6132ab85e --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/schema/signature.go @@ -0,0 +1,61 @@ +package schema + 
+import ( + "bytes" + "encoding" + + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/multiformats/go-varint" +) + +type Signature struct { + // SignatureID defines the protocol used for the checkpoint signature + SignatureID types.EnvelopeType + // Signature data + Sig []byte +} + +var ( + _ encoding.BinaryMarshaler = (*Signature)(nil) + _ encoding.BinaryUnmarshaler = (*Signature)(nil) +) + +func NewSignature(e SigEnvelope, t types.EnvelopeType) (*Signature, error) { + b, err := e.MarshalCBOR() + if err != nil { + return nil, err + } + return &Signature{t, b}, nil +} + +// Equal determines if two signature values are equal. +func (m Signature) Equal(other Signature) bool { + return m.SignatureID == other.SignatureID && bytes.Equal(m.Sig, other.Sig) +} + +// MarshalBinary implements encoding.BinaryMarshaler. +func (m Signature) MarshalBinary() ([]byte, error) { + varintSize := varint.UvarintSize(uint64(m.SignatureID)) + buf := make([]byte, varintSize+len(m.Sig)) + varint.PutUvarint(buf, uint64(m.SignatureID)) + if len(m.Sig) != 0 { + copy(buf[varintSize:], m.Sig) + } + return buf, nil +} + +// UnmarshalBinary implements encoding.BinaryUnmarshaler. +func (m *Signature) UnmarshalBinary(data []byte) error { + id, sigLen, err := varint.FromUvarint(data) + if err != nil { + return err + } + m.SignatureID = types.EnvelopeType(id) + + // We can't hold onto the input data. Make a copy. 
+ innerData := data[sigLen:] + m.Sig = make([]byte, len(innerData)) + copy(m.Sig, innerData) + + return nil +} diff --git a/chain/consensus/hierarchical/checkpoints/signer.go b/chain/consensus/hierarchical/checkpoints/signer.go new file mode 100644 index 000000000..a16df8b2f --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/signer.go @@ -0,0 +1,154 @@ +package checkpoint + +import ( + "context" + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/crypto" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/lotus/lib/sigs" + "golang.org/x/xerrors" +) + +var CheckpointMsgType = api.MsgMeta{Type: "checkpoint"} + +// Signer implements the logic to sign and verify a checkpoint. +// +// Each subnet may choose to implement their own signature and +// verification strategies +// for checkpoints. A subnet shard looking to implement their own +// verifier will need to implement this interface with the desired logic. +type Signer interface { + Sign(ctx context.Context, w api.Wallet, addr address.Address, c *schema.Checkpoint, opts ...SigningOpts) error + Verify(c *schema.Checkpoint) (*AddrInfo, error) +} + +type AddrInfo struct { + Addr address.Address + IDAddr address.Address +} + +var _ Signer = SingleSigner{} + +// SingleSignVerifier is a simple verifier that checks +// if the signature envolope included in the checkpoint is valid. 
+type SingleSigner struct{} + +func NewSingleSigner() SingleSigner { + return SingleSigner{} +} + +// signingOpts are additional options for the signature +type signingOpts struct { + idAddr address.Address +} + +// apply applies the given options to this config +func (c *signingOpts) apply(opts ...SigningOpts) error { + for i, opt := range opts { + if err := opt(c); err != nil { + return fmt.Errorf("signing option %d failed: %s", i, err) + } + } + return nil +} + +// IDAddr to include in signature envelope +func IDAddr(id address.Address) SigningOpts { + return func(c *signingOpts) error { + if id.Protocol() != address.ID { + return xerrors.Errorf("IDAddress not of type address") + } + c.idAddr = id + return nil + } +} + +type SigningOpts func(*signingOpts) error + +func (v SingleSigner) Sign(ctx context.Context, w api.Wallet, addr address.Address, c *schema.Checkpoint, opts ...SigningOpts) error { + // Check if it is a pkey and not an ID. + if addr.Protocol() != address.SECP256K1 { + return xerrors.Errorf("must be secp address") + } + + var cfg signingOpts + if err := cfg.apply(opts...); err != nil { + return err + } + + // Get CID of checkpoint + cid, err := c.Cid() + if err != nil { + return err + } + // Create raw signature + sign, err := w.WalletSign(ctx, addr, cid.Hash(), CheckpointMsgType) + if err != nil { + return err + } + rawSig, err := sign.MarshalBinary() + if err != nil { + return err + } + // Package it inside an envelope and the signature of the checkpoint + sig, err := schema.NewSignature(schema.NewSingleSignEnvelope(addr, cfg.idAddr, rawSig), types.SingleSignature) + if err != nil { + return err + } + c.Signature, err = sig.MarshalBinary() + + return err +} + +func (v SingleSigner) Verify(c *schema.Checkpoint) (*AddrInfo, error) { + // Collect envelope from signature in checkpoint. + sig := schema.Signature{} + err := sig.UnmarshalBinary(c.Signature) + if err != nil { + return nil, err + } + // Check if the envelope has the right type. 
+ if sig.SignatureID != types.SingleSignature { + return nil, xerrors.Errorf("wrong signer. Envelope is not of SingleSignType.") + } + + // Unmarshal the envelope. + e := schema.SingleSignEnvelope{} + err = e.UnmarshalCBOR(sig.Sig) + if err != nil { + return nil, err + } + // Get Cid of checkpoint to check signature. + cid, err := c.Cid() + if err != nil { + return nil, err + } + // Gather raw signature from envelope + checkSig := &crypto.Signature{} + err = checkSig.UnmarshalBinary(e.Signature) + if err != nil { + return nil, err + } + // Get address + addr, err := address.NewFromString(e.Address) + if err != nil { + return nil, err + } + idAddr := address.Undef + if e.IDAddress != "" { + idAddr, err = address.NewFromString(e.IDAddress) + if err != nil { + return nil, err + } + } + // Verify signature + err = sigs.Verify(checkSig, addr, cid.Hash()) + if err != nil { + return nil, err + } + return &AddrInfo{addr, idAddr}, nil +} diff --git a/chain/consensus/hierarchical/checkpoints/signer_test.go b/chain/consensus/hierarchical/checkpoints/signer_test.go new file mode 100644 index 000000000..a1ffc3a5b --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/signer_test.go @@ -0,0 +1,68 @@ +package checkpoint_test + +import ( + "context" + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + checkpoint "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/utils" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/stretchr/testify/require" +) + +var cb = schema.Linkproto + +func TestSimpleSigner(t *testing.T) { + ctx := context.Background() + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + 
if err != nil { + t.Fatal(err) + } + addr, err := w.WalletNew(ctx, types.KTSecp256k1) + require.NoError(t, err) + + ver := checkpoint.NewSingleSigner() + + c1, _ := cb.Sum([]byte("a")) + epoch := abi.ChainEpoch(1000) + ch := schema.NewRawCheckpoint(address.RootSubnet, epoch) + ch.SetPrevious(c1) + + // Add child checkpoints + ch.AddListChilds(utils.GenRandChecks(3)) + + // Sign without opts + err = ver.Sign(ctx, w, addr, ch) + require.NoError(t, err) + require.NotEqual(t, len(ch.Signature), 0) + + // Verify + sigAddr, err := ver.Verify(ch) + require.Equal(t, addr, sigAddr.Addr) + require.Equal(t, address.Undef, sigAddr.IDAddr) + require.NoError(t, err) + + // Verification fails if something in the checkpoint changes + ch.Data.Epoch = 120 + _, err = ver.Verify(ch) + require.Error(t, err) + + // Sign with opts + idaddr := tutil.NewIDAddr(t, 103) + err = ver.Sign(ctx, w, addr, ch, []checkpoint.SigningOpts{checkpoint.IDAddr(idaddr)}...) + require.NoError(t, err) + require.NotEqual(t, len(ch.Signature), 0) + + // Verify + sigAddr, err = ver.Verify(ch) + require.Equal(t, addr, sigAddr.Addr) + require.Equal(t, idaddr, sigAddr.IDAddr) + require.NoError(t, err) + + // TODO: Test that if the type is not right we return and error. +} diff --git a/chain/consensus/hierarchical/checkpoints/types/types.go b/chain/consensus/hierarchical/checkpoints/types/types.go new file mode 100644 index 000000000..80bce386d --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/types/types.go @@ -0,0 +1,31 @@ +package types + +import ( + "github.com/filecoin-project/go-state-types/abi" +) + +type EnvelopeType uint64 + +const ( + SingleSignature EnvelopeType = iota +) + +// CheckpointEpoch returns the epoch of the next checkpoint +// that needs to be signed +// +// Return the template of the checkpoint template that has been +// frozen and that is ready for signing and commitment in the +// current window. 
+func CheckpointEpoch(epoch abi.ChainEpoch, period abi.ChainEpoch) abi.ChainEpoch { + ind := epoch / period + return abi.ChainEpoch(period * ind) +} + +// WindowEpoch returns the epoch of the active checkpoint window +// +// Determines the epoch to which new checkpoints and xshard transactions need +// to be assigned. +func WindowEpoch(epoch abi.ChainEpoch, period abi.ChainEpoch) abi.ChainEpoch { + ind := epoch / period + return abi.ChainEpoch(period * (ind + 1)) +} diff --git a/chain/consensus/hierarchical/checkpoints/types/types_test.go b/chain/consensus/hierarchical/checkpoints/types/types_test.go new file mode 100644 index 000000000..c2b9a9e8c --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/types/types_test.go @@ -0,0 +1,15 @@ +package types + +import ( + "testing" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/require" +) + +func TestEpochs(t *testing.T) { + period := abi.ChainEpoch(100) + epoch := abi.ChainEpoch(120) + require.Equal(t, CheckpointEpoch(epoch, period), abi.ChainEpoch(100)) + require.Equal(t, WindowEpoch(epoch, period), abi.ChainEpoch(200)) +} diff --git a/chain/consensus/hierarchical/checkpoints/utils/utils.go b/chain/consensus/hierarchical/checkpoints/utils/utils.go new file mode 100644 index 000000000..7ba85d530 --- /dev/null +++ b/chain/consensus/hierarchical/checkpoints/utils/utils.go @@ -0,0 +1,22 @@ +package utils + +import ( + "strconv" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" +) + +func GenRandChecks(num int) []*schema.Checkpoint { + l := make([]*schema.Checkpoint, 0) + for i := 0; i < num; i++ { + s := strconv.FormatInt(int64(i), 10) + c, _ := schema.Linkproto.Sum([]byte(s)) + ch := schema.NewRawCheckpoint(address.SubnetID(s), abi.ChainEpoch(i)) + ch.SetPrevious(c) + l = append(l, ch) + + } + return l +} diff --git 
a/chain/consensus/hierarchical/modules/modules.go b/chain/consensus/hierarchical/modules/modules.go new file mode 100644 index 000000000..04d765123 --- /dev/null +++ b/chain/consensus/hierarchical/modules/modules.go @@ -0,0 +1,10 @@ +package module + +import ( + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + subnetmgr "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/manager" +) + +func SetSubMgrIface(mgr *subnetmgr.SubnetMgr) subnet.SubnetMgr { + return mgr +} diff --git a/chain/consensus/hierarchical/naming.go b/chain/consensus/hierarchical/naming.go deleted file mode 100644 index ce778af01..000000000 --- a/chain/consensus/hierarchical/naming.go +++ /dev/null @@ -1,60 +0,0 @@ -package hierarchical - -import ( - "path" - - address "github.com/filecoin-project/go-address" - cid "github.com/ipfs/go-cid" - mh "github.com/multiformats/go-multihash" -) - -// Root is the ID of the root network -const RootSubnet = SubnetID("/root") - -// Undef is the undef ID -const UndefID = SubnetID("") - -// SubNetID represents the ID of a subnet -type SubnetID string - -// Builder to generate subnet IDs from their name -var builder = cid.V1Builder{Codec: cid.Raw, MhType: mh.IDENTITY} - -// NewSubnetID generates the ID for a subnet from the networkName -// of its parent. -// -// It takes the parent name and adds the source address of the subnet -// actor that represents the subnet. -func NewSubnetID(parentName SubnetID, SubnetActorAddr address.Address) SubnetID { - return SubnetID(path.Join(parentName.String(), SubnetActorAddr.String())) -} - -// Cid for the subnetID -func (id SubnetID) Cid() (cid.Cid, error) { - return builder.Sum([]byte(id)) -} - -// Parent returns the ID of the parent network. 
-func (id SubnetID) Parent() SubnetID { - if id == RootSubnet { - return UndefID - } - return SubnetID(path.Dir(string(id))) -} - -// Actor returns the subnet actor for a subnet -// -// Returns the address of the actor that handles the logic for a subnet -// in its parent Subnet. -func (id SubnetID) Actor() (address.Address, error) { - if id == RootSubnet { - return address.Undef, nil - } - _, saddr := path.Split(string(id)) - return address.NewFromString(saddr) -} - -// String returns the id in string form -func (id SubnetID) String() string { - return string(id) -} diff --git a/chain/consensus/hierarchical/naming_test.go b/chain/consensus/hierarchical/naming_test.go deleted file mode 100644 index 9d21a8d9b..000000000 --- a/chain/consensus/hierarchical/naming_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package hierarchical_test - -import ( - "testing" - - address "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - tutil "github.com/filecoin-project/specs-actors/v6/support/testing" - "github.com/stretchr/testify/require" -) - -func TestNaming(t *testing.T) { - addr1 := tutil.NewIDAddr(t, 101) - addr2 := tutil.NewIDAddr(t, 102) - root := hierarchical.RootSubnet - net1 := hierarchical.NewSubnetID(root, addr1) - net2 := hierarchical.NewSubnetID(net1, addr2) - - t.Log("Test actors") - actor1, err := net1.Actor() - require.NoError(t, err) - require.Equal(t, actor1, addr1) - actor2, err := net2.Actor() - require.NoError(t, err) - require.Equal(t, actor2, addr2) - actorRoot, err := root.Actor() - require.NoError(t, err) - require.Equal(t, actorRoot, address.Undef) - - t.Log("Test parents") - parent1 := net1.Parent() - require.Equal(t, root, parent1) - parent2 := net2.Parent() - require.Equal(t, parent2, net1) - parentRoot := root.Parent() - require.Equal(t, parentRoot, hierarchical.UndefID) - -} diff --git a/chain/consensus/hierarchical/subnet/consensus/consensus.go 
b/chain/consensus/hierarchical/subnet/consensus/consensus.go new file mode 100644 index 000000000..5a0b1196a --- /dev/null +++ b/chain/consensus/hierarchical/subnet/consensus/consensus.go @@ -0,0 +1,92 @@ +package consensus + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/consensus" + "github.com/filecoin-project/lotus/chain/consensus/delegcns" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/lotus/chain/consensus/tspow" + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/modules/dtypes" + logging "github.com/ipfs/go-log/v2" + "golang.org/x/xerrors" +) + +var log = logging.Logger("subnet-cns") + +// TODO // FIXME: Make an SubnetConsensus interface from this functions +// to avoid having to use so many switch/cases. Deferring to the next +// refactor. 
+func Weight(consensus hierarchical.ConsensusType) (store.WeightFunc, error) { + switch consensus { + case hierarchical.Delegated: + return delegcns.Weight, nil + case hierarchical.PoW: + return tspow.Weight, nil + default: + return nil, xerrors.New("consensus type not suported") + } +} + +func New(consensus hierarchical.ConsensusType, + sm *stmgr.StateManager, snMgr subnet.SubnetMgr, + beacon beacon.Schedule, r *resolver.Resolver, + verifier ffiwrapper.Verifier, + genesis chain.Genesis, netName dtypes.NetworkName) (consensus.Consensus, error) { + + switch consensus { + case hierarchical.Delegated: + return delegcns.NewDelegatedConsensus(sm, snMgr, beacon, r, verifier, genesis, netName), nil + case hierarchical.PoW: + return tspow.NewTSPoWConsensus(sm, snMgr, beacon, r, verifier, genesis, netName), nil + default: + return nil, xerrors.New("consensus type not suported") + } +} + +func Mine(ctx context.Context, api v1api.FullNode, cnsType hierarchical.ConsensusType) error { + // TODO: We should check if these processes throw an error + switch cnsType { + case hierarchical.Delegated: + go delegcns.Mine(ctx, api) + case hierarchical.PoW: + miner, err := GetWallet(ctx, api) + if err != nil { + log.Errorw("no valid identity found for PoW mining", "err", err) + return err + } + go tspow.Mine(ctx, miner, api) + default: + return xerrors.New("consensus type not suported") + } + return nil +} + +// Get an identity from the peer's wallet. +// First check if a default identity has been set and +// if not take the first from the list. +// NOTE: We should probably make this configurable. 
+func GetWallet(ctx context.Context, api v1api.FullNode) (address.Address, error) { + addr, err := api.WalletDefaultAddress(ctx) + // If no defualt wallet set + if err != nil || addr == address.Undef { + addrs, err := api.WalletList(ctx) + if err != nil { + return address.Undef, err + } + if len(addrs) == 0 { + return address.Undef, xerrors.Errorf("no valid wallet found in peer") + } + addr = addrs[0] + } + return addr, nil +} diff --git a/chain/consensus/hierarchical/subnet/events.go b/chain/consensus/hierarchical/subnet/events.go deleted file mode 100644 index 7edbb15a4..000000000 --- a/chain/consensus/hierarchical/subnet/events.go +++ /dev/null @@ -1,92 +0,0 @@ -package subnet - -import ( - "context" - "reflect" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" - "github.com/filecoin-project/lotus/chain/events" - "github.com/filecoin-project/lotus/chain/types" - cbor "github.com/ipfs/go-ipld-cbor" -) - -// listenSCAEvents is the routine responsible for listening to events in -// the SCA of each subnet. -// -// TODO: This is a placeholder. with what we have implemented so far there -// is no need to synchronously listen to new events. We'll need this once -// we propagate checkpoints and support cross-subnet transactions. -func (s *SubnetMgr) listenSCAEvents(ctx context.Context, sh *Subnet) { - api := s.api - evs := s.events - id := hierarchical.RootSubnet - - // If subnet is nil, we are listening from the root chain. - // TODO: Revisit this, there is probably a most elegan way to - // do this. 
- if sh != nil { - id = sh.ID - api = sh.api - evs = sh.events - } - - checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { - return false, true, nil - } - - changeHandler := func(oldTs, newTs *types.TipSet, states events.StateChange, curH abi.ChainEpoch) (more bool, err error) { - log.Infow("State change detected for SCA in subnet", "subnetID", id) - - // Trigger the detected change in subnets. - return s.triggerChange(ctx, api, struct{}{}) - - } - - revertHandler := func(ctx context.Context, ts *types.TipSet) error { - return nil - } - - match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { - oldAct, err := api.StateGetActor(ctx, sca.SubnetCoordActorAddr, oldTs.Key()) - if err != nil { - return false, nil, err - } - newAct, err := api.StateGetActor(ctx, sca.SubnetCoordActorAddr, newTs.Key()) - if err != nil { - return false, nil, err - } - - var oldSt, newSt sca.SCAState - - bs := blockstore.NewAPIBlockstore(api) - cst := cbor.NewCborStore(bs) - if err := cst.Get(ctx, oldAct.Head, &oldSt); err != nil { - return false, nil, err - } - if err := cst.Get(ctx, newAct.Head, &newSt); err != nil { - return false, nil, err - } - - // If there was some change in the state, for now, trigger change function. - if !reflect.DeepEqual(newSt, oldSt) { - return true, nil, nil - } - - return false, nil, nil - - } - - err := evs.StateChanged(checkFunc, changeHandler, revertHandler, 5, 76587687658765876, match) - if err != nil { - return - } -} - -func (s *SubnetMgr) triggerChange(ctx context.Context, api *API, diff struct{}) (more bool, err error) { - log.Warnw("No logic implemented yet when SCA changes are detected", "subnetID", api.NetworkName) - // TODO: This will be populated when checkpointing and cross-subnet transactions come. 
- return true, nil -} diff --git a/chain/consensus/hierarchical/subnet/iface.go b/chain/consensus/hierarchical/subnet/iface.go new file mode 100644 index 000000000..6725a4385 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/iface.go @@ -0,0 +1,17 @@ +package subnet + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api/v1api" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" +) + +// SubnetMgr is a convenient interface to get SubnetMgr API +// without dependency cycles. +type SubnetMgr interface { + GetSubnetAPI(id address.SubnetID) (v1api.FullNode, error) + GetSCAState(ctx context.Context, id address.SubnetID) (*sca.SCAState, blockadt.Store, error) +} diff --git a/chain/consensus/hierarchical/subnet/api.go b/chain/consensus/hierarchical/subnet/manager/api.go similarity index 99% rename from chain/consensus/hierarchical/subnet/api.go rename to chain/consensus/hierarchical/subnet/manager/api.go index b23031042..cb6fa106f 100644 --- a/chain/consensus/hierarchical/subnet/api.go +++ b/chain/consensus/hierarchical/subnet/manager/api.go @@ -1,4 +1,4 @@ -package subnet +package subnetmgr import ( "context" @@ -181,6 +181,7 @@ func (sh *Subnet) populateAPIs( BeaconAPI: parentAPI.BeaconAPI, DS: sh.ds, NetworkName: dtypes.NetworkName(sh.ID.String()), + SubnetMgr: parentAPI.SubnetMgr, } // Register API so it can be accessed from CLI diff --git a/chain/consensus/hierarchical/subnet/manager/checkpoint.go b/chain/consensus/hierarchical/subnet/manager/checkpoint.go new file mode 100644 index 000000000..f69ff4c16 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/manager/checkpoint.go @@ -0,0 +1,209 @@ +package subnetmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + 
"github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + ctypes "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" +) + +func (s *SubnetMgr) SubmitSignedCheckpoint( + ctx context.Context, wallet address.Address, + id address.SubnetID, ch *schema.Checkpoint) (cid.Cid, error) { + + // TODO: Think a bit deeper the locking strategy for subnets. + s.lk.RLock() + defer s.lk.RUnlock() + + // Get actor from subnet ID + SubnetActor, err := id.Actor() + if err != nil { + return cid.Undef, err + } + + // Get the api for the parent network hosting the subnet actor + // for the subnet. + parentAPI, err := s.getParentAPI(id) + if err != nil { + return cid.Undef, err + } + + b, err := ch.MarshalBinary() + if err != nil { + return cid.Undef, err + } + params := &sca.CheckpointParams{Checkpoint: b} + serparams, err := actors.SerializeParams(params) + if err != nil { + return cid.Undef, xerrors.Errorf("failed serializing init actor params: %s", err) + } + + // Get the parent and the actor to know where to send the message. + smsg, aerr := parentAPI.MpoolPushMessage(ctx, &types.Message{ + To: SubnetActor, + From: wallet, + Value: abi.NewTokenAmount(0), + Method: subnet.Methods.SubmitCheckpoint, + Params: serparams, + GasLimit: 1_000_000_000, // NOTE: Adding high gas limit to ensure that the message is accepted. 
+ }, nil) + if aerr != nil { + log.Errorf("Error MpoolPushMessage: %s", aerr) + return cid.Undef, aerr + } + + msg := smsg.Cid() + + chcid, _ := ch.Cid() + log.Infow("Success signing checkpoint in subnet", "subnetID", id, "message", msg, "cid", chcid) + return smsg.Cid(), nil +} + +func (s *SubnetMgr) ListCheckpoints( + ctx context.Context, id address.SubnetID, num int) ([]*schema.Checkpoint, error) { + + // TODO: Think a bit deeper the locking strategy for subnets. + s.lk.RLock() + defer s.lk.RUnlock() + + // Get actor from subnet ID + subnetActAddr, err := id.Actor() + if err != nil { + return nil, err + } + + // Get the api for the parent network hosting the subnet actor + // for the subnet. + parentAPI, err := s.getParentAPI(id) + if err != nil { + return nil, err + } + + subAPI := s.getAPI(id) + if subAPI == nil { + return nil, xerrors.Errorf("Not listening to subnet") + } + + subnetAct, err := parentAPI.StateGetActor(ctx, subnetActAddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + var snst subnet.SubnetState + pbs := blockstore.NewAPIBlockstore(parentAPI) + pcst := cbor.NewCborStore(pbs) + if err := pcst.Get(ctx, subnetAct.Head, &snst); err != nil { + return nil, err + } + pstore := adt.WrapStore(ctx, pcst) + out := make([]*schema.Checkpoint, 0) + ts := subAPI.ChainAPI.Chain.GetHeaviestTipSet() + currEpoch := ts.Height() + for i := 0; i < num; i++ { + signWindow := ctypes.CheckpointEpoch(currEpoch, snst.CheckPeriod) + signWindow = abi.ChainEpoch(int(signWindow) - i*int(snst.CheckPeriod)) + if signWindow < 0 { + break + } + ch, found, err := snst.GetCheckpoint(pstore, signWindow) + if err != nil { + return nil, err + } + if found { + out = append(out, ch) + } + } + return out, nil +} + +func (s *SubnetMgr) ValidateCheckpoint( + ctx context.Context, id address.SubnetID, epoch abi.ChainEpoch) (*schema.Checkpoint, error) { + + // TODO: Think a bit deeper the locking strategy for subnets. 
+ s.lk.RLock() + defer s.lk.RUnlock() + + // Get actor from subnet ID + subnetActAddr, err := id.Actor() + if err != nil { + return nil, err + } + + // Get the api for the parent network hosting the subnet actor + // for the subnet. + parentAPI, err := s.getParentAPI(id) + if err != nil { + return nil, err + } + + subAPI := s.getAPI(id) + if subAPI == nil { + xerrors.Errorf("Not listening to subnet") + } + + subnetAct, err := parentAPI.StateGetActor(ctx, subnetActAddr, types.EmptyTSK) + if err != nil { + return nil, err + } + + var snst subnet.SubnetState + pbs := blockstore.NewAPIBlockstore(parentAPI) + pcst := cbor.NewCborStore(pbs) + if err := pcst.Get(ctx, subnetAct.Head, &snst); err != nil { + return nil, err + } + pstore := adt.WrapStore(ctx, pcst) + ts := subAPI.ChainAPI.Chain.GetHeaviestTipSet() + + // If epoch < 0 we are singalling that we want to verify the + // checkpoint for the latest epoch submitted. + if epoch < 0 { + currEpoch := ts.Height() + epoch = ctypes.CheckpointEpoch(currEpoch-snst.CheckPeriod, snst.CheckPeriod) + } + + ch, found, err := snst.GetCheckpoint(pstore, epoch) + if err != nil { + return nil, err + } + if !found { + return nil, xerrors.Errorf("no checkpoint committed in epoch: %s", epoch) + } + prevCid, err := snst.PrevCheckCid(pstore, epoch) + if err != nil { + return nil, err + } + + if pchc, _ := ch.PreviousCheck(); prevCid != pchc { + return ch, xerrors.Errorf("verification failed, previous checkpoints not equal: %s, %s", prevCid, pchc) + } + + if ch.Epoch() != epoch { + return ch, xerrors.Errorf("verification failed, wrong epoch: %s, %s", ch.Epoch(), epoch) + } + + subts, err := subAPI.ChainGetTipSetByHeight(ctx, epoch, types.EmptyTSK) + if err != nil { + return nil, err + } + if !ch.EqualTipSet(subts.Key()) { + chtsk, _ := ch.TipSet() + return ch, xerrors.Errorf("verification failed, checkpoint includes wrong tipSets : %s, %s", ts.Key(), chtsk) + } + + // TODO: Verify that the checkpoint has been committed in the corresponding 
SCA as a sanity check. + // TODO: Verify that committed childs are correct + // TODO: Any other verification? + return ch, nil +} diff --git a/chain/consensus/hierarchical/subnet/manager/crossmsg.go b/chain/consensus/hierarchical/subnet/manager/crossmsg.go new file mode 100644 index 000000000..1358954a4 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/manager/crossmsg.go @@ -0,0 +1,390 @@ +package subnetmgr + +import ( + "context" + "sync" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" +) + +// finalityWait is the number of epochs that we will wait +// before being able to re-propose a cross-msg. This is used to +// wait for all the state changes to be propagated. +const finalityWait = 15 + +func newCrossMsgPool() *crossMsgPool { + return &crossMsgPool{pool: make(map[address.SubnetID]*lastApplied)} +} + +type crossMsgPool struct { + lk sync.RWMutex + pool map[address.SubnetID]*lastApplied +} + +type lastApplied struct { + lk sync.RWMutex + topdown map[uint64]abi.ChainEpoch // nonce[epoch] + bottomup map[uint64]abi.ChainEpoch // nonce[epoch] + height abi.ChainEpoch +} + +func (cm *crossMsgPool) getPool(id address.SubnetID, height abi.ChainEpoch) *lastApplied { + cm.lk.RLock() + p, ok := cm.pool[id] + cm.lk.RUnlock() + // If no pool for subnet or height higher than the subsequent one. + // Add a buffer before pruning message pool. 
+ if !ok || height > p.height+finalityWait { + cm.lk.Lock() + p = &lastApplied{ + height: height, + topdown: make(map[uint64]abi.ChainEpoch), + bottomup: make(map[uint64]abi.ChainEpoch), + } + cm.pool[id] = p + cm.lk.Unlock() + } + + return p +} + +func (cm *crossMsgPool) applyTopDown(n uint64, id address.SubnetID, height abi.ChainEpoch) { + p := cm.getPool(id, height) + p.lk.Lock() + defer p.lk.Unlock() + p.topdown[n] = height +} + +func (cm *crossMsgPool) applyBottomUp(n uint64, id address.SubnetID, height abi.ChainEpoch) { + p := cm.getPool(id, height) + p.lk.Lock() + defer p.lk.Unlock() + p.bottomup[n] = height +} + +func (cm *crossMsgPool) isTopDownApplied(n uint64, id address.SubnetID, height abi.ChainEpoch) bool { + p := cm.getPool(id, height) + p.lk.RLock() + defer p.lk.RUnlock() + h, ok := p.topdown[n] + return ok && h != height +} + +func (cm *crossMsgPool) isBottomUpApplied(n uint64, id address.SubnetID, height abi.ChainEpoch) bool { + p := cm.getPool(id, height) + p.lk.RLock() + defer p.lk.RUnlock() + h, ok := p.bottomup[n] + return ok && h != height +} + +// GetCrossMsgsPool returns a list with `num` number of of cross messages pending for validation. +// +// height determines the current consensus height +func (s *SubnetMgr) GetCrossMsgsPool( + ctx context.Context, id address.SubnetID, height abi.ChainEpoch) ([]*types.Message, error) { + // TODO: Think a bit deeper the locking strategy for subnets. + // s.lk.RLock() + // defer s.lk.RUnlock() + + var ( + topdown []*types.Message + bottomup []*types.Message + err error + ) + + // topDown messages only supported in subnets, not the root. + if !s.isRoot(id) { + topdown, err = s.getTopDownPool(ctx, id, height) + if err != nil { + return nil, err + } + } + + // Get bottomup messages and return all cross-messages. 
+ bottomup, err = s.getBottomUpPool(ctx, id, height) + if err != nil { + return nil, err + } + + out := make([]*types.Message, len(topdown)+len(bottomup)) + copy(out[:len(topdown)], topdown) + copy(out[len(topdown):], bottomup) + + log.Debugf("Picked up %d cross-msgs from CrossMsgPool", len(out)) + return out, nil +} + +// FundSubnet injects funds in a subnet and returns the Cid of the +// message of the parent chain that included it. +func (s *SubnetMgr) FundSubnet( + ctx context.Context, wallet address.Address, + id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) { + + // TODO: Think a bit deeper the locking strategy for subnets. + s.lk.RLock() + defer s.lk.RUnlock() + + // Get the api for the parent network hosting the subnet actor + // for the subnet. + parentAPI, err := s.getParentAPI(id) + if err != nil { + return cid.Undef, err + } + + params := &sca.SubnetIDParam{ID: id.String()} + serparams, err := actors.SerializeParams(params) + if err != nil { + return cid.Undef, xerrors.Errorf("failed serializing init actor params: %s", err) + } + + // Get the parent and the actor to know where to send the message. + smsg, aerr := parentAPI.MpoolPushMessage(ctx, &types.Message{ + To: hierarchical.SubnetCoordActorAddr, + From: wallet, + Value: value, + Method: sca.Methods.Fund, + Params: serparams, + }, nil) + if aerr != nil { + log.Errorf("Error MpoolPushMessage: %s", aerr) + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +// ReleaseFunds releases some funds from a subnet +func (s *SubnetMgr) ReleaseFunds( + ctx context.Context, wallet address.Address, + id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) { + + // TODO: Think a bit deeper the locking strategy for subnets. 
+ s.lk.RLock() + defer s.lk.RUnlock() + + // Get the api for the subnet + api, err := s.GetSubnetAPI(id) + if err != nil { + return cid.Undef, err + } + + // Send a release message to SCA in subnet + smsg, aerr := api.MpoolPushMessage(ctx, &types.Message{ + To: hierarchical.SubnetCoordActorAddr, + From: wallet, + Value: value, + Method: sca.Methods.Release, + Params: nil, + }, nil) + if aerr != nil { + log.Errorf("Error MpoolPushMessage: %s", aerr) + return cid.Undef, aerr + } + + return smsg.Cid(), nil +} + +func (s *SubnetMgr) getSCAStateWithFinality(ctx context.Context, api *API, id address.SubnetID) (*sca.SCAState, adt.Store, error) { + var err error + finTs := api.ChainAPI.Chain.GetHeaviestTipSet() + height := finTs.Height() + + // Avoid negative epochs + if height-finalityThreshold >= 0 { + // Go back finalityThreshold to ensure the state is final in parent chain + finTs, err = api.ChainGetTipSetByHeight(ctx, height-finalityThreshold, types.EmptyTSK) + if err != nil { + return nil, nil, err + } + } + + // Get parent state back in the past where it should be final. + // (we don't want to validate in the subnet a state that may be reverted in the parent) + pAct, err := api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, finTs.Key()) + if err != nil { + return nil, nil, err + } + var st sca.SCAState + pbs := blockstore.NewAPIBlockstore(api) + pcst := cbor.NewCborStore(pbs) + if err := pcst.Get(ctx, pAct.Head, &st); err != nil { + return nil, nil, err + } + + return &st, adt.WrapStore(ctx, pcst), nil +} + +// getParentSCAWithFinality returns the state of the SCA of the parent with `finalityThreshold` +// epochs ago to ensure that no reversion happens and we can operate with the state we got. 
+func (s *SubnetMgr) getParentSCAWithFinality(ctx context.Context, id address.SubnetID) (*sca.SCAState, adt.Store, error) { + parentAPI, err := s.getParentAPI(id) + if err != nil { + return nil, nil, err + } + return s.getSCAStateWithFinality(ctx, parentAPI, id) +} + +func (s *SubnetMgr) getTopDownPool(ctx context.Context, id address.SubnetID, height abi.ChainEpoch) ([]*types.Message, error) { + + // Get status for SCA in subnet to determine from which nonce to fetch messages + subAPI := s.getAPI(id) + if subAPI == nil { + return nil, xerrors.Errorf("Not listening to subnet") + } + subnetAct, err := subAPI.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, types.EmptyTSK) + if err != nil { + return nil, err + } + var snst sca.SCAState + bs := blockstore.NewAPIBlockstore(subAPI) + cst := cbor.NewCborStore(bs) + if err := cst.Get(ctx, subnetAct.Head, &snst); err != nil { + return nil, err + } + + // Get tipset at height-finalityThreshold to ensure some level of finality + // to get pool of cross-messages. + st, pstore, err := s.getParentSCAWithFinality(ctx, id) + if err != nil { + return nil, err + } + // Get topDown messages from parent SCA + sh, found, err := st.GetSubnet(pstore, id) + if err != nil { + return nil, err + } + if !found { + return nil, xerrors.Errorf("subnet with ID %v not found", id) + } + + msgs, err := sh.TopDownMsgFromNonce(pstore, snst.AppliedTopDownNonce) + if err != nil { + return nil, err + } + out := make([]*types.Message, 0) + for _, m := range msgs { + // The pool waits a few epochs before re-proposing a cross-msg if the applied nonce + // hasn't changed in order to give enough time for state changes to propagate. + if s.cm.isTopDownApplied(m.Nonce, id, height) { + continue + } + // FIXME: Instead of applying the message to check if it fails before including in + // the cross-msg pool, we include every cross-msg and if it fails it is handled by + // the SCA when applied. 
We could probably check here if it fails, and for failing messages + // revert the source transaction. Check https://github.com/filecoin-project/eudico/issues/92 + // for further details. + // // Apply message to see if it succeeds before considering it for the pool. + // if err := s.applyMsg(ctx, subAPI.StateManager, id, m); err != nil { + // log.Warnf("Error applying cross message when picking it up from CrossMsgPool: %s", err) + // continue + // } + out = append(out, m) + s.cm.applyTopDown(m.Nonce, id, height) + } + return out, nil +} + +func (s *SubnetMgr) getBottomUpPool(ctx context.Context, id address.SubnetID, height abi.ChainEpoch) ([]*types.Message, error) { + subAPI := s.getAPI(id) + if subAPI == nil { + return nil, xerrors.Errorf("Not listening to subnet") + } + // Get tipset at height-finalityThreshold to ensure some level of finality + // to get pool of cross-messages. + st, pstore, err := s.getSCAStateWithFinality(ctx, subAPI, id) + if err != nil { + return nil, err + } + // BottomUpMessage work a bit different from topDown messages. We accept + // several messages with the same nonce because Metas batch several messages + // inside the same package. To prevent applying messages twice we look in + // the pool for messages with AppliedBottomUpNonce+1, because the previous + // nonce was already applied in the previous batch (see sca_actor::ApplyMsg) + toApply := st.AppliedBottomUpNonce + 1 + metas, err := st.BottomUpMsgFromNonce(pstore, toApply) + if err != nil { + return nil, err + } + + // Get resolver for subnet + r := s.getSubnetResolver(id) + + out := make([]*types.Message, 0) + isFound := make(map[uint64][]types.Message) + // Resolve CrossMsgs behind meta or send pull message. + for _, mt := range metas { + // Resolve CrossMsgs behind meta. 
+ c, err := mt.Cid() + if err != nil { + return nil, err + } + cross, found, err := r.ResolveCrossMsgs(ctx, c, address.SubnetID(mt.From)) + if err != nil { + return nil, err + } + if found { + // Mark that the meta with specific nonce has been + // fully resolved including the messages + isFound[uint64(mt.Nonce)] = cross + } + } + + // We return from AppliedBottomUpNonce all the metas that have been resolved + // successfully. They need to be applied sequentially, so the moment we find + // an unresolved meta we return. + // FIXME: This approach may affect the liveliness of hierarchical consensus. + // Assuming data availability and honest nodes we should include a fallback + // scheme to prevent the protocol from stalling. + for i := toApply; i < toApply+uint64(len(metas)); i++ { + cross, ok := isFound[i] + // If not found, return + if !ok { + return out, nil + } + // The pool waits a few epochs before re-proposing a cross-msg if the applied nonce + // hasn't changed in order to give enough time for state changes to propagate. 
+ if s.cm.isBottomUpApplied(i, id, height) { + continue + } + for _, m := range cross { + // Add the meta nonce to the message nonce + m.Nonce = i + // Append for return + out = append(out, &m) + } + s.cm.applyBottomUp(i, id, height) + } + + return out, nil +} + +func (s *SubnetMgr) getSubnetResolver(id address.SubnetID) *resolver.Resolver { + r := s.r + if !s.isRoot(id) { + r = s.subnets[id].r + } + return r +} + +func (s *SubnetMgr) CrossMsgResolve(ctx context.Context, id address.SubnetID, c cid.Cid, from address.SubnetID) ([]types.Message, error) { + r := s.getSubnetResolver(id) + msgs, _, err := r.ResolveCrossMsgs(ctx, c, address.SubnetID(from)) + return msgs, err +} + +func (s *SubnetMgr) WaitCrossMsgResolved(ctx context.Context, id address.SubnetID, c cid.Cid, from address.SubnetID) chan error { + r := s.getSubnetResolver(id) + return r.WaitCrossMsgsResolved(ctx, c, address.SubnetID(from)) +} diff --git a/chain/consensus/hierarchical/subnet/manager/crossmsg_test.go b/chain/consensus/hierarchical/subnet/manager/crossmsg_test.go new file mode 100644 index 000000000..5aee7ad20 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/manager/crossmsg_test.go @@ -0,0 +1,65 @@ +package subnetmgr + +import ( + "testing" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/stretchr/testify/require" +) + +func TestPoolTopDown(t *testing.T) { + cm := newCrossMsgPool() + id := address.SubnetID("test") + var nonce uint64 = 3 + height := abi.ChainEpoch(5) + cm.applyTopDown(nonce, id, height) + cm.applyTopDown(nonce+1, id, height) + require.False(t, cm.isTopDownApplied(nonce, id, height)) + require.False(t, cm.isTopDownApplied(nonce+1, id, height)) + require.False(t, cm.isTopDownApplied(nonce+2, id, height)) + + // In the next epoch we consider the previous ones as applied + height++ + cm.applyTopDown(nonce+2, id, height) + require.True(t, cm.isTopDownApplied(nonce, id, height)) + require.True(t, 
cm.isTopDownApplied(nonce+1, id, height)) + require.False(t, cm.isTopDownApplied(nonce+2, id, height)) + + // After the finality threshold we are free to re-propose previous ones if needed. + // (state changes would probably have propagated already). + height += finalityWait + cm.applyTopDown(nonce+3, id, height) + require.False(t, cm.isTopDownApplied(nonce+2, id, height)) + require.False(t, cm.isTopDownApplied(nonce+3, id, height)) + height++ + require.True(t, cm.isTopDownApplied(nonce+3, id, height)) +} + +func TestPoolBottomUp(t *testing.T) { + cm := newCrossMsgPool() + id := address.SubnetID("test") + var nonce uint64 = 3 + height := abi.ChainEpoch(5) + cm.applyBottomUp(nonce, id, height) + cm.applyBottomUp(nonce+1, id, height) + require.False(t, cm.isBottomUpApplied(nonce, id, height)) + require.False(t, cm.isBottomUpApplied(nonce+1, id, height)) + require.False(t, cm.isBottomUpApplied(nonce+2, id, height)) + + // In the next epoch we consider the previous ones as applied + height++ + cm.applyBottomUp(nonce+2, id, height) + require.True(t, cm.isBottomUpApplied(nonce, id, height)) + require.True(t, cm.isBottomUpApplied(nonce+1, id, height)) + require.False(t, cm.isBottomUpApplied(nonce+2, id, height)) + + // After the finality threshold we are free to re-propose previous ones if needed. + // (state changes would probably have propagated already). 
+ height += finalityWait + cm.applyBottomUp(nonce+3, id, height) + require.False(t, cm.isBottomUpApplied(nonce+2, id, height)) + require.False(t, cm.isBottomUpApplied(nonce+3, id, height)) + height++ + require.True(t, cm.isBottomUpApplied(nonce+3, id, height)) +} diff --git a/chain/consensus/hierarchical/subnet/manager/events.go b/chain/consensus/hierarchical/subnet/manager/events.go new file mode 100644 index 000000000..409ddcdc1 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/manager/events.go @@ -0,0 +1,433 @@ +package subnetmgr + +import ( + "context" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" + checkpoint "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + ctypes "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/types" + "github.com/filecoin-project/lotus/chain/events" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "golang.org/x/xerrors" +) + +// finalityThreshold determines the number of epochs to wait +// before considering a change "final" and consider signing the +// checkpoint +const finalityThreshold = 5 + +// struct used to propagate detected changes. +type diffInfo struct { + checkToSign *signInfo + childChecks map[string][]cid.Cid +} + +// signInfo propagates signing inforamtion. 
+type signInfo struct { + checkpoint *schema.Checkpoint + addr address.Address + idAddr address.Address +} + +// signingState keeps track of checkpoint signing state +// for an epoch. +type signingState struct { + wait abi.ChainEpoch + currEpoch abi.ChainEpoch + signed bool +} + +// listenSubnetEvents is the routine responsible for listening to events +// +// This routine listens mainly for the following events: +// * Pending checkpoints to sign if we are miners in a subnet. +// * New checkpoints for child chains committed in SCA of the subnet. +func (s *SubnetMgr) listenSubnetEvents(ctx context.Context, sh *Subnet) { + evs := s.events + api := s.api + id := address.RootSubnet + root := true + + // If subnet is nil, we are listening from the root chain. + // TODO: Revisit this, there is probably a more elegant way to + // do this. + if sh != nil { + root = false + id = sh.ID + api = sh.api + evs = sh.events + sh.resetSigState(abi.ChainEpoch(0)) + } + + checkFunc := func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { + return false, true, nil + } + + changeHandler := func(oldTs, newTs *types.TipSet, states events.StateChange, curH abi.ChainEpoch) (more bool, err error) { + log.Infow("State change detected for subnet", "subnetID", id) + diff, ok := states.(*diffInfo) + if !ok { + log.Error("Error casting states, not of type *diffInfo") + return true, err + } + + // Trigger the detected change in subnets. + return s.triggerChange(ctx, sh, diff) + + } + + revertHandler := func(ctx context.Context, ts *types.TipSet) error { + return nil + } + + match := func(oldTs, newTs *types.TipSet) (bool, events.StateChange, error) { + diff := &diffInfo{} + change := false + var err error + + // Root chain checkpointing process is independent from hierarchical consensus + // so there's no need for checking if there is something to sign in root. 
+ if !root { + change, err = s.matchCheckpointSignature(ctx, sh, newTs, diff) + if err != nil { + log.Errorw("Error checking checkpoints to sign in subnet", "subnetID", id, "err", err) + return false, nil, err + } + } + + // Every subnet listents to its SCA contract to check when new child checkpoints have + // been committed. + change2, err := s.matchSCAChildCommit(ctx, api, oldTs, newTs, diff) + if err != nil { + log.Errorw("Error checking checkpoints to sign in subnet", "subnetID", id, "err", err) + return false, nil, err + } + + return change || change2, diff, nil + + } + + err := evs.StateChanged(checkFunc, changeHandler, revertHandler, finalityThreshold, 76587687658765876, match) + if err != nil { + return + } +} + +func (s *SubnetMgr) matchSCAChildCommit(ctx context.Context, api *API, oldTs, newTs *types.TipSet, diff *diffInfo) (bool, error) { + oldAct, err := api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, oldTs.Key()) + if err != nil { + return false, err + } + newAct, err := api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, newTs.Key()) + if err != nil { + return false, err + } + + var oldSt, newSt sca.SCAState + diff.childChecks = make(map[string][]cid.Cid) + + bs := blockstore.NewAPIBlockstore(api) + cst := cbor.NewCborStore(bs) + if err := cst.Get(ctx, oldAct.Head, &oldSt); err != nil { + return false, err + } + if err := cst.Get(ctx, newAct.Head, &newSt); err != nil { + return false, err + } + + // If no changes in checkpoints + if oldSt.Checkpoints == newSt.Checkpoints { + return false, nil + } + + store := adt.WrapStore(ctx, cst) + // Get checkpoints being populated in current window. + oldCheck, err := oldSt.CurrWindowCheckpoint(store, oldTs.Height()) + if err != nil { + return false, err + } + newCheck, err := newSt.CurrWindowCheckpoint(store, newTs.Height()) + if err != nil { + return false, err + } + + // Even if there is change, if newCheck is zero we are in + // a window change. 
+ if oldCheck.LenChilds() > newCheck.LenChilds() { + return false, err + } + + oldChilds := oldCheck.GetChilds() + newChilds := newCheck.GetChilds() + + // Check changes in child changes + chngChilds := make(map[string][][]byte) + for _, ch := range newChilds { + chngChilds[ch.Source] = ch.Checks + } + + for _, ch := range oldChilds { + cs, ok := chngChilds[ch.Source] + // If found in new and old and same length + if ok && len(cs) == len(ch.Checks) { + delete(chngChilds, ch.Source) + } else if ok { + // If found but not the same size it means there is some child there + // We delete all + i := chngChilds[ch.Source][len(chngChilds[ch.Source])-1] + delete(chngChilds, ch.Source) + // And just add the last one added + chngChilds[ch.Source] = [][]byte{i} + } + } + + for k, out := range chngChilds { + cs, err := schema.ByteSliceToCidList(out) + if err != nil { + return false, err + } + diff.childChecks[k] = cs + } + + return len(diff.childChecks) > 0, nil +} + +func (s *SubnetMgr) matchCheckpointSignature(ctx context.Context, sh *Subnet, newTs *types.TipSet, diff *diffInfo) (bool, error) { + // Get the epoch for the current tipset in subnet. + subnetEpoch := newTs.Height() + + // Get the state of the corresponding subnet actor in the parent chain + // Get actor from subnet ID + subnetActAddr, err := sh.ID.Actor() + if err != nil { + return false, err + } + // Get the api for the parent network hosting the subnet actor + // for the subnet. 
+ parentAPI, err := s.getParentAPI(sh.ID) + if err != nil { + return false, err + } + + // Get state of subnet actor in parent for heaviest tipset + subnetAct, err := parentAPI.StateGetActor(ctx, subnetActAddr, types.EmptyTSK) + if err != nil { + return false, err + } + + var snst subnet.SubnetState + pbs := blockstore.NewAPIBlockstore(parentAPI) + pcst := cbor.NewCborStore(pbs) + if err := pcst.Get(ctx, subnetAct.Head, &snst); err != nil { + return false, err + } + pstore := adt.WrapStore(ctx, pcst) + + // Check if no checkpoint committed for this window + signWindow := ctypes.CheckpointEpoch(subnetEpoch, snst.CheckPeriod) + + // Reset state if we have changed signing windows + if signWindow != sh.sigWindow() { + sh.resetSigState(signWindow) + } + _, found, err := snst.GetCheckpoint(pstore, signWindow) + if err != nil { + return false, err + } + if found { + log.Infow("Checkpoint for epoch already committed", "epoch", signWindow) + return false, nil + } + + // Get raw checkpoint for this window from SCA of subnet + scaAct, err := sh.api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, newTs.Key()) + if err != nil { + return false, err + } + var scast sca.SCAState + bs := blockstore.NewAPIBlockstore(sh.api) + cst := cbor.NewCborStore(bs) + if err := cst.Get(ctx, scaAct.Head, &scast); err != nil { + return false, err + } + store := adt.WrapStore(ctx, cst) + ch, err := sca.RawCheckpoint(&scast, store, signWindow) + if err != nil { + log.Errorw("Error getting raw checkpoint", "err", err) + return false, err + } + // Populate checkpoint data + if err := sh.populateCheckpoint(ctx, pstore, &snst, ch); err != nil { + log.Errorw("Error populating checkpoint template", "err", err) + return false, err + } + + chcid, err := ch.Cid() + if err != nil { + return false, err + } + + // Check if there are votes for this checkpoint + votes, found, err := snst.GetWindowChecks(pstore, chcid) + if err != nil { + return false, err + } + + // If not check if I am miner and I haven't 
submitted a vote + // from all the identities in my wallet. + wallAddrs, err := s.api.WalletAPI.WalletList(ctx) + if err != nil { + return false, err + } + for _, waddr := range wallAddrs { + addr, err := s.api.StateLookupID(ctx, waddr, types.EmptyTSK) + if err != nil { + // Disregard errors here. We want to check if the + // state changes, if we can't check this, well, we keep going! + continue + } + // I'm in the list of miners, check if I have already committed + // a checkpoint. + if snst.IsMiner(addr) { + // If no windowChecks found, or we haven't sent a vote yet + if !found || !subnet.HasMiner(addr, votes.Miners) { + sh.sigWaitTick() + // If wait reached, the tipset is final and we can sign. + // This wait ensures that we only sign once + if sh.sigWaitReached() && !sh.hasSigned() { + diff.checkToSign = &signInfo{ch, waddr, addr} + // Notify that this epoch for subnet has been marked for signing. + sh.signed() + return true, nil + } + } + } + + } + // If not return. + return false, nil + +} + +// PopulateCheckpoint sets previous checkpoint and tipsetKey. +func (sh *Subnet) populateCheckpoint(ctx context.Context, store adt.Store, st *subnet.SubnetState, ch *schema.Checkpoint) error { + // Set Previous. + prevCid, err := st.PrevCheckCid(store, ch.Epoch()) + if err != nil { + return err + } + ch.SetPrevious(prevCid) + + // Set tipsetKeys for the epoch. + ts, err := sh.api.ChainGetTipSetByHeight(ctx, ch.Epoch(), types.EmptyTSK) + if err != nil { + return err + } + ch.SetTipsetKey(ts.Key()) + return nil +} + +func (s *SubnetMgr) triggerChange(ctx context.Context, sh *Subnet, diff *diffInfo) (more bool, err error) { + // If there's a checkpoint to sign. 
+ if diff.checkToSign != nil { + err := s.signAndSubmitCheckpoint(ctx, sh, diff.checkToSign) + if err != nil { + log.Errorw("Error signing checkpoint for subnet", "subnetID", sh.ID, "err", err) + return true, err + } + log.Infow("Success signing checkpoint in subnet", "subnetID", sh.ID.String()) + } + + // If some child checkpoint committed in SCA + if len(diff.childChecks) != 0 { + err := s.childCheckDetected(ctx, diff.childChecks) + if err != nil { + log.Errorw("Error when detecting child checkpoint in SCA", "subnetID", sh.ID, "err", err) + return true, err + } + } + return true, nil +} + +func (s *SubnetMgr) signAndSubmitCheckpoint(ctx context.Context, sh *Subnet, info *signInfo) error { + log.Infow("Signing checkpoint for subnet", "subnetID", info.checkpoint.Source().String()) + // Using simple signature to sign checkpoint using the subnet wallet. + ver := checkpoint.NewSingleSigner() + err := ver.Sign(ctx, sh.api.WalletAPI.Wallet, info.addr, info.checkpoint, + []checkpoint.SigningOpts{checkpoint.IDAddr(info.idAddr)}...) + if err != nil { + return err + } + // Sign checkpoint + _, err = s.SubmitSignedCheckpoint(ctx, info.addr, sh.ID, info.checkpoint) + if err != nil { + return err + } + + // Trying to push cross-msgs included in checkpoint to corresponding subnet. + log.Infow("Pushing cross-msgs from checkpoint", "subnetID", info.checkpoint.Source().String()) + subAPI := s.getAPI(sh.ID) + if subAPI == nil { + xerrors.Errorf("Not listening to subnet") + } + st, store, err := s.GetSCAState(ctx, sh.ID) + if err != nil { + return err + } + + // Pushing cross-msg in checkpoint to corresponding subnets. 
+ return sh.r.PushMsgFromCheckpoint(info.checkpoint, st, store) +} + +func (s *SubnetMgr) childCheckDetected(ctx context.Context, info map[string][]cid.Cid) error { + for k, c := range info { + log.Infof("Child checkpoint from %s committed in %s: %s", k, s.api.NetName, c) + } + return nil + +} + +func (sh *Subnet) resetSigState(epoch abi.ChainEpoch) { + sh.checklk.Lock() + defer sh.checklk.Unlock() + sh.singingState = &signingState{currEpoch: epoch} +} + +func (sh *Subnet) sigWaitTick() { + sh.checklk.Lock() + defer sh.checklk.Unlock() + sh.singingState.wait++ +} + +func (sh *Subnet) signed() { + sh.checklk.Lock() + defer sh.checklk.Unlock() + sh.singingState.signed = true +} + +func (sh *Subnet) hasSigned() bool { + sh.checklk.RLock() + defer sh.checklk.RUnlock() + return sh.singingState.signed +} + +func (sh *Subnet) sigWaitReached() bool { + sh.checklk.RLock() + defer sh.checklk.RUnlock() + return sh.singingState.wait >= finalityThreshold +} + +func (sh *Subnet) sigWindow() abi.ChainEpoch { + sh.checklk.RLock() + defer sh.checklk.RUnlock() + return sh.singingState.currEpoch +} diff --git a/chain/consensus/hierarchical/subnet/exchange.go b/chain/consensus/hierarchical/subnet/manager/exchange.go similarity index 99% rename from chain/consensus/hierarchical/subnet/exchange.go rename to chain/consensus/hierarchical/subnet/manager/exchange.go index 17ef74139..010053fd1 100644 --- a/chain/consensus/hierarchical/subnet/exchange.go +++ b/chain/consensus/hierarchical/subnet/manager/exchange.go @@ -1,4 +1,4 @@ -package subnet +package subnetmgr import ( "context" diff --git a/chain/consensus/hierarchical/subnet/manager.go b/chain/consensus/hierarchical/subnet/manager/manager.go similarity index 72% rename from chain/consensus/hierarchical/subnet/manager.go rename to chain/consensus/hierarchical/subnet/manager/manager.go index 1beeb6691..836508d3f 100644 --- a/chain/consensus/hierarchical/subnet/manager.go +++ b/chain/consensus/hierarchical/subnet/manager/manager.go @@ 
-1,4 +1,4 @@ -package subnet +package subnetmgr import ( "bytes" @@ -8,14 +8,20 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/beacon" act "github.com/filecoin-project/lotus/chain/consensus/actors" + "github.com/filecoin-project/lotus/chain/consensus/common" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" + subiface "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + subcns "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/consensus" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" @@ -26,7 +32,7 @@ import ( "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/lib/peermgr" "github.com/filecoin-project/lotus/node/impl/client" - "github.com/filecoin-project/lotus/node/impl/common" + commonapi "github.com/filecoin-project/lotus/node/impl/common" "github.com/filecoin-project/lotus/node/impl/full" "github.com/filecoin-project/lotus/node/impl/market" "github.com/filecoin-project/lotus/node/impl/net" @@ -35,11 +41,13 @@ import ( "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/specs-actors/actors/builtin" init_ "github.com/filecoin-project/specs-actors/actors/builtin/init" + blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" 
"github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" nsds "github.com/ipfs/go-datastore/namespace" offline "github.com/ipfs/go-ipfs-exchange-offline" + cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/host" peer "github.com/libp2p/go-libp2p-core/peer" @@ -48,7 +56,7 @@ import ( "golang.org/x/xerrors" ) -var log = logging.Logger("subnet") +var log = logging.Logger("subnetMgr") // SubnetMgr is the subneting manager in the root chain type SubnetMgr struct { @@ -59,6 +67,7 @@ type SubnetMgr struct { // api *impl.FullNodeAPI api *API host host.Host + self peer.ID pubsub *pubsub.PubSub // Root ds @@ -72,7 +81,12 @@ type SubnetMgr struct { bootstrapper dtypes.Bootstrapper lk sync.RWMutex - subnets map[hierarchical.SubnetID]*Subnet + subnets map[address.SubnetID]*Subnet + + // Cross-msg general pool + cm *crossMsgPool + // Root cross-msg resolver. Each subnet has one. + r *resolver.Resolver j journal.Journal } @@ -92,7 +106,7 @@ func NewSubnetMgr( verifier ffiwrapper.Verifier, pmgr peermgr.MaybePeerMgr, bootstrapper dtypes.Bootstrapper, - commonapi common.CommonAPI, + commonapi commonapi.CommonAPI, netapi net.NetAPI, chainapi full.ChainAPI, clientapi client.API, @@ -106,14 +120,17 @@ func NewSubnetMgr( netName dtypes.NetworkName, syncapi full.SyncAPI, beaconapi full.BeaconAPI, + r *resolver.Resolver, j journal.Journal) (*SubnetMgr, error) { - var err error ctx := helpers.LifecycleCtx(mctx, lc) + var err error + s := &SubnetMgr{ ctx: ctx, pubsub: pubsub, host: host, + self: self, ds: ds, syscalls: syscalls, us: us, @@ -122,7 +139,9 @@ func NewSubnetMgr( nodeServer: nodeServer, bootstrapper: bootstrapper, verifier: verifier, - subnets: make(map[hierarchical.SubnetID]*Subnet), + subnets: make(map[address.SubnetID]*Subnet), + cm: newCrossMsgPool(), + r: r, } s.api = &API{ @@ -153,8 +172,8 @@ func NewSubnetMgr( return s, nil } -func (s *SubnetMgr) startSubnet(ctx context.Context, id 
hierarchical.SubnetID, - parentAPI *API, consensus subnet.ConsensusType, +func (s *SubnetMgr) startSubnet(id address.SubnetID, + parentAPI *API, consensus hierarchical.ConsensusType, genesis []byte) error { var err error // Subnets inherit the context from the SubnetManager. @@ -182,20 +201,19 @@ func (s *SubnetMgr) startSubnet(ctx context.Context, id hierarchical.SubnetID, // figure out if it works. sh.bs = blockstore.FromDatastore(s.ds) + // Instantiate new cross-msg resolver + sh.r = resolver.NewResolver(s.self, sh.ds, sh.pubsub, sh.ID) + // Select the right TipSetExecutor for the consensus algorithms chosen. - tsExec, err := tipSetExecutor(consensus) - if err != nil { - log.Errorw("Error getting TipSetExecutor for consensus", "subnetID", id, "err", err) - return err - } - weight, err := weight(consensus) + tsExec := common.TipSetExecutor(s) + weight, err := subcns.Weight(consensus) if err != nil { log.Errorw("Error getting weight for consensus", "subnetID", id, "err", err) return err } sh.ch = store.NewChainStore(sh.bs, sh.bs, sh.ds, weight, s.j) - sh.sm, err = stmgr.NewStateManager(sh.ch, tsExec, s.syscalls, s.us, s.beacon) + sh.sm, err = stmgr.NewStateManager(sh.ch, tsExec, sh.r, s.syscalls, s.us, s.beacon) if err != nil { log.Errorw("Error creating state manager for subnet", "subnetID", id, "err", err) return err @@ -203,12 +221,13 @@ func (s *SubnetMgr) startSubnet(ctx context.Context, id hierarchical.SubnetID, // Start state manager. 
sh.sm.Start(ctx) - gen, err := sh.LoadGenesis(genesis) + gen, err := sh.LoadGenesis(ctx, genesis) if err != nil { log.Errorw("Error loading genesis bootstrap for subnet", "subnetID", id, "err", err) return err } - sh.cons, err = newConsensus(consensus, sh.sm, s.beacon, s.verifier, gen) + // Instantiate consensus + sh.cons, err = subcns.New(consensus, sh.sm, s, s.beacon, sh.r, s.verifier, gen, dtypes.NetworkName(id)) if err != nil { log.Errorw("Error creating consensus", "subnetID", id, "err", err) return err @@ -231,15 +250,24 @@ func (s *SubnetMgr) startSubnet(ctx context.Context, id hierarchical.SubnetID, // is created but before we set-up the gossipsub topics to listen for // new blocks and messages. sh.runHello(ctx) + + // FIXME: Consider inheriting Bitswap ChainBlockService instead of using + // offline.Exchange here. See builder_chain to undertand how is built. bserv := blockservice.New(sh.bs, offline.Exchange(sh.bs)) prov := messagepool.NewProvider(sh.sm, s.pubsub) - sh.mpool, err = messagepool.New(prov, sh.ds, s.us, dtypes.NetworkName(sh.ID.String()), s.j) + sh.mpool, err = messagepool.New(ctx, prov, sh.ds, s.us, dtypes.NetworkName(sh.ID.String()), s.j) if err != nil { log.Errorw("Error creating message pool for subnet", "subnetID", id, "err", err) return err } + // Start listening to cross-msg resolve messages + err = sh.r.HandleMsgs(ctx, s) + if err != nil { + return xerrors.Errorf("error initializing cross-msg resolver: %s", err) + } + // This functions create a new pubsub topic for the subnet to start // listening to new messages and blocks for the subnet. 
err = sh.HandleIncomingBlocks(ctx, bserv) @@ -269,7 +297,7 @@ func (s *SubnetMgr) startSubnet(ctx context.Context, id hierarchical.SubnetID, log.Errorw("Events couldn't be initialized for subnet", "subnetID", id, "err", err) return err } - go s.listenSCAEvents(ctx, sh) + go s.listenSubnetEvents(ctx, sh) log.Infow("Listening to SCA events in subnet", "subnetID", id) log.Infow("Successfully spawned subnet", "subnetID", id) @@ -280,17 +308,21 @@ func (s *SubnetMgr) startSubnet(ctx context.Context, id hierarchical.SubnetID, func (s *SubnetMgr) Start(ctx context.Context) { // Start listening to events in the SCA contract from root right away. // Every peer in the hierarchy needs to be aware of these events. - s.listenSCAEvents(ctx, nil) + s.listenSubnetEvents(ctx, nil) } func (s *SubnetMgr) Close(ctx context.Context) error { for _, sh := range s.subnets { err := sh.Close(ctx) if err != nil { - return err + log.Errorf("error closing subnet %s: %w", sh.ID, err) + // NOTE: Even if we fail to close a subnet we should continue + // and not return. We shouldn't stop half-way. 
+ // return err } } - return nil + // Close resolver + return s.r.Close() } func BuildSubnetMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, s *SubnetMgr) { @@ -309,8 +341,9 @@ func BuildSubnetMgr(mctx helpers.MetricsCtx, lc fx.Lifecycle, s *SubnetMgr) { func (s *SubnetMgr) AddSubnet( ctx context.Context, wallet address.Address, - parent hierarchical.SubnetID, name string, + parent address.SubnetID, name string, consensus uint64, minerStake abi.TokenAmount, + checkPeriod abi.ChainEpoch, delegminer address.Address) (address.Address, error) { // Get the api for the parent network hosting the subnet actor @@ -324,8 +357,9 @@ func (s *SubnetMgr) AddSubnet( NetworkName: string(s.api.NetName), MinMinerStake: minerStake, Name: name, - Consensus: subnet.ConsensusType(consensus), + Consensus: hierarchical.ConsensusType(consensus), DelegMiner: delegminer, + CheckPeriod: checkPeriod, } seraddp, err := actors.SerializeParams(addp) @@ -369,7 +403,7 @@ func (s *SubnetMgr) AddSubnet( func (s *SubnetMgr) JoinSubnet( ctx context.Context, wallet address.Address, value abi.TokenAmount, - id hierarchical.SubnetID) (cid.Cid, error) { + id address.SubnetID) (cid.Cid, error) { // TODO: Think a bit deeper the locking strategy for subnets. s.lk.Lock() @@ -383,9 +417,9 @@ func (s *SubnetMgr) JoinSubnet( // Get the api for the parent network hosting the subnet actor // for the subnet. - parentAPI := s.getAPI(id.Parent()) - if parentAPI == nil { - return cid.Undef, xerrors.Errorf("not syncing with parent network") + parentAPI, err := s.getParentAPI(id) + if err != nil { + return cid.Undef, err } // Get the parent and the actor to know where to send the message. @@ -398,6 +432,7 @@ func (s *SubnetMgr) JoinSubnet( Params: nil, }, nil) if aerr != nil { + log.Errorw("Error pushing join subnet message to parent api", "err", aerr) return cid.Undef, aerr } @@ -406,6 +441,7 @@ func (s *SubnetMgr) JoinSubnet( // Wait state message. 
_, aerr = parentAPI.StateWaitMsg(ctx, msg, build.MessageConfidence, api.LookbackNoLimit, true) if aerr != nil { + log.Errorw("Error waiting for message to be committed", "err", aerr) return cid.Undef, aerr } @@ -417,19 +453,60 @@ func (s *SubnetMgr) JoinSubnet( } // If not we need to initialize the subnet in our client to start syncing. + err = s.syncSubnet(ctx, id, parentAPI) + if err != nil { + return cid.Undef, err + } + + return smsg.Cid(), nil +} + +func (s *SubnetMgr) syncSubnet(ctx context.Context, id address.SubnetID, parentAPI *API) error { + // Get actor from subnet ID + SubnetActor, err := id.Actor() + if err != nil { + return err + } + // See if we are already syncing with that chain. + if s.getAPI(id) != nil { + return xerrors.Errorf("Already syncing with subnet: %v", id) + } + // Get genesis from actor state. st, err := parentAPI.getActorState(ctx, SubnetActor) if err != nil { - return cid.Undef, nil + return err } - err = s.startSubnet(s.ctx, id, parentAPI, st.Consensus, st.Genesis) - return smsg.Cid(), nil + return s.startSubnet(id, parentAPI, st.Consensus, st.Genesis) +} + +// SyncSubnet starts syncing with a subnet even if we are not an active participant. +func (s *SubnetMgr) SyncSubnet(ctx context.Context, id address.SubnetID, stop bool) error { + if stop { + return s.stopSyncSubnet(ctx, id) + } + // Get the api for the parent network hosting the subnet actor + // for the subnet. 
+ parentAPI, err := s.getParentAPI(id) + if err != nil { + return err + } + return s.syncSubnet(ctx, id, parentAPI) +} + +// stopSyncSubnet stops syncing from a subnet +func (s *SubnetMgr) stopSyncSubnet(ctx context.Context, id address.SubnetID) error { + if sh, _ := s.getSubnet(id); sh != nil { + delete(s.subnets, id) + return sh.Close(ctx) + } + return xerrors.Errorf("Not currently syncing with subnet: %s", id) } func (s *SubnetMgr) MineSubnet( ctx context.Context, wallet address.Address, - id hierarchical.SubnetID, stop bool) error { + id address.SubnetID, stop bool) error { // TODO: Think a bit deeper the locking strategy for subnets. s.lk.RLock() @@ -454,15 +531,15 @@ func (s *SubnetMgr) MineSubnet( // Get the api for the parent network hosting the subnet actor // for the subnet. - parentAPI := s.getAPI(id.Parent()) - if parentAPI == nil { - return xerrors.Errorf("not syncing with parent network") + parentAPI, err := s.getParentAPI(id) + if err != nil { + return err } // Get actor state to check if the subnet is active and we are in the list // of miners st, err := parentAPI.getActorState(ctx, SubnetActor) if err != nil { - return nil + return err } if st.IsMiner(wallet) && st.Status != subnet.Killed { @@ -476,7 +553,7 @@ func (s *SubnetMgr) MineSubnet( func (s *SubnetMgr) LeaveSubnet( ctx context.Context, wallet address.Address, - id hierarchical.SubnetID) (cid.Cid, error) { + id address.SubnetID) (cid.Cid, error) { // TODO: Think a bit deeper the locking strategy for subnets. s.lk.Lock() @@ -490,9 +567,9 @@ func (s *SubnetMgr) LeaveSubnet( // Get the api for the parent network hosting the subnet actor // for the subnet. - parentAPI := s.getAPI(id.Parent()) - if parentAPI == nil { - return cid.Undef, xerrors.Errorf("not syncing with parent network") + parentAPI, err := s.getParentAPI(id) + if err != nil { + return cid.Undef, err } // Get the parent and the actor to know where to send the message. 
@@ -517,7 +594,7 @@ func (s *SubnetMgr) LeaveSubnet( // See if we are already syncing with that chain. If this // is the case we can remove the subnet - if sh, _ := s.getSubnet(id); s != nil { + if sh, _ := s.getSubnet(id); sh != nil { log.Infow("Stop syncing with subnet", "subnetID", id) delete(s.subnets, id) return msg, sh.Close(ctx) @@ -528,7 +605,7 @@ func (s *SubnetMgr) LeaveSubnet( func (s *SubnetMgr) KillSubnet( ctx context.Context, wallet address.Address, - id hierarchical.SubnetID) (cid.Cid, error) { + id address.SubnetID) (cid.Cid, error) { // TODO: Think a bit deeper the locking strategy for subnets. s.lk.RLock() @@ -542,9 +619,9 @@ func (s *SubnetMgr) KillSubnet( // Get the api for the parent network hosting the subnet actor // for the subnet. - parentAPI := s.getAPI(id.Parent()) - if parentAPI == nil { - return cid.Undef, xerrors.Errorf("not syncing with parent network") + parentAPI, err := s.getParentAPI(id) + if err != nil { + return cid.Undef, err } // Get the parent and the actor to know where to send the message. 
@@ -572,21 +649,62 @@ func (s *SubnetMgr) KillSubnet( return smsg.Cid(), nil } -func (s *SubnetMgr) getAPI(n hierarchical.SubnetID) *API { - if n.String() == string(s.api.NetName) { +// isRoot checks if the +func (s *SubnetMgr) isRoot(id address.SubnetID) bool { + return id.String() == string(s.api.NetName) +} + +func (s *SubnetMgr) getAPI(id address.SubnetID) *API { + if s.isRoot(id) { return s.api } - sh, ok := s.subnets[n] + sh, ok := s.subnets[id] if !ok { return nil } return sh.api } -func (s *SubnetMgr) getSubnet(n hierarchical.SubnetID) (*Subnet, error) { - sh, ok := s.subnets[n] +func (s *SubnetMgr) getParentAPI(id address.SubnetID) (*API, error) { + parentAPI := s.getAPI(id.Parent()) + if parentAPI == nil { + return nil, xerrors.Errorf("not syncing with parent network") + } + return parentAPI, nil +} + +func (s *SubnetMgr) getSubnet(id address.SubnetID) (*Subnet, error) { + sh, ok := s.subnets[id] if !ok { - return nil, xerrors.Errorf("Not part of subnet %v. Consider joining it", n) + return nil, xerrors.Errorf("Not part of subnet %v. 
Consider joining it", id) } return sh, nil } + +func (s *SubnetMgr) GetSubnetAPI(id address.SubnetID) (v1api.FullNode, error) { + api := s.getAPI(id) + if api == nil { + return nil, xerrors.Errorf("subnet manager not syncing with network") + } + return api, nil +} + +func (s *SubnetMgr) GetSCAState(ctx context.Context, id address.SubnetID) (*sca.SCAState, blockadt.Store, error) { + api, err := s.GetSubnetAPI(id) + if err != nil { + return nil, nil, err + } + var st sca.SCAState + subnetAct, err := api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, types.EmptyTSK) + if err != nil { + return nil, nil, xerrors.Errorf("loading actor state: %w", err) + } + pbs := blockstore.NewAPIBlockstore(api) + pcst := cbor.NewCborStore(pbs) + if err := pcst.Get(ctx, subnetAct.Head, &st); err != nil { + return nil, nil, xerrors.Errorf("getting actor state: %w", err) + } + return &st, blockadt.WrapStore(ctx, pcst), nil +} + +var _ subiface.SubnetMgr = &SubnetMgr{} diff --git a/chain/consensus/hierarchical/subnet/subnet.go b/chain/consensus/hierarchical/subnet/manager/subnet.go similarity index 78% rename from chain/consensus/hierarchical/subnet/subnet.go rename to chain/consensus/hierarchical/subnet/manager/subnet.go index da5e1db4d..c4add2d5e 100644 --- a/chain/consensus/hierarchical/subnet/subnet.go +++ b/chain/consensus/hierarchical/subnet/manager/subnet.go @@ -1,4 +1,4 @@ -package subnet +package subnetmgr import ( "bytes" @@ -12,10 +12,9 @@ import ( "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" "github.com/filecoin-project/lotus/chain/consensus" - "github.com/filecoin-project/lotus/chain/consensus/delegcns" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" - "github.com/filecoin-project/lotus/chain/consensus/tspow" + subcns "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/consensus" + 
"github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/events" "github.com/filecoin-project/lotus/chain/messagepool" "github.com/filecoin-project/lotus/chain/stmgr" @@ -37,9 +36,7 @@ import ( type Subnet struct { host host.Host // SubnetID - ID hierarchical.SubnetID - // Pubsub subcription for subnet. - // sub *pubsub.Subscription + ID address.SubnetID // Metadata datastore. ds dtypes.MetadataDS // Exposed blockstore @@ -51,7 +48,7 @@ type Subnet struct { // chain ch *store.ChainStore // Consensus type - consType subnet.ConsensusType + consType hierarchical.ConsensusType // Consensus of the subnet cons consensus.Consensus // Mempool for the subnet. @@ -80,18 +77,25 @@ type Subnet struct { minlk sync.Mutex miningCtx context.Context miningCncl context.CancelFunc + + // Checkpointing signing state + checklk sync.RWMutex + singingState *signingState + + // Cross-msg resolver + r *resolver.Resolver } // LoadGenesis from serialized genesis bootstrap -func (sh *Subnet) LoadGenesis(genBytes []byte) (chain.Genesis, error) { - c, err := car.LoadCar(sh.bs, bytes.NewReader(genBytes)) +func (sh *Subnet) LoadGenesis(ctx context.Context, genBytes []byte) (chain.Genesis, error) { + c, err := car.LoadCar(ctx, sh.bs, bytes.NewReader(genBytes)) if err != nil { return nil, xerrors.Errorf("loading genesis car file failed: %w", err) } if len(c.Roots) != 1 { return nil, xerrors.New("expected genesis file to have one root") } - root, err := sh.bs.Get(c.Roots[0]) + root, err := sh.bs.Get(ctx, c.Roots[0]) if err != nil { return nil, err } @@ -101,13 +105,13 @@ func (sh *Subnet) LoadGenesis(genBytes []byte) (chain.Genesis, error) { return nil, xerrors.Errorf("decoding block failed: %w", err) } - err = sh.ch.SetGenesis(h) + err = sh.ch.SetGenesis(ctx, h) if err != nil { log.Errorw("Error setting genesis for subnet", "err", err) return nil, err } //LoadGenesis to pass it - return chain.LoadGenesis(sh.sm) + return 
chain.LoadGenesis(ctx, sh.sm) } func (sh *Subnet) HandleIncomingMessages(ctx context.Context, bootstrapper dtypes.Bootstrapper) error { @@ -147,7 +151,8 @@ func (sh *Subnet) HandleIncomingMessages(ctx context.Context, bootstrapper dtype // Stop all processes and remove all handlers. func (sh *Subnet) Close(ctx context.Context) error { log.Infow("Closing subnet", "subnetID", sh.ID) - err0 := sh.stopMining(ctx) + sh.stopMining(ctx) + // Remove hello and exchange handlers to stop accepting requests from peers. sh.host.RemoveStreamHandler(protocol.ID(BlockSyncProtoPrefix + sh.ID.String())) sh.host.RemoveStreamHandler(protocol.ID(HelloProtoPrefix + sh.ID.String())) @@ -162,6 +167,8 @@ func (sh *Subnet) Close(ctx context.Context) error { sh.syncer.Stop() // Close message pool err5 := sh.mpool.Close() + // Close resolver. + err6 := sh.r.Close() // TODO: Do we need to do something else to fully close the // subnet. We'll need to revisit this. @@ -172,12 +179,12 @@ func (sh *Subnet) Close(ctx context.Context) error { sh.ctxCancel() return multierr.Combine( - err0, err1, err2, err3, err4, err5, + err6, ) } @@ -253,29 +260,16 @@ func (sh *Subnet) mine(ctx context.Context) error { log.Warnw("already mining in subnet", "subnetID", sh.ID) return nil } - // TODO: As-is a node will keep mining in a subnet until the node process - // is completely stopped. In the next iteration we need to figure out - // how to manage contexts for when a subnet is killed or a node moves into - // another subnet. (see next function) - // Mining in the root chain is an independent process. - // TODO: We should check if these processes throw an error - switch sh.consType { - case subnet.Delegated: - // Assigning mining context. 
- sh.miningCtx, sh.miningCncl = context.WithCancel(ctx) - go delegcns.Mine(sh.miningCtx, sh.api) - case subnet.PoW: - miner, err := sh.getWallet(ctx) - if err != nil { - log.Errorw("no valid identity found for PoW mining", "err", err) - return err - } - sh.miningCtx, sh.miningCncl = context.WithCancel(ctx) - go tspow.Mine(sh.miningCtx, miner, sh.api) - default: - return xerrors.New("consensus type not suported") + + mctx, cancel := context.WithCancel(ctx) + if err := subcns.Mine(mctx, sh.api, sh.consType); err != nil { + cancel() + return err } + // Set context and cancel for mining if started successfully + sh.miningCtx, sh.miningCncl = mctx, cancel log.Infow("Started mining in subnet", "subnetID", sh.ID, "consensus", sh.consType) + return nil } @@ -289,23 +283,3 @@ func (sh *Subnet) stopMining(ctx context.Context) error { } return xerrors.Errorf("Currently not mining in subnet") } - -// Get an identity from the peer's wallet. -// First check if a default identity has been set and -// if not take the first from the list. -// NOTE: We should probably make this configurable. -func (sh *Subnet) getWallet(ctx context.Context) (address.Address, error) { - addr, err := sh.api.WalletDefaultAddress(ctx) - // If no defualt wallet set - if err != nil || addr == address.Undef { - addrs, err := sh.api.WalletList(ctx) - if err != nil { - return address.Undef, err - } - if len(addrs) == 0 { - return address.Undef, xerrors.Errorf("no valid wallet found in peer") - } - addr = addrs[0] - } - return addr, nil -} diff --git a/chain/consensus/hierarchical/subnet/resolver/cbor_gen.go b/chain/consensus/hierarchical/subnet/resolver/cbor_gen.go new file mode 100644 index 000000000..5bfd94156 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/resolver/cbor_gen.go @@ -0,0 +1,130 @@ +// Code generated by github.com/whyrusleeping/cbor-gen. DO NOT EDIT. 
+ +package resolver + +import ( + "fmt" + "io" + "math" + "sort" + + address "github.com/filecoin-project/go-address" + cid "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + xerrors "golang.org/x/xerrors" +) + +var _ = xerrors.Errorf +var _ = cid.Undef +var _ = math.E +var _ = sort.Sort + +var lengthBufResolveMsg = []byte{132} + +func (t *ResolveMsg) MarshalCBOR(w io.Writer) error { + if t == nil { + _, err := w.Write(cbg.CborNull) + return err + } + if _, err := w.Write(lengthBufResolveMsg); err != nil { + return err + } + + scratch := make([]byte, 9) + + // t.From (address.SubnetID) (string) + if len(t.From) > cbg.MaxLength { + return xerrors.Errorf("Value in field t.From was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len(t.From))); err != nil { + return err + } + if _, err := io.WriteString(w, string(t.From)); err != nil { + return err + } + + // t.Type (resolver.MsgType) (uint64) + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajUnsignedInt, uint64(t.Type)); err != nil { + return err + } + + // t.Cid (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.Cid); err != nil { + return xerrors.Errorf("failed to write cid field t.Cid: %w", err) + } + + // t.CrossMsgs (sca.CrossMsgs) (struct) + if err := t.CrossMsgs.MarshalCBOR(w); err != nil { + return err + } + return nil +} + +func (t *ResolveMsg) UnmarshalCBOR(r io.Reader) error { + *t = ResolveMsg{} + + br := cbg.GetPeeker(r) + scratch := make([]byte, 8) + + maj, extra, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajArray { + return fmt.Errorf("cbor input should be of type array") + } + + if extra != 4 { + return fmt.Errorf("cbor input had wrong number of fields") + } + + // t.From (address.SubnetID) (string) + + { + sval, err := cbg.ReadStringBuf(br, scratch) + if err != nil { + return err + } + + t.From = address.SubnetID(sval) + } + // t.Type (resolver.MsgType) (uint64) + 
+ { + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajUnsignedInt { + return fmt.Errorf("wrong type for uint64 field") + } + t.Type = MsgType(extra) + + } + // t.Cid (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.Cid: %w", err) + } + + t.Cid = c + + } + // t.CrossMsgs (sca.CrossMsgs) (struct) + + { + + if err := t.CrossMsgs.UnmarshalCBOR(br); err != nil { + return xerrors.Errorf("unmarshaling t.CrossMsgs: %w", err) + } + + } + return nil +} diff --git a/chain/consensus/hierarchical/subnet/resolver/gen/gen.go b/chain/consensus/hierarchical/subnet/resolver/gen/gen.go new file mode 100644 index 000000000..480a860d9 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/resolver/gen/gen.go @@ -0,0 +1,15 @@ +package main + +import ( + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + + gen "github.com/whyrusleeping/cbor-gen" +) + +func main() { + if err := gen.WriteTupleEncodersToFile("./cbor_gen.go", "resolver", + resolver.ResolveMsg{}, + ); err != nil { + panic(err) + } +} diff --git a/chain/consensus/hierarchical/subnet/resolver/resolver.go b/chain/consensus/hierarchical/subnet/resolver/resolver.go new file mode 100644 index 000000000..796225fdb --- /dev/null +++ b/chain/consensus/hierarchical/subnet/resolver/resolver.go @@ -0,0 +1,504 @@ +package resolver + +//go:generate go run ./gen/gen.go + +import ( + "bytes" + "context" + "sync" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/modules/dtypes" + 
"github.com/filecoin-project/lotus/node/modules/helpers" + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + "github.com/ipfs/go-datastore" + nsds "github.com/ipfs/go-datastore/namespace" + logging "github.com/ipfs/go-log/v2" + peer "github.com/libp2p/go-libp2p-core/peer" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "go.uber.org/fx" + xerrors "golang.org/x/xerrors" +) + +const retryTimeout = 10 * time.Second + +var log = logging.Logger("subnet-resolver") + +func SubnetResolverTopic(id address.SubnetID) string { + return "/fil/subnet/resolver" + id.String() +} + +func resolverNamespace(id address.SubnetID) datastore.Key { + return datastore.NewKey("/resolver/" + id.String()) +} + +type Resolver struct { + netName address.SubnetID + self peer.ID + ds datastore.Datastore + pubsub *pubsub.PubSub + + // Caches to track duplicate and frequent msgs + pushCache *msgReceiptCache + pullCache *msgReceiptCache + // NOTE: We don't track number of response + // messages sent for now. We accept any number. + // We will need to address this to prevent potential + // spamming. + // responseCache *msgReceiptCache + + lk sync.Mutex + ongoingPull map[cid.Cid]time.Time +} + +type MsgType uint64 + +const ( + // Push content to other subnet + Push MsgType = iota + // PullMeta requests CrossMsgs behind a CID + PullMeta + // Response is used to answer to pull requests. 
+ Response + + // NOTE: For now we don't expect subnets needing to + // pull checkpoints from other subnets (although this + // has been discussed for verification purposes) + // PullCheck requests Checkpoint form a CID + // PullCheck +) + +type ResolveMsg struct { + // From subnet + From address.SubnetID + // Message type being propagated + Type MsgType + // Cid of the content + Cid cid.Cid + // MsgMeta being propagated (if any) + CrossMsgs sca.CrossMsgs + // Checkpoint being propagated (if any) + // Checkpoint schema.Checkpoint +} + +type msgReceiptCache struct { + msgs *lru.TwoQueueCache +} + +func newMsgReceiptCache() *msgReceiptCache { + c, _ := lru.New2Q(8192) + + return &msgReceiptCache{ + msgs: c, + } +} + +func (mrc *msgReceiptCache) add(bcid string) int { + val, ok := mrc.msgs.Get(bcid) + if !ok { + mrc.msgs.Add(bcid, int(1)) + return 0 + } + + mrc.msgs.Add(bcid, val.(int)+1) + return val.(int) +} + +func (r *Resolver) addMsgReceipt(t MsgType, bcid cid.Cid, from peer.ID) int { + if t == Push { + // All push messages are considered equal independent of + // the source. + return r.pushCache.add(bcid.String()) + } + // We allow each peer.ID in a subnet to send a pull request + // for each CID without being rejected. + // FIXME: Additional checks may be required to prevent malicious + // peers from spamming the topic with infinite requests. + // Deferring the design of a smarter logic here. 
+ return r.pullCache.add(bcid.String() + from.String()) +} + +func NewRootResolver(self peer.ID, ds dtypes.MetadataDS, pubsub *pubsub.PubSub, nn dtypes.NetworkName) *Resolver { + return NewResolver(self, ds, pubsub, address.SubnetID(nn)) +} +func NewResolver(self peer.ID, ds dtypes.MetadataDS, pubsub *pubsub.PubSub, netName address.SubnetID) *Resolver { + return &Resolver{ + netName: netName, + self: self, + ds: nsds.Wrap(ds, resolverNamespace(netName)), + pubsub: pubsub, + pushCache: newMsgReceiptCache(), + pullCache: newMsgReceiptCache(), + ongoingPull: make(map[cid.Cid]time.Time), + } +} + +func HandleMsgs(mctx helpers.MetricsCtx, lc fx.Lifecycle, r *Resolver, submgr subnet.SubnetMgr) { + ctx := helpers.LifecycleCtx(mctx, lc) + if err := r.HandleMsgs(ctx, submgr); err != nil { + panic(err) + } +} + +func (r *Resolver) HandleMsgs(ctx context.Context, submgr subnet.SubnetMgr) error { + // Register new message validator for resolver msgs. + v := NewValidator(submgr, r) + if err := r.pubsub.RegisterTopicValidator(SubnetResolverTopic(r.netName), v.Validate); err != nil { + return err + } + + log.Infof("subscribing to subnet content resolver topic %s", SubnetResolverTopic(r.netName)) + + // Subscribe to subnet resolver topic. + msgSub, err := r.pubsub.Subscribe(SubnetResolverTopic(r.netName)) //nolint + if err != nil { + return err + } + + // Start handle incoming resolver msg. + go r.HandleIncomingResolveMsg(ctx, msgSub) + return nil +} + +func (r *Resolver) Close() error { + // Unregister topic validator when resolver is closed. If not, when + // initializing it again registering the validator will fail. 
+ return r.pubsub.UnregisterTopicValidator(SubnetResolverTopic(r.netName)) +} +func (r *Resolver) shouldPull(c cid.Cid) bool { + r.lk.Lock() + defer r.lk.Unlock() + if time.Since(r.ongoingPull[c]) > retryTimeout { + r.ongoingPull[c] = time.Now() + return true + } + return false +} + +func (r *Resolver) pullSuccess(c cid.Cid) { + r.lk.Lock() + defer r.lk.Unlock() + delete(r.ongoingPull, c) +} + +func DecodeResolveMsg(b []byte) (*ResolveMsg, error) { + var bm ResolveMsg + if err := bm.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, err + } + + return &bm, nil +} + +func EncodeResolveMsg(m *ResolveMsg) ([]byte, error) { + w := new(bytes.Buffer) + if err := m.MarshalCBOR(w); err != nil { + return nil, err + } + return w.Bytes(), nil +} + +type Validator struct { + r *Resolver + submgr subnet.SubnetMgr +} + +func NewValidator(submgr subnet.SubnetMgr, r *Resolver) *Validator { + return &Validator{r, submgr} +} + +func (v *Validator) Validate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (res pubsub.ValidationResult) { + // Decode resolve msg + rmsg, err := DecodeResolveMsg(msg.GetData()) + if err != nil { + log.Errorf("error decoding resolve msg cid: %s", err) + return pubsub.ValidationReject + } + + log.Infof("Received cross-msg resolution message of type: %v from subnet %v", rmsg.Type, rmsg.From) + // Check the CID and messages sent are correct for push messages + if rmsg.Type == Push { + msgs := rmsg.CrossMsgs + c, err := msgs.Cid() + if err != nil { + log.Errorf("error computing cross-msgs cid: %s", err) + return pubsub.ValidationIgnore + } + if rmsg.Cid != c { + log.Errorf("cid computed for crossMsgs not equal to the one requested: %s", err) + return pubsub.ValidationReject + } + } + + // it's a correct message! 
make sure we've only seen it once + if count := v.r.addMsgReceipt(rmsg.Type, rmsg.Cid, msg.GetFrom()); count > 0 { + if pid == v.r.self { + log.Warnf("local block has been seen %d times; ignoring", count) + } + + return pubsub.ValidationIgnore + } + + // Process the resolveMsg, record error, and return gossipsub validation status. + sub, err := v.r.processResolveMsg(ctx, v.submgr, rmsg) + if err != nil { + log.Errorf("error processing resolve message: %s", err) + return sub + } + + // TODO: Any additional check? + + // Pass validated request. + // msg.ValidatorData = rmsg + + return pubsub.ValidationAccept +} + +func (r *Resolver) HandleIncomingResolveMsg(ctx context.Context, sub *pubsub.Subscription) { + for { + _, err := sub.Next(ctx) + if err != nil { + log.Warn("error from message subscription: ", err) + if ctx.Err() != nil { + log.Warn("quitting HandleResolveMessages loop") + return + } + log.Error("error from resolve-msg subscription: ", err) + continue + } + + // Do nothing... everything happens in validate + // Including message handling. + } +} + +func (r *Resolver) processResolveMsg(ctx context.Context, submgr subnet.SubnetMgr, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + switch rmsg.Type { + case Push: + return r.processPush(ctx, rmsg) + case PullMeta: + return r.processPull(submgr, rmsg) + case Response: + return r.processResponse(ctx, rmsg) + } + return pubsub.ValidationReject, xerrors.Errorf("Resolve message type is not valid") + +} + +func (r *Resolver) processPush(ctx context.Context, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Check if we are already storing the CrossMsgs CID locally. + _, found, err := r.getLocal(ctx, rmsg.Cid) + if err != nil { + return pubsub.ValidationIgnore, xerrors.Errorf("Error getting cross-msg locally: %w", err) + } + if found { + // Ignoring message, we already have these cross-msgs + return pubsub.ValidationIgnore, nil + } + // If not stored locally, store it in the datastore for future access. 
+ if err := r.setLocal(ctx, rmsg.Cid, &rmsg.CrossMsgs); err != nil { + return pubsub.ValidationIgnore, err + } + + // TODO: Introduce checks here to ensure that push messages come from the right + // source? + return pubsub.ValidationAccept, nil +} + +func (r *Resolver) processPull(submgr subnet.SubnetMgr, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Inspect the state of the SCA to get crossMsgs behind the CID. + st, store, err := submgr.GetSCAState(context.TODO(), r.netName) + if err != nil { + return pubsub.ValidationIgnore, err + } + msgs, found, err := st.GetCrossMsgs(store, rmsg.Cid) + if err != nil { + return pubsub.ValidationIgnore, err + } + if !found { + // Reject instead of ignore. Someone may be trying to spam us with + // random unvalid CIDs. + return pubsub.ValidationReject, xerrors.Errorf("couldn't find crossmsgs for msgMeta with cid: %s", rmsg.Cid) + } + // Send response + if err := r.PushCrossMsgs(*msgs, rmsg.From, true); err != nil { + return pubsub.ValidationIgnore, err + } + // Publish a Response message to the source subnet if the CID is found. + return pubsub.ValidationAccept, nil +} + +func (r *Resolver) processResponse(ctx context.Context, rmsg *ResolveMsg) (pubsub.ValidationResult, error) { + // Response messages are processed in the same way as push messages + // (at least for now). Is the validation what differs between them. 
+ if sub, err := r.processPush(ctx, rmsg); err != nil { + return sub, err + } + // If received successfully we can delete ongoingPull + r.pullSuccess(rmsg.Cid) + return pubsub.ValidationAccept, nil +} + +func (r *Resolver) getLocal(ctx context.Context, c cid.Cid) (*sca.CrossMsgs, bool, error) { + b, err := r.ds.Get(ctx, datastore.NewKey(c.String())) + if err != nil { + if err == datastore.ErrNotFound { + return nil, false, nil + } + return nil, false, err + } + out := &sca.CrossMsgs{} + if err := out.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + return nil, false, err + } + return out, true, nil +} + +func (r *Resolver) setLocal(ctx context.Context, c cid.Cid, msgs *sca.CrossMsgs) error { + w := new(bytes.Buffer) + if err := msgs.MarshalCBOR(w); err != nil { + return err + } + return r.ds.Put(ctx, datastore.NewKey(c.String()), w.Bytes()) +} + +func (r *Resolver) publishMsg(m *ResolveMsg, id address.SubnetID) error { + b, err := EncodeResolveMsg(m) + if err != nil { + return xerrors.Errorf("error serializing resolveMsg: %v", err) + } + return r.pubsub.Publish(SubnetResolverTopic(id), b) +} + +// WaitCrossMsgsResolved waits until crossMsgs for meta have been fully resolved +func (r *Resolver) WaitCrossMsgsResolved(ctx context.Context, c cid.Cid, from address.SubnetID) chan error { + out := make(chan error) + resolved := false + go func() { + var err error + for !resolved { + select { + case <-ctx.Done(): + out <- xerrors.Errorf("context timeout") + return + default: + // Check if crossMsg fully resolved. + _, resolved, err = r.ResolveCrossMsgs(ctx, c, address.SubnetID(from)) + if err != nil { + out <- err + } + // If not resolved wait two seconds to poll again and see if it has been resolved + // FIXME: This is not the best approach, but good enough for now. 
+ if !resolved { + time.Sleep(2 * time.Second) + } + } + } + close(out) + }() + return out +} + +func (r *Resolver) ResolveCrossMsgs(ctx context.Context, c cid.Cid, from address.SubnetID) ([]types.Message, bool, error) { + // FIXME: This function should keep track of the retries that have been done, + // and fallback to a 1:1 exchange if this fails. + cross, found, err := r.getLocal(ctx, c) + if err != nil { + return []types.Message{}, false, err + } + // If found, inspect messages and keep resolving metas + if found { + msgs := cross.Msgs + foundAll := true + // If there is some msgMeta to resolve, resolve it + for _, mt := range cross.Metas { + c, err := mt.Cid() + if err != nil { + return []types.Message{}, false, nil + } + // Recursively resolve crossMsg for meta + cross, found, err := r.ResolveCrossMsgs(ctx, c, address.SubnetID(mt.From)) + if err != nil { + return []types.Message{}, false, nil + } + // Append messages found + msgs = append(msgs, cross...) + foundAll = foundAll && found + } + if foundAll { + // Hurray! We resolved everything, ready to return. + return msgs, true, nil + } + + // We haven't resolved everything, wait for the next round to finish + // pulling everything. + // NOTE: We could consider still sending partial results here. + return []types.Message{}, true, nil + } + + // If not try to pull message + if r.shouldPull(c) { + return []types.Message{}, false, r.PullCrossMsgs(c, from) + } + + // If we shouldn't pull yet because we pulled recently + // do nothing for now, and notify that is wasn't resolved yet. 
+ return []types.Message{}, false, nil + +} + +func (r *Resolver) PushCrossMsgs(msgs sca.CrossMsgs, id address.SubnetID, isResponse bool) error { + c, err := msgs.Cid() + if err != nil { + return err + } + m := &ResolveMsg{ + Type: Push, + From: r.netName, + Cid: c, + CrossMsgs: msgs, + } + if isResponse { + m.Type = Response + } + return r.publishMsg(m, id) +} + +func (r *Resolver) PushMsgFromCheckpoint(ch *schema.Checkpoint, st *sca.SCAState, store adt.Store) error { + // For each crossMsgMeta + for _, meta := range ch.CrossMsgs() { + // Get the crossMsgs behind Cid from SCA state and push it. + c, err := meta.Cid() + if err != nil { + return err + } + msgs, found, err := st.GetCrossMsgs(store, c) + if err != nil { + return err + } + if !found { + return xerrors.Errorf("couldn't found crossmsgs for msgMeta with cid: %s", c) + } + // Push cross-msgs to subnet + if err = r.PushCrossMsgs(*msgs, address.SubnetID(meta.To), false); err != nil { + return err + } + } + return nil +} + +func (r *Resolver) PullCrossMsgs(c cid.Cid, id address.SubnetID) error { + m := &ResolveMsg{ + Type: PullMeta, + From: r.netName, + Cid: c, + } + return r.publishMsg(m, id) +} diff --git a/chain/consensus/hierarchical/subnet/resolver/resolver_test.go b/chain/consensus/hierarchical/subnet/resolver/resolver_test.go new file mode 100644 index 000000000..49a1e1737 --- /dev/null +++ b/chain/consensus/hierarchical/subnet/resolver/resolver_test.go @@ -0,0 +1,119 @@ +package resolver + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" + ltypes "github.com/filecoin-project/lotus/chain/types" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/ipfs/go-datastore" + "github.com/libp2p/go-libp2p" + pubsub "github.com/libp2p/go-libp2p-pubsub" + "github.com/stretchr/testify/require" +) + +func TestGetSet(t 
*testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + addr := tutil.NewIDAddr(t, 101) + msg := ltypes.Message{ + To: addr, + From: addr, + Value: abi.NewTokenAmount(1), + Nonce: 2, + GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + } + out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + r := NewResolver(h.ID(), ds, ps, address.RootSubnet) + out1, found, err := r.getLocal(ctx, msg.Cid()) + require.NoError(t, err) + require.False(t, found) + require.Nil(t, out1) + require.NoError(t, err) + err = r.setLocal(ctx, msg.Cid(), out) + require.NoError(t, err) + out2, found, err := r.getLocal(ctx, msg.Cid()) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, out, out2) +} + +func TestResolve(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + addr := tutil.NewIDAddr(t, 101) + msg := ltypes.Message{ + To: addr, + From: addr, + Value: abi.NewTokenAmount(1), + Nonce: 2, + GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + } + out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + r := NewResolver(h.ID(), ds, ps, address.RootSubnet) + c, _ := out.Cid() + _, found, err := r.ResolveCrossMsgs(ctx, c, address.RootSubnet) + require.NoError(t, err) + require.False(t, found) + err = r.setLocal(ctx, c, out) + require.NoError(t, err) + pulled, found, err := r.ResolveCrossMsgs(ctx, c, address.RootSubnet) + require.NoError(t, err) + require.True(t, found) + require.Equal(t, len(pulled), 1) + + // TODO: Test recursive resolve with Metas. 
+} + +func TestWaitResolve(t *testing.T) { + ctx := context.Background() + ds := datastore.NewMapDatastore() + h, err := libp2p.New() + require.NoError(t, err) + ps, err := pubsub.NewGossipSub(context.TODO(), h) + require.NoError(t, err) + addr := tutil.NewIDAddr(t, 101) + msg := ltypes.Message{ + To: addr, + From: addr, + Value: abi.NewTokenAmount(1), + Nonce: 2, + GasLimit: 1 << 30, // This is will be applied as an implicit msg, add enough gas + GasFeeCap: ltypes.NewInt(0), + GasPremium: ltypes.NewInt(0), + Params: nil, + } + out := &sca.CrossMsgs{Msgs: []ltypes.Message{msg}} + r := NewResolver(h.ID(), ds, ps, address.RootSubnet) + c, _ := out.Cid() + + // Wait for resolution. + found := r.WaitCrossMsgsResolved(context.TODO(), c, address.RootSubnet) + go func() { + // Wait one second, and store cross-msgs locally + time.Sleep(1 * time.Second) + err = r.setLocal(ctx, c, out) + require.NoError(t, err) + }() + + err = <-found + require.NoError(t, err) +} diff --git a/chain/consensus/hierarchical/subnet/utils.go b/chain/consensus/hierarchical/subnet/utils.go deleted file mode 100644 index 0a965b055..000000000 --- a/chain/consensus/hierarchical/subnet/utils.go +++ /dev/null @@ -1,51 +0,0 @@ -package subnet - -import ( - "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/beacon" - "github.com/filecoin-project/lotus/chain/consensus" - "github.com/filecoin-project/lotus/chain/consensus/delegcns" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" - "github.com/filecoin-project/lotus/chain/consensus/tspow" - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" - "golang.org/x/xerrors" -) - -func tipSetExecutor(consensus subnet.ConsensusType) (stmgr.Executor, error) { - switch consensus { - case subnet.Delegated: - return delegcns.TipSetExecutor(), nil - case subnet.PoW: - return 
tspow.TipSetExecutor(), nil - default: - return nil, xerrors.New("consensus type not suported") - } -} - -func weight(consensus subnet.ConsensusType) (store.WeightFunc, error) { - switch consensus { - case subnet.Delegated: - return delegcns.Weight, nil - case subnet.PoW: - return tspow.Weight, nil - default: - return nil, xerrors.New("consensus type not suported") - } -} - -func newConsensus(consensus subnet.ConsensusType, - sm *stmgr.StateManager, beacon beacon.Schedule, - verifier ffiwrapper.Verifier, - genesis chain.Genesis) (consensus.Consensus, error) { - - switch consensus { - case subnet.Delegated: - return delegcns.NewDelegatedConsensus(sm, beacon, verifier, genesis), nil - case subnet.PoW: - return tspow.NewTSPoWConsensus(sm, beacon, verifier, genesis), nil - default: - return nil, xerrors.New("consensus type not suported") - } -} diff --git a/chain/consensus/hierarchical/types.go b/chain/consensus/hierarchical/types.go new file mode 100644 index 000000000..576c31c3b --- /dev/null +++ b/chain/consensus/hierarchical/types.go @@ -0,0 +1,97 @@ +package hierarchical + +import ( + "strings" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" +) + +// ConsensusType for subnet +type ConsensusType uint64 + +// List of supported/implemented consensus for subnets. +const ( + Delegated ConsensusType = iota + PoW +) + +// MsgType of cross message +type MsgType uint64 + +// List of cross messages supported +const ( + Unknown MsgType = iota + BottomUp + TopDown +) + +// MsgType returns the +func GetMsgType(msg *types.Message) MsgType { + t := Unknown + + sto, err := msg.To.Subnet() + if err != nil { + return t + } + sfrom, err := msg.From.Subnet() + if err != nil { + return t + } + if IsBottomUp(sfrom, sto) { + return BottomUp + } + return TopDown +} + +// SubnetCoordActorAddr is the address of the SCA actor +// in a subnet. 
+// +// It is initialized in genesis with the +// address t064 +var SubnetCoordActorAddr = func() address.Address { + a, err := address.NewIDAddress(64) + if err != nil { + panic(err) + } + return a +}() + +// Implement keyer interface so it can be used as a +// key for maps +type SubnetKey address.SubnetID + +var _ abi.Keyer = SubnetKey("") + +func (id SubnetKey) Key() string { + return string(id) +} + +func IsBottomUp(from, to address.SubnetID) bool { + _, l := from.CommonParent(to) + sfrom := strings.Split(from.String(), "/") + return len(sfrom)-1 > l +} + +// ApplyAsBottomUp is used to determine if a cross-message in +// the current subnet needs to be applied as a top-down or +// bottom-up message according to the path its following (i.e. +// we process a message or a msgMeta). +func ApplyAsBottomUp(curr address.SubnetID, msg *types.Message) (bool, error) { + sto, err := msg.To.Subnet() + if err != nil { + return false, xerrors.Errorf("error getting subnet from hierarchical address in cross-msg") + } + sfrom, err := msg.From.Subnet() + if err != nil { + return false, xerrors.Errorf("error getting subnet from hierarchical address in cross-msg") + } + + mt := GetMsgType(msg) + cpcurr, _ := curr.CommonParent(sto) + cpfrom, _ := sfrom.CommonParent(sto) + return mt == BottomUp && cpcurr == cpfrom, nil + +} diff --git a/chain/consensus/hierarchical/types_test.go b/chain/consensus/hierarchical/types_test.go new file mode 100644 index 000000000..3b1765a43 --- /dev/null +++ b/chain/consensus/hierarchical/types_test.go @@ -0,0 +1,38 @@ +package hierarchical_test + +import ( + "testing" + + address "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/types" + tutil "github.com/filecoin-project/specs-actors/v7/support/testing" + "github.com/stretchr/testify/require" +) + +func TestBottomUp(t *testing.T) { + testBottomUp(t, "/root/a", "/root/a/b", false) + testBottomUp(t, 
"/root/c/a", "/root/a/b", true) + testBottomUp(t, "/root/c/a/d", "/root/c/a/e", true) + testBottomUp(t, "/root/c/a", "/root/c/b", true) +} + +func testBottomUp(t *testing.T, from, to string, bottomup bool) { + require.Equal(t, hierarchical.IsBottomUp( + address.SubnetID(from), address.SubnetID(to)), bottomup) +} + +func TestApplyAsBottomUp(t *testing.T) { + testApplyAsBottomUp(t, "/root/a", "/root", "/root/a/b", false) + testApplyAsBottomUp(t, "/root/a", "/root/a/b/c", "/root/a", true) + testApplyAsBottomUp(t, "/root/a", "/root/a/b/c", "/root/b/a", true) + testApplyAsBottomUp(t, "/root/a", "/root/b/a/c", "/root/a/b", false) +} + +func testApplyAsBottomUp(t *testing.T, curr, from, to string, bottomup bool) { + ff, _ := address.NewHAddress(address.SubnetID(from), tutil.NewIDAddr(t, 101)) + tt, _ := address.NewHAddress(address.SubnetID(to), tutil.NewIDAddr(t, 101)) + bu, err := hierarchical.ApplyAsBottomUp(address.SubnetID(curr), &types.Message{From: ff, To: tt}) + require.NoError(t, err) + require.Equal(t, bu, bottomup) +} diff --git a/chain/consensus/tspow/compute_state.go b/chain/consensus/tspow/compute_state.go deleted file mode 100644 index b2113098b..000000000 --- a/chain/consensus/tspow/compute_state.go +++ /dev/null @@ -1,234 +0,0 @@ -package tspow - -// FIXME: This is the same implementation as the one from delegcns. -// We should implement our own. 
-import ( - "context" - "sync/atomic" - - "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/consensus/actors/registry" - "github.com/filecoin-project/lotus/chain/rand" - "github.com/ipfs/go-cid" - cbg "github.com/whyrusleeping/cbor-gen" - "go.opencensus.io/stats" - "go.opencensus.io/trace" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/big" - blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" - - "github.com/filecoin-project/lotus/chain/stmgr" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" - "github.com/filecoin-project/lotus/metrics" -) - -func DefaultUpgradeSchedule() stmgr.UpgradeSchedule { - var us stmgr.UpgradeSchedule - - updates := []stmgr.Upgrade{{ - Height: -1, - Network: network.Version14, - Migration: nil, - Expensive: true, - }, - } - - for _, u := range updates { - if u.Height < 0 { - // upgrade disabled - continue - } - us = append(us, u) - } - return us -} - -type tipSetExecutor struct{} - -func (t *tipSetExecutor) NewActorRegistry() *vm.ActorRegistry { - return registry.NewActorRegistry() -} - -func TipSetExecutor() stmgr.Executor { - return &tipSetExecutor{} -} - -func (t *tipSetExecutor) ApplyBlocks(ctx context.Context, sm *stmgr.StateManager, parentEpoch abi.ChainEpoch, pstate cid.Cid, bms []store.BlockMessages, epoch abi.ChainEpoch, r vm.Rand, em stmgr.ExecMonitor, baseFee abi.TokenAmount, ts *types.TipSet) (cid.Cid, cid.Cid, error) { - done := metrics.Timer(ctx, metrics.VMApplyBlocksTotal) - defer done() - - partDone := metrics.Timer(ctx, metrics.VMApplyEarly) - defer func() { - partDone() - }() - - makeVmWithBaseState := func(base cid.Cid) (*vm.VM, error) { - vmopt := &vm.VMOpts{ - StateBase: base, - Epoch: epoch, - Rand: r, - Bstore: 
sm.ChainStore().StateBlockstore(), - Actors: registry.NewActorRegistry(), - Syscalls: sm.Syscalls, - CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, - BaseFee: baseFee, - LookbackState: stmgr.LookbackStateGetterForTipset(sm, ts), - } - - return sm.VMConstructor()(ctx, vmopt) - } - - vmi, err := makeVmWithBaseState(pstate) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) - } - - for i := parentEpoch; i < epoch; i++ { - // handle state forks - // XXX: The state tree - newState, err := sm.HandleStateForks(ctx, pstate, i, em, ts) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("error handling state forks: %w", err) - } - - if pstate != newState { - vmi, err = makeVmWithBaseState(newState) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("making vm: %w", err) - } - } - - vmi.SetBlockHeight(i + 1) - pstate = newState - } - - partDone() - partDone = metrics.Timer(ctx, metrics.VMApplyMessages) - - var receipts []cbg.CBORMarshaler - processedMsgs := make(map[cid.Cid]struct{}) - for _, b := range bms { - penalty := types.NewInt(0) - gasReward := big.Zero() - - for _, cm := range append(b.BlsMessages, b.SecpkMessages...) 
{ - m := cm.VMMessage() - if _, found := processedMsgs[m.Cid()]; found { - continue - } - r, err := vmi.ApplyMessage(ctx, cm) - if err != nil { - return cid.Undef, cid.Undef, err - } - - receipts = append(receipts, &r.MessageReceipt) - gasReward = big.Add(gasReward, r.GasCosts.MinerTip) - penalty = big.Add(penalty, r.GasCosts.MinerPenalty) - - if em != nil { - if err := em.MessageApplied(ctx, ts, cm.Cid(), m, r, false); err != nil { - return cid.Undef, cid.Undef, err - } - } - processedMsgs[m.Cid()] = struct{}{} - } - - rwMsg := &types.Message{ - From: reward.Address, - To: b.Miner, - Nonce: uint64(epoch), - Value: types.FromFil(1), // always reward 1 fil - GasFeeCap: types.NewInt(0), - GasPremium: types.NewInt(0), - GasLimit: 1 << 30, - Method: 0, - } - ret, actErr := vmi.ApplyImplicitMessage(ctx, rwMsg) - if actErr != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to apply reward message for miner %s: %w", b.Miner, actErr) - } - if em != nil { - if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("callback failed on reward message: %w", err) - } - } - - if ret.ExitCode != 0 { - return cid.Undef, cid.Undef, xerrors.Errorf("reward application message failed (exit %d): %s", ret.ExitCode, ret.ActorErr) - } - } - - partDone() - partDone = metrics.Timer(ctx, metrics.VMApplyCron) - - partDone() - partDone = metrics.Timer(ctx, metrics.VMApplyFlush) - - rectarr := blockadt.MakeEmptyArray(sm.ChainStore().ActorStore(ctx)) - for i, receipt := range receipts { - if err := rectarr.Set(uint64(i), receipt); err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) - } - } - rectroot, err := rectarr.Root() - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("failed to build receipts amt: %w", err) - } - - st, err := vmi.Flush(ctx) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("vm flush failed: %w", err) - } - - 
stats.Record(ctx, metrics.VMSends.M(int64(atomic.LoadUint64(&vm.StatSends))), - metrics.VMApplied.M(int64(atomic.LoadUint64(&vm.StatApplied)))) - - return st, rectroot, nil -} - -func (t *tipSetExecutor) ExecuteTipSet(ctx context.Context, sm *stmgr.StateManager, ts *types.TipSet, em stmgr.ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) { - ctx, span := trace.StartSpan(ctx, "computeTipSetState") - defer span.End() - - blks := ts.Blocks() - - for i := 0; i < len(blks); i++ { - for j := i + 1; j < len(blks); j++ { - if blks[i].Miner == blks[j].Miner { - return cid.Undef, cid.Undef, - xerrors.Errorf("duplicate miner in a tipset (%s %s)", - blks[i].Miner, blks[j].Miner) - } - } - } - - var parentEpoch abi.ChainEpoch - pstate := blks[0].ParentStateRoot - if blks[0].Height > 0 { - parent, err := sm.ChainStore().GetBlock(blks[0].Parents[0]) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting parent block: %w", err) - } - - parentEpoch = parent.Height - } - - r := rand.NewStateRand(sm.ChainStore(), ts.Cids(), nil) - - blkmsgs, err := sm.ChainStore().BlockMsgsForTipset(ts) - if err != nil { - return cid.Undef, cid.Undef, xerrors.Errorf("getting block messages for tipset: %w", err) - } - - baseFee := blks[0].ParentBaseFee - - return t.ApplyBlocks(ctx, sm, parentEpoch, pstate, blkmsgs, blks[0].Height, r, em, baseFee, ts) -} - -var _ stmgr.Executor = &tipSetExecutor{} diff --git a/chain/consensus/tspow/mine.go b/chain/consensus/tspow/mine.go index 00d1ef29c..4705897cf 100644 --- a/chain/consensus/tspow/mine.go +++ b/chain/consensus/tspow/mine.go @@ -7,16 +7,12 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/big" - "github.com/ipfs/go-cid" - "golang.org/x/xerrors" - - "github.com/filecoin-project/go-state-types/crypto" lapi "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" - - "github.com/filecoin-project/lotus/chain/consensus" - param 
"github.com/filecoin-project/lotus/chain/consensus/params" + common "github.com/filecoin-project/lotus/chain/consensus/common" + param "github.com/filecoin-project/lotus/chain/consensus/common/params" "github.com/filecoin-project/lotus/chain/types" + "golang.org/x/xerrors" ) func Mine(ctx context.Context, miner address.Address, api v1api.FullNode) error { @@ -62,6 +58,17 @@ func Mine(ctx context.Context, miner address.Address, api v1api.FullNode) error log.Errorw("selecting messages failed", "error", err) } + // Get cross-message pool from subnet. + nn, err := api.StateNetworkName(ctx) + if err != nil { + return err + } + crossmsgs, err := api.GetCrossMsgsPool(ctx, address.SubnetID(nn), base.Height()+1) + if err != nil { + log.Errorw("selecting cross-messages failed", "error", err) + } + log.Debugf("CrossMsgs being proposed in block @%s: %d", base.Height()+1, len(crossmsgs)) + bh, err := api.MinerCreateBlock(ctx, &lapi.BlockTemplate{ Miner: miner, Parents: types.NewTipSetKey(BestWorkBlock(base).Cid()), @@ -71,6 +78,7 @@ func Mine(ctx context.Context, miner address.Address, api v1api.FullNode) error Epoch: base.Height() + 1, Timestamp: uint64(time.Now().Unix()), WinningPoStProof: nil, + CrossMessages: crossmsgs, }) if err != nil { log.Errorw("creating block failed", "error", err) @@ -80,12 +88,13 @@ func Mine(ctx context.Context, miner address.Address, api v1api.FullNode) error continue } - log.Info("try PpW mining at @", base.Height(), base.String()) + log.Info("try PoW mining at @", base.Height(), base.String()) err = api.SyncSubmitBlock(ctx, &types.BlockMsg{ Header: bh.Header, BlsMessages: bh.BlsMessages, SecpkMessages: bh.SecpkMessages, + CrossMessages: bh.CrossMessages, }) if err != nil { log.Errorw("submitting block failed", "error", err) @@ -96,93 +105,11 @@ func Mine(ctx context.Context, miner address.Address, api v1api.FullNode) error } func (tsp *TSPoW) CreateBlock(ctx context.Context, w lapi.Wallet, bt *lapi.BlockTemplate) (*types.FullBlock, error) { - 
pts, err := tsp.sm.ChainStore().LoadTipSet(bt.Parents) - if err != nil { - return nil, xerrors.Errorf("failed to load parent tipset: %w", err) - } - - st, recpts, err := tsp.sm.TipSetState(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("failed to load tipset state: %w", err) - } - - next := &types.BlockHeader{ - Miner: bt.Miner, - Parents: bt.Parents.Cids(), - Ticket: bt.Ticket, - ElectionProof: bt.Eproof, - - BeaconEntries: bt.BeaconValues, - Height: bt.Epoch, - Timestamp: bt.Timestamp, - WinPoStProof: bt.WinningPoStProof, - ParentStateRoot: st, - ParentMessageReceipts: recpts, - } - - var blsMessages []*types.Message - var secpkMessages []*types.SignedMessage - - var blsMsgCids, secpkMsgCids []cid.Cid - var blsSigs []crypto.Signature - for _, msg := range bt.Messages { - if msg.Signature.Type == crypto.SigTypeBLS { - blsSigs = append(blsSigs, msg.Signature) - blsMessages = append(blsMessages, &msg.Message) - - c, err := tsp.sm.ChainStore().PutMessage(&msg.Message) - if err != nil { - return nil, err - } - - blsMsgCids = append(blsMsgCids, c) - } else { - c, err := tsp.sm.ChainStore().PutMessage(msg) - if err != nil { - return nil, err - } - - secpkMsgCids = append(secpkMsgCids, c) - secpkMessages = append(secpkMessages, msg) - } - } - - store := tsp.sm.ChainStore().ActorStore(ctx) - blsmsgroot, err := consensus.ToMessagesArray(store, blsMsgCids) - if err != nil { - return nil, xerrors.Errorf("building bls amt: %w", err) - } - secpkmsgroot, err := consensus.ToMessagesArray(store, secpkMsgCids) - if err != nil { - return nil, xerrors.Errorf("building secpk amt: %w", err) - } - - mmcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: blsmsgroot, - SecpkMessages: secpkmsgroot, - }) - if err != nil { - return nil, err - } - next.Messages = mmcid - - aggSig, err := consensus.AggregateSignatures(blsSigs) - if err != nil { - return nil, err - } - - next.BLSAggregate = aggSig - pweight, err := tsp.sm.ChainStore().Weight(ctx, pts) + b, err := 
common.PrepareBlockForSignature(ctx, tsp.sm, bt) if err != nil { return nil, err } - next.ParentWeight = pweight - - baseFee, err := tsp.sm.ChainStore().ComputeBaseFee(ctx, pts) - if err != nil { - return nil, xerrors.Errorf("computing base fee: %w", err) - } - next.ParentBaseFee = baseFee + next := b.Header tgt := big.Zero() tgt.SetBytes(next.Ticket.VRFProof) @@ -206,25 +133,10 @@ func (tsp *TSPoW) CreateBlock(ctx context.Context, w lapi.Wallet, bt *lapi.Block return nil, nil } - nosigbytes, err := next.SigningBytes() + err = common.SignBlock(ctx, w, b) if err != nil { - return nil, xerrors.Errorf("failed to get signing bytes for block: %w", err) - } - - sig, err := w.WalletSign(ctx, bt.Miner, nosigbytes, lapi.MsgMeta{ - Type: lapi.MTBlock, - }) - if err != nil { - return nil, xerrors.Errorf("failed to sign new block: %w", err) - } - - next.BlockSig = sig - - fullBlock := &types.FullBlock{ - Header: next, - BlsMessages: blsMessages, - SecpkMessages: secpkMessages, + return nil, err } - return fullBlock, nil + return b, nil } diff --git a/chain/consensus/tspow/tspow.go b/chain/consensus/tspow/tspow.go index 077d609fb..10d8c7d46 100644 --- a/chain/consensus/tspow/tspow.go +++ b/chain/consensus/tspow/tspow.go @@ -13,32 +13,29 @@ import ( "github.com/Gurpartap/async" "github.com/hashicorp/go-multierror" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" pubsub "github.com/libp2p/go-libp2p-pubsub" - cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/go-state-types/network" bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain" - "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/consensus" - param 
"github.com/filecoin-project/lotus/chain/consensus/params" - "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/consensus/common" + param "github.com/filecoin-project/lotus/chain/consensus/common/params" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" - blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" + "github.com/filecoin-project/lotus/node/modules/dtypes" ) var log = logging.Logger("tspow-consensus") @@ -92,30 +89,40 @@ type TSPoW struct { verifier ffiwrapper.Verifier genesis *types.TipSet + + subMgr subnet.SubnetMgr + + r *resolver.Resolver + + netName address.SubnetID } // Blocks that are more than MaxHeightDrift epochs above // the theoretical max height based on systime are quickly rejected const MaxHeightDrift = 5 -func NewTSPoWConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, verifier ffiwrapper.Verifier, genesis chain.Genesis) consensus.Consensus { +func NewTSPoWConsensus(sm *stmgr.StateManager, submgr subnet.SubnetMgr, beacon beacon.Schedule, r *resolver.Resolver, + verifier ffiwrapper.Verifier, genesis chain.Genesis, netName dtypes.NetworkName) consensus.Consensus { return &TSPoW{ store: sm.ChainStore(), beacon: beacon, + r: r, sm: sm, verifier: verifier, genesis: genesis, + subMgr: submgr, + netName: address.SubnetID(netName), } } func (tsp *TSPoW) ValidateBlock(ctx context.Context, b *types.FullBlock) (err error) { - if err := blockSanityChecks(b.Header); err != nil { + 
if err := common.BlockSanityChecks(hierarchical.PoW, b.Header); err != nil { return xerrors.Errorf("incoming header failed basic sanity checks: %w", err) } h := b.Header - baseTs, err := tsp.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) + baseTs, err := tsp.store.LoadTipSet(ctx, types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } @@ -160,16 +167,7 @@ func (tsp *TSPoW) ValidateBlock(ctx context.Context, b *types.FullBlock) (err er } } - msgsCheck := async.Err(func() error { - if b.Cid() == build.WhitelistedBlock { - return nil - } - - if err := tsp.checkBlockMessages(ctx, b, baseTs); err != nil { - return xerrors.Errorf("block had invalid messages: %w", err) - } - return nil - }) + msgsChecks := common.CheckMsgs(ctx, tsp.store, tsp.sm, tsp.subMgr, tsp.r, tsp.netName, b, baseTs) minerCheck := async.Err(func() error { if err := tsp.minerIsValid(h.Miner); err != nil { @@ -178,17 +176,6 @@ func (tsp *TSPoW) ValidateBlock(ctx context.Context, b *types.FullBlock) (err er return nil }) - baseFeeCheck := async.Err(func() error { - baseFee, err := tsp.store.ComputeBaseFee(ctx, baseTs) - if err != nil { - return xerrors.Errorf("computing base fee: %w", err) - } - if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { - return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", - b.Header.ParentBaseFee, baseFee) - } - return nil - }) pweight, err := Weight(context.TODO(), nil, baseTs) if err != nil { return xerrors.Errorf("getting parent weight: %w", err) @@ -199,48 +186,13 @@ func (tsp *TSPoW) ValidateBlock(ctx context.Context, b *types.FullBlock) (err er b.Header.ParentWeight, pweight) } - stateRootCheck := async.Err(func() error { - stateroot, precp, err := tsp.sm.TipSetState(ctx, baseTs) - if err != nil { - return xerrors.Errorf("get tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) - } - - if stateroot != h.ParentStateRoot { - msgs, err := 
tsp.store.MessagesForTipset(baseTs) - if err != nil { - log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) - } else { - log.Warn("Messages for tipset with mismatching state:") - for i, m := range msgs { - mm := m.VMMessage() - log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) - } - } - - return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) - } - - if precp != h.ParentMessageReceipts { - return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) - } - - return nil - }) - - blockSigCheck := async.Err(func() error { - if err := sigs.CheckBlockSignature(ctx, h, b.Header.Miner); err != nil { - return xerrors.Errorf("check block signature failed: %w", err) - } - return nil - }) + stateRootCheck := common.CheckStateRoot(ctx, tsp.store, tsp.sm, b, baseTs) await := []async.ErrorFuture{ minerCheck, - blockSigCheck, - msgsCheck, - baseFeeCheck, stateRootCheck, } + await = append(await, msgsChecks...) 
var merr error for _, fut := range await { @@ -270,194 +222,6 @@ func (tsp *TSPoW) ValidateBlock(ctx context.Context, b *types.FullBlock) (err er return nil } -func blockSanityChecks(h *types.BlockHeader) error { - /* if h.ElectionProof != nil { - return xerrors.Errorf("block must have nil election proof") - }*/ - - if h.Ticket == nil { - return xerrors.Errorf("block must not have nil ticket") - } - - if h.BlockSig == nil { - return xerrors.Errorf("block had nil signature") - } - - if h.BLSAggregate == nil { - return xerrors.Errorf("block had nil bls aggregate signature") - } - - if h.Miner.Protocol() != address.SECP256K1 { - return xerrors.Errorf("block had non-secp miner address") - } - - if len(h.Parents) != 1 { - return xerrors.Errorf("must have 1 parents") - } - - return nil -} - -// TODO: We should extract this somewhere else and make the message pool and miner use the same logic -func (tsp *TSPoW) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { - { - var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type - var pubks [][]byte - - for _, m := range b.BlsMessages { - sigCids = append(sigCids, m.Cid()) - - pubk, err := tsp.sm.GetBlsPublicKey(ctx, m.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to load bls public to validate block: %w", err) - } - - pubks = append(pubks, pubk) - } - - if err := consensus.VerifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { - return xerrors.Errorf("bls aggregate signature was invalid: %w", err) - } - } - - nonces := make(map[address.Address]uint64) - - stateroot, _, err := tsp.sm.TipSetState(ctx, baseTs) - if err != nil { - return err - } - - st, err := state.LoadStateTree(tsp.store.ActorStore(ctx), stateroot) - if err != nil { - return xerrors.Errorf("failed to load base state tree: %w", err) - } - - nv := tsp.sm.GetNtwkVersion(ctx, b.Header.Height) - pl := vm.PricelistByEpoch(baseTs.Height()) - 
var sumGasLimit int64 - checkMsg := func(msg types.ChainMsg) error { - m := msg.VMMessage() - - // Phase 1: syntactic validation, as defined in the spec - minGas := pl.OnChainMessage(msg.ChainLength()) - if err := m.ValidForBlockInclusion(minGas.Total(), nv); err != nil { - return err - } - - // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit - // So below is overflow safe - sumGasLimit += m.GasLimit - if sumGasLimit > build.BlockGasLimit { - return xerrors.Errorf("block gas limit exceeded") - } - - // Phase 2: (Partial) semantic validation: - // the sender exists and is an account actor, and the nonces make sense - var sender address.Address - if tsp.sm.GetNtwkVersion(ctx, b.Header.Height) >= network.Version13 { - sender, err = st.LookupID(m.From) - if err != nil { - return err - } - } else { - sender = m.From - } - - if _, ok := nonces[sender]; !ok { - // `GetActor` does not validate that this is an account actor. - act, err := st.GetActor(sender) - if err != nil { - return xerrors.Errorf("failed to get actor: %w", err) - } - - if !builtin.IsAccountActor(act.Code) { - return xerrors.New("Sender must be an account actor") - } - nonces[sender] = act.Nonce - } - - if nonces[sender] != m.Nonce { - return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[sender], m.Nonce) - } - nonces[sender]++ - - return nil - } - - // Validate message arrays in a temporary blockstore. 
- tmpbs := bstore.NewMemory() - tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) - - bmArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.BlsMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) - } - - c, err := store.PutMessage(tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - - k := cbg.CborCid(c) - if err := bmArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) - } - } - - smArr := blockadt.MakeEmptyArray(tmpstore) - for i, m := range b.SecpkMessages { - if err := checkMsg(m); err != nil { - return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) - } - - // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call - // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). - kaddr, err := tsp.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) - if err != nil { - return xerrors.Errorf("failed to resolve key addr: %w", err) - } - - if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { - return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) - } - - c, err := store.PutMessage(tmpbs, m) - if err != nil { - return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) - } - k := cbg.CborCid(c) - if err := smArr.Set(uint64(i), &k); err != nil { - return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return err - } - - smroot, err := smArr.Root() - if err != nil { - return err - } - - mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - if err != nil { - return err - } - - if b.Header.Messages != mrcid { - return fmt.Errorf("messages didnt match message root in header") - } - - // Finally, 
flush. - return vm.Copy(ctx, tmpbs, tsp.store.ChainBlockstore(), mrcid) -} - func (tsp *TSPoW) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { if tsp.genesis == nil { return false @@ -515,7 +279,7 @@ func Weight(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (t func (tsp *TSPoW) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsub.Message) (pubsub.ValidationResult, string) { if self { - return tsp.validateLocalBlock(ctx, msg) + return common.ValidateLocalBlock(ctx, msg) } // track validation time @@ -531,7 +295,7 @@ func (tsp *TSPoW) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsu panic(what) } - blk, what, err := tsp.decodeAndCheckBlock(msg) + blk, what, err := common.DecodeAndCheckBlock(msg) if err != nil { log.Error("got invalid block over pubsub: ", err) recordFailureFlagPeer(what) @@ -539,7 +303,7 @@ func (tsp *TSPoW) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsu } // validate the block meta: the Message CID in the header must match the included messages - err = tsp.validateMsgMeta(ctx, blk) + err = common.ValidateMsgMeta(ctx, blk) if err != nil { log.Warnf("error validating message metadata: %s", err) recordFailureFlagPeer("invalid_block_meta") @@ -562,90 +326,6 @@ func (tsp *TSPoW) ValidateBlockPubsub(ctx context.Context, self bool, msg *pubsu return pubsub.ValidationAccept, "" } -func (tsp *TSPoW) validateLocalBlock(ctx context.Context, msg *pubsub.Message) (pubsub.ValidationResult, string) { - stats.Record(ctx, metrics.BlockPublished.M(1)) - - if size := msg.Size(); size > 1<<20-1<<15 { - log.Errorf("ignoring oversize block (%dB)", size) - return pubsub.ValidationIgnore, "oversize_block" - } - - blk, what, err := tsp.decodeAndCheckBlock(msg) - if err != nil { - log.Errorf("got invalid local block: %s", err) - return pubsub.ValidationIgnore, what - } - - msg.ValidatorData = blk - stats.Record(ctx, metrics.BlockValidationSuccess.M(1)) - return pubsub.ValidationAccept, "" -} - -func (tsp *TSPoW) 
decodeAndCheckBlock(msg *pubsub.Message) (*types.BlockMsg, string, error) { - blk, err := types.DecodeBlockMsg(msg.GetData()) - if err != nil { - return nil, "invalid", xerrors.Errorf("error decoding block: %w", err) - } - - if count := len(blk.BlsMessages) + len(blk.SecpkMessages); count > build.BlockMessageLimit { - return nil, "too_many_messages", fmt.Errorf("block contains too many messages (%d)", count) - } - - // make sure we have a signature - if blk.Header.BlockSig == nil { - return nil, "missing_signature", fmt.Errorf("block without a signature") - } - - return blk, "", nil -} - -func (tsp *TSPoW) validateMsgMeta(ctx context.Context, msg *types.BlockMsg) error { - // TODO there has to be a simpler way to do this without the blockstore dance - // block headers use adt0 - store := blockadt.WrapStore(ctx, cbor.NewCborStore(bstore.NewMemory())) - bmArr := blockadt.MakeEmptyArray(store) - smArr := blockadt.MakeEmptyArray(store) - - for i, m := range msg.BlsMessages { - c := cbg.CborCid(m) - if err := bmArr.Set(uint64(i), &c); err != nil { - return err - } - } - - for i, m := range msg.SecpkMessages { - c := cbg.CborCid(m) - if err := smArr.Set(uint64(i), &c); err != nil { - return err - } - } - - bmroot, err := bmArr.Root() - if err != nil { - return err - } - - smroot, err := smArr.Root() - if err != nil { - return err - } - - mrcid, err := store.Put(store.Context(), &types.MsgMeta{ - BlsMessages: bmroot, - SecpkMessages: smroot, - }) - - if err != nil { - return err - } - - if msg.Header.Messages != mrcid { - return fmt.Errorf("messages didn't match root cid in header") - } - - return nil -} - func (tsp *TSPoW) validateBlockHeader(ctx context.Context, b *types.BlockHeader) (rejectReason string, err error) { if err := tsp.minerIsValid(b.Miner); err != nil { return err.Error(), err diff --git a/chain/events/events_called.go b/chain/events/events_called.go index ffca57d5b..aa8f05b66 100644 --- a/chain/events/events_called.go +++ b/chain/events/events_called.go 
@@ -488,8 +488,12 @@ func (me *messageEvents) messagesForTs(ts *types.TipSet, consume func(*types.Mes seen[c] = struct{}{} if i < len(msgs.BlsMessages) { consume(msgs.BlsMessages[i]) - } else { + } else if i < len(msgs.SecpkMessages) { consume(&msgs.SecpkMessages[i-len(msgs.BlsMessages)].Message) + } else { + // NOTE: We don't listen for events in cross-shard messages + // at this point. We'll see if this is needed in the future. + continue } } } diff --git a/chain/events/events_test.go b/chain/events/events_test.go index 61dd25fbb..5f52cbd92 100644 --- a/chain/events/events_test.go +++ b/chain/events/events_test.go @@ -87,7 +87,7 @@ func (fcs *fakeCS) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ( } // copied from the chainstore - revert, apply, err := store.ReorgOps(func(tsk types.TipSetKey) (*types.TipSet, error) { + revert, apply, err := store.ReorgOps(ctx, func(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { return fcs.ChainGetTipSet(ctx, tsk) }, fromTs, toTs) if err != nil { diff --git a/chain/events/state/mock/api.go b/chain/events/state/mock/api.go index 2ed48dc39..7a73355a5 100644 --- a/chain/events/state/mock/api.go +++ b/chain/events/state/mock/api.go @@ -27,11 +27,11 @@ func NewMockAPI(bs blockstore.Blockstore) *MockAPI { } func (m *MockAPI) ChainHasObj(ctx context.Context, c cid.Cid) (bool, error) { - return m.bs.Has(c) + return m.bs.Has(ctx, c) } func (m *MockAPI) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { - blk, err := m.bs.Get(c) + blk, err := m.bs.Get(ctx, c) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } diff --git a/chain/exchange/cbor_gen.go b/chain/exchange/cbor_gen.go index 7a8597fd0..4b29cfcd4 100644 --- a/chain/exchange/cbor_gen.go +++ b/chain/exchange/cbor_gen.go @@ -259,7 +259,7 @@ func (t *Response) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufCompactedMessages = []byte{132} +var lengthBufCompactedMessages = []byte{134} func (t 
*CompactedMessages) MarshalCBOR(w io.Writer) error { if t == nil { @@ -345,6 +345,43 @@ func (t *CompactedMessages) MarshalCBOR(w io.Writer) error { } } } + + // t.Cross ([]*types.Message) (slice) + if len(t.Cross) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.Cross was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.Cross))); err != nil { + return err + } + for _, v := range t.Cross { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.CrossIncludes ([][]uint64) (slice) + if len(t.CrossIncludes) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.CrossIncludes was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CrossIncludes))); err != nil { + return err + } + for _, v := range t.CrossIncludes { + if len(v) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field v was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(v))); err != nil { + return err + } + for _, v := range v { + if err := cbg.CborWriteHeader(w, cbg.MajUnsignedInt, uint64(v)); err != nil { + return err + } + } + } return nil } @@ -362,7 +399,7 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 4 { + if extra != 6 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -542,6 +579,94 @@ func (t *CompactedMessages) UnmarshalCBOR(r io.Reader) error { } } + // t.Cross ([]*types.Message) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.Cross: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.Cross = make([]*types.Message, extra) + } + + for i := 0; i < int(extra); i++ { + + var v types.Message + if err := v.UnmarshalCBOR(br); 
err != nil { + return err + } + + t.Cross[i] = &v + } + + // t.CrossIncludes ([][]uint64) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.CrossIncludes: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.CrossIncludes = make([][]uint64, extra) + } + + for i := 0; i < int(extra); i++ { + { + var maj byte + var extra uint64 + var err error + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.CrossIncludes[i]: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.CrossIncludes[i] = make([]uint64, extra) + } + + for j := 0; j < int(extra); j++ { + + maj, val, err := cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return xerrors.Errorf("failed to read uint64 for t.CrossIncludes[i] slice: %w", err) + } + + if maj != cbg.MajUnsignedInt { + return xerrors.Errorf("value read for array t.CrossIncludes[i] was not a uint, instead got %d", maj) + } + + t.CrossIncludes[i][j] = uint64(val) + } + + } + } + return nil } diff --git a/chain/exchange/client.go b/chain/exchange/client.go index 722790698..20e463bef 100644 --- a/chain/exchange/client.go +++ b/chain/exchange/client.go @@ -266,6 +266,7 @@ func (c *client) processResponse(req *Request, res *Response, tipsets []*types.T Blocks: tipsets[i].Blocks(), Messages: resChain.Messages, } + chain = append(chain, next) } @@ -295,6 +296,11 @@ func (c *client) validateCompressedIndices(chain []*BSTipSet) error { len(msgs.SecpkIncludes), blocksNum) } + if len(msgs.CrossIncludes) != blocksNum { + return xerrors.Errorf("CrossIncludes (%d) does not match number of blocks (%d)", + len(msgs.CrossIncludes), blocksNum) + } + for blockIdx := 0; blockIdx < blocksNum; blockIdx++ { for _, 
mi := range msgs.BlsIncludes[blockIdx] { if int(mi) >= len(msgs.Bls) { @@ -309,6 +315,12 @@ func (c *client) validateCompressedIndices(chain []*BSTipSet) error { mi, len(msgs.Secpk)) } } + for _, mi := range msgs.CrossIncludes[blockIdx] { + if int(mi) >= len(msgs.Cross) { + return xerrors.Errorf("index in CrossIncludes (%d) exceeds number of messages (%d)", + mi, len(msgs.Cross)) + } + } } } @@ -384,7 +396,6 @@ func (c *client) GetChainMessages(ctx context.Context, tipsets []*types.TipSet) if err != nil { return nil, err } - return validRes.messages, nil } diff --git a/chain/exchange/protocol.go b/chain/exchange/protocol.go index d0977e54c..5b4423538 100644 --- a/chain/exchange/protocol.go +++ b/chain/exchange/protocol.go @@ -163,6 +163,9 @@ type CompactedMessages struct { Secpk []*types.SignedMessage SecpkIncludes [][]uint64 + + Cross []*types.Message + CrossIncludes [][]uint64 } // Response that has been validated according to the protocol @@ -198,6 +201,9 @@ func (res *validatedResponse) toFullTipSets() []*store.FullTipSet { for _, mi := range msgs.SecpkIncludes[blockIdx] { fb.SecpkMessages = append(fb.SecpkMessages, msgs.Secpk[mi]) } + for _, mi := range msgs.CrossIncludes[blockIdx] { + fb.CrossMessages = append(fb.CrossMessages, msgs.Cross[mi]) + } fts.Blocks = append(fts.Blocks, fb) } diff --git a/chain/exchange/server.go b/chain/exchange/server.go index 7c1624e57..adb9e7ac2 100644 --- a/chain/exchange/server.go +++ b/chain/exchange/server.go @@ -136,7 +136,7 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re _, span := trace.StartSpan(ctx, "chainxchg.ServiceRequest") defer span.End() - chain, err := collectChainSegment(s.cs, req) + chain, err := collectChainSegment(ctx, s.cs, req) if err != nil { log.Warn("block sync request: collectChainSegment failed: ", err) return &Response{ @@ -156,13 +156,13 @@ func (s *server) serviceRequest(ctx context.Context, req *validatedRequest) (*Re }, nil } -func collectChainSegment(cs 
*store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { +func collectChainSegment(ctx context.Context, cs *store.ChainStore, req *validatedRequest) ([]*BSTipSet, error) { var bstips []*BSTipSet cur := req.head for { var bst BSTipSet - ts, err := cs.LoadTipSet(cur) + ts, err := cs.LoadTipSet(ctx, cur) if err != nil { return nil, xerrors.Errorf("failed loading tipset %s: %w", cur, err) } @@ -172,7 +172,7 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS } if req.options.IncludeMessages { - bmsgs, bmincl, smsgs, smincl, err := gatherMessages(cs, ts) + bmsgs, bmincl, smsgs, smincl, crossmsg, crossincl, err := gatherMessages(ctx, cs, ts) if err != nil { return nil, xerrors.Errorf("gather messages failed: %w", err) } @@ -183,6 +183,8 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS bst.Messages.BlsIncludes = bmincl bst.Messages.Secpk = smsgs bst.Messages.SecpkIncludes = smincl + bst.Messages.Cross = crossmsg + bst.Messages.CrossIncludes = crossincl } bstips = append(bstips, &bst) @@ -197,16 +199,20 @@ func collectChainSegment(cs *store.ChainStore, req *validatedRequest) ([]*BSTipS } } -func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, []*types.SignedMessage, [][]uint64, error) { +func gatherMessages(ctx context.Context, cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [][]uint64, + []*types.SignedMessage, [][]uint64, + []*types.Message, [][]uint64, + error) { blsmsgmap := make(map[cid.Cid]uint64) secpkmsgmap := make(map[cid.Cid]uint64) - var secpkincl, blsincl [][]uint64 + crossmsgmap := make(map[cid.Cid]uint64) + var secpkincl, blsincl, crossincl [][]uint64 - var blscids, secpkcids []cid.Cid + var blscids, secpkcids, crosscids []cid.Cid for _, block := range ts.Blocks() { - bc, sc, err := cs.ReadMsgMetaCids(block.Messages) + bc, sc, crossc, err := cs.ReadMsgMetaCids(ctx, block.Messages) if err != nil { - return nil, nil, nil, nil, err + return 
nil, nil, nil, nil, nil, nil, err } // FIXME: DRY. Use `chain.Message` interface. @@ -235,17 +241,35 @@ func gatherMessages(cs *store.ChainStore, ts *types.TipSet) ([]*types.Message, [ smi = append(smi, i) } secpkincl = append(secpkincl, smi) + + crossmi := make([]uint64, 0, len(crossc)) + for _, m := range crossc { + i, ok := crossmsgmap[m] + if !ok { + i = uint64(len(crosscids)) + crosscids = append(crosscids, m) + crossmsgmap[m] = i + } + + crossmi = append(crossmi, i) + } + crossincl = append(crossincl, crossmi) + } + + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) + if err != nil { + return nil, nil, nil, nil, nil, nil, err } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + secpkmsgs, err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, err } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + crossmsgs, err := cs.LoadMessagesFromCids(ctx, crosscids) if err != nil { - return nil, nil, nil, nil, err + return nil, nil, nil, nil, nil, nil, err } - return blsmsgs, blsincl, secpkmsgs, secpkincl, nil + return blsmsgs, blsincl, secpkmsgs, secpkincl, crossmsgs, crossincl, nil } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 1e9ccc422..2325fa793 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -9,6 +9,8 @@ import ( "sync/atomic" "time" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/go-state-types/network" @@ -31,6 +33,7 @@ import ( "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/blockstore" + bstore "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/policy" "github.com/filecoin-project/lotus/chain/beacon" @@ -234,12 +237,19 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS return nil, 
xerrors.Errorf("make genesis block failed: %w", err) } - cs := store.NewChainStore(bs, bs, ds, nil, j) + weight := func(ctx context.Context, stateBs bstore.Blockstore, ts *types.TipSet) (types.BigInt, error) { + if ts == nil { + return types.NewInt(0), nil + } + + return big.NewInt(int64(ts.Height() + 1)), nil + } + cs := store.NewChainStore(bs, bs, ds, weight, j) genfb := &types.FullBlock{Header: genb.Genesis} gents := store.NewFullTipSet([]*types.FullBlock{genfb}) - if err := cs.SetGenesis(genb.Genesis); err != nil { + if err := cs.SetGenesis(context.TODO(), genb.Genesis); err != nil { return nil, xerrors.Errorf("set genesis failed: %w", err) } @@ -256,7 +266,9 @@ func NewGeneratorWithSectorsAndUpgradeSchedule(numSectors int, us stmgr.UpgradeS //return nil, xerrors.Errorf("creating drand beacon: %w", err) //} - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), sys, us, beac) + // TODO: No cross-msgs in genesis, we can use a nil resolver + // sh.r = resolver.NewResolver(s.self, sh.ds, sh.pubsub, sh.ID) + sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, sys, us, beac) if err != nil { return nil, xerrors.Errorf("initing stmgr: %w", err) } @@ -459,7 +471,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, if et != nil { // TODO: maybe think about passing in more real parameters to this? 
- wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil) + wpost, err := cg.eppProvs[m].ComputeProof(context.TODO(), nil, nil, round, network.Version0) if err != nil { return nil, err } @@ -469,7 +481,7 @@ func (cg *ChainGen) NextTipSetFromMinersWithMessagesAndNulls(base *types.TipSet, return nil, xerrors.Errorf("making a block for next tipset failed: %w", err) } - if err := cg.cs.PersistBlockHeaders(fblk.Header); err != nil { + if err := cg.cs.PersistBlockHeaders(context.TODO(), fblk.Header); err != nil { return nil, xerrors.Errorf("chainstore AddBlock: %w", err) } @@ -618,7 +630,7 @@ func (mca mca) WalletSign(ctx context.Context, a address.Address, v []byte) (*cr type WinningPoStProver interface { GenerateCandidates(context.Context, abi.PoStRandomness, uint64) ([]uint64, error) - ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) + ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) } type wppProvider struct{} @@ -627,7 +639,7 @@ func (wpp *wppProvider) GenerateCandidates(ctx context.Context, _ abi.PoStRandom return []uint64{0}, nil } -func (wpp *wppProvider) ComputeProof(context.Context, []proof5.SectorInfo, abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (wpp *wppProvider) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof5.PoStProof, error) { return ValidWpostForTesting, nil } @@ -686,11 +698,15 @@ func (m genFakeVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri panic("not supported") } -func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + panic("not supported") +} + +func (m genFakeVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) 
(bool, error) { panic("not supported") } -func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (m genFakeVerifier) VerifyWindowPoSt(ctx context.Context, info proof7.WindowPoStVerifyInfo) (bool, error) { panic("not supported") } diff --git a/chain/gen/genesis/genesis.go b/chain/gen/genesis/genesis.go index 29f03e2af..c95e2136a 100644 --- a/chain/gen/genesis/genesis.go +++ b/chain/gen/genesis/genesis.go @@ -479,6 +479,10 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca verifNeeds := make(map[address.Address]abi.PaddedPieceSize) var sum abi.PaddedPieceSize + csc := func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + vmopt := vm.VMOpts{ StateBase: stateroot, Epoch: 0, @@ -486,11 +490,9 @@ func VerifyPreSealedData(ctx context.Context, cs *store.ChainStore, sys vm.Sysca Bstore: cs.StateBlockstore(), Actors: filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), - CircSupplyCalc: nil, - NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { - return nv - }, - BaseFee: types.NewInt(0), + CircSupplyCalc: csc, + NetworkVersion: nv, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, &vmopt) if err != nil { @@ -586,12 +588,13 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto mm := &types.MsgMeta{ BlsMessages: emptyroot, SecpkMessages: emptyroot, + CrossMessages: emptyroot, } mmb, err := mm.ToStorageBlock() if err != nil { return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) } - if err := bs.Put(mmb); err != nil { + if err := bs.Put(ctx, mmb); err != nil { return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) } @@ -621,7 +624,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("filecoinGenesisCid != gblk.Cid") } - if err := bs.Put(gblk); err != nil { + if err := 
bs.Put(ctx, gblk); err != nil { return nil, xerrors.Errorf("failed writing filecoin genesis block to blockstore: %w", err) } @@ -652,7 +655,7 @@ func MakeGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blocksto return nil, xerrors.Errorf("serializing block header failed: %w", err) } - if err := bs.Put(sb); err != nil { + if err := bs.Put(ctx, sb); err != nil { return nil, xerrors.Errorf("putting header to blockstore: %w", err) } diff --git a/chain/gen/genesis/miners.go b/chain/gen/genesis/miners.go index edacfe304..274918147 100644 --- a/chain/gen/genesis/miners.go +++ b/chain/gen/genesis/miners.go @@ -6,6 +6,8 @@ import ( "fmt" "math/rand" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" "github.com/ipfs/go-cid" @@ -29,7 +31,6 @@ import ( market4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/market" power4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/power" reward4 "github.com/filecoin-project/specs-actors/v4/actors/builtin/reward" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" @@ -57,7 +58,7 @@ func MinerAddress(genesisIndex uint64) address.Address { } type fakedSigSyscalls struct { - runtime5.Syscalls + runtime7.Syscalls } func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer address.Address, plaintext []byte) error { @@ -65,7 +66,7 @@ func (fss *fakedSigSyscalls) VerifySignature(signature crypto.Signature, signer } func mkFakedSigSyscalls(base vm.SyscallBuilder) vm.SyscallBuilder { - return func(ctx context.Context, rt *vm.Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *vm.Runtime) runtime7.Syscalls { return &fakedSigSyscalls{ base(ctx, rt), } @@ -93,10 +94,8 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal Actors: 
filcns.NewActorRegistry(), Syscalls: mkFakedSigSyscalls(sys), CircSupplyCalc: csc, - NtwkVersion: func(_ context.Context, _ abi.ChainEpoch) network.Version { - return nv - }, - BaseFee: types.NewInt(0), + NetworkVersion: nv, + BaseFee: types.NewInt(0), } vm, err := vm.NewVM(ctx, vmopt) @@ -509,31 +508,13 @@ func SetupStorageMiners(ctx context.Context, cs *store.ChainStore, sys vm.Syscal // TODO: copied from actors test harness, deduplicate or remove from here type fakeRand struct{} -func (fr *fakeRand) GetChainRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetChainRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) GetChainRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch * 1000))).Read(out) //nolint return out, nil } -func (fr *fakeRand) GetBeaconRandomnessV3(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV2(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { - out := make([]byte, 32) - _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint - return out, nil -} - -func (fr *fakeRand) GetBeaconRandomnessV1(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (fr *fakeRand) 
GetBeaconRandomness(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { out := make([]byte, 32) _, _ = rand.New(rand.NewSource(int64(randEpoch))).Read(out) //nolint return out, nil diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index 5edcd5439..de3af5825 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -1,6 +1,7 @@ package slashfilter import ( + "context" "fmt" "github.com/filecoin-project/lotus/build" @@ -27,7 +28,7 @@ func New(dstore ds.Batching) *SlashFilter { } } -func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { +func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { return nil } @@ -35,7 +36,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) - if err := checkFault(f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { + if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { return err } } @@ -43,7 +44,7 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes())) { // time-offset mining faults (2 blocks with the same parents) - if err := checkFault(f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { + if err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { return err } } @@ -53,14 +54,14 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo // First check if we have mined a block on the parent epoch parentEpochKey := 
ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) - have, err := f.byEpoch.Has(parentEpochKey) + have, err := f.byEpoch.Has(ctx, parentEpochKey) if err != nil { return err } if have { // If we had, make sure it's in our parent tipset - cidb, err := f.byEpoch.Get(parentEpochKey) + cidb, err := f.byEpoch.Get(ctx, parentEpochKey) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } @@ -83,25 +84,25 @@ func (f *SlashFilter) MinedBlock(bh *types.BlockHeader, parentEpoch abi.ChainEpo } } - if err := f.byParents.Put(parentsKey, bh.Cid().Bytes()); err != nil { + if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } - if err := f.byEpoch.Put(epochKey, bh.Cid().Bytes()); err != nil { + if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil { return xerrors.Errorf("putting byEpoch entry: %w", err) } return nil } -func checkFault(t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { - fault, err := t.Has(key) +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) error { + fault, err := t.Has(ctx, key) if err != nil { return err } if fault { - cidb, err := t.Get(key) + cidb, err := t.Get(ctx, key) if err != nil { return xerrors.Errorf("getting other block cid: %w", err) } diff --git a/chain/market/fundmanager.go b/chain/market/fundmanager.go index 5becfdfa7..e934201d7 100644 --- a/chain/market/fundmanager.go +++ b/chain/market/fundmanager.go @@ -89,7 +89,7 @@ func (fm *FundManager) Start() error { // - in State() only load addresses with in-progress messages // - load the others just-in-time from getFundedAddress // - delete(fm.fundedAddrs, addr) when the queue has been processed - return fm.str.forEach(func(state *FundedAddressState) { + return fm.str.forEach(fm.ctx, func(state *FundedAddressState) { fa := newFundedAddress(fm, state.Addr) fa.state = state 
fm.fundedAddrs[fa.state.Addr] = fa @@ -322,7 +322,7 @@ func (a *fundedAddress) clearWaitState() { // Save state to datastore func (a *fundedAddress) saveState() { // Not much we can do if saving to the datastore fails, just log - err := a.str.save(a.state) + err := a.str.save(a.ctx, a.state) if err != nil { log.Errorf("saving state to store for addr %s: %v", a.state.Addr, err) } diff --git a/chain/market/store.go b/chain/market/store.go index e0d0e10be..9818b1d80 100644 --- a/chain/market/store.go +++ b/chain/market/store.go @@ -2,6 +2,7 @@ package market import ( "bytes" + "context" cborrpc "github.com/filecoin-project/go-cbor-util" "github.com/ipfs/go-datastore" @@ -27,7 +28,7 @@ func newStore(ds dtypes.MetadataDS) *Store { } // save the state to the datastore -func (ps *Store) save(state *FundedAddressState) error { +func (ps *Store) save(ctx context.Context, state *FundedAddressState) error { k := dskeyForAddr(state.Addr) b, err := cborrpc.Dump(state) @@ -35,14 +36,14 @@ func (ps *Store) save(state *FundedAddressState) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // get the state for the given address -func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { +func (ps *Store) get(ctx context.Context, addr address.Address) (*FundedAddressState, error) { k := dskeyForAddr(addr) - data, err := ps.ds.Get(k) + data, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -56,8 +57,8 @@ func (ps *Store) get(addr address.Address) (*FundedAddressState, error) { } // forEach calls iter with each address in the datastore -func (ps *Store) forEach(iter func(*FundedAddressState)) error { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyAddr}) +func (ps *Store) forEach(ctx context.Context, iter func(*FundedAddressState)) error { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyAddr}) if err != nil { return err } diff --git a/chain/messagepool/config.go b/chain/messagepool/config.go index a511f84b7..3c07a8a0f 
100644 --- a/chain/messagepool/config.go +++ b/chain/messagepool/config.go @@ -1,6 +1,7 @@ package messagepool import ( + "context" "encoding/json" "fmt" "time" @@ -20,8 +21,8 @@ var ( ConfigKey = datastore.NewKey("/mpool/config") ) -func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { - haveCfg, err := ds.Has(ConfigKey) +func loadConfig(ctx context.Context, ds dtypes.MetadataDS) (*types.MpoolConfig, error) { + haveCfg, err := ds.Has(ctx, ConfigKey) if err != nil { return nil, err } @@ -30,7 +31,7 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return DefaultConfig(), nil } - cfgBytes, err := ds.Get(ConfigKey) + cfgBytes, err := ds.Get(ctx, ConfigKey) if err != nil { return nil, err } @@ -39,12 +40,12 @@ func loadConfig(ds dtypes.MetadataDS) (*types.MpoolConfig, error) { return cfg, err } -func saveConfig(cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { +func saveConfig(ctx context.Context, cfg *types.MpoolConfig, ds dtypes.MetadataDS) error { cfgBytes, err := json.Marshal(cfg) if err != nil { return err } - return ds.Put(ConfigKey, cfgBytes) + return ds.Put(ctx, ConfigKey, cfgBytes) } func (mp *MessagePool) GetConfig() *types.MpoolConfig { @@ -68,7 +69,7 @@ func validateConfg(cfg *types.MpoolConfig) error { return nil } -func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { +func (mp *MessagePool) SetConfig(ctx context.Context, cfg *types.MpoolConfig) error { if err := validateConfg(cfg); err != nil { return err } @@ -76,7 +77,7 @@ func (mp *MessagePool) SetConfig(cfg *types.MpoolConfig) error { mp.cfgLk.Lock() mp.cfg = cfg - err := saveConfig(cfg, mp.ds) + err := saveConfig(ctx, cfg, mp.ds) if err != nil { log.Warnf("error persisting mpool config: %s", err) } diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 06343e9c9..76647e331 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -173,10 +173,17 @@ type MessagePool struct { sigValCache 
*lru.TwoQueueCache + nonceCache *lru.Cache + evtTypes [3]journal.EventType journal journal.Journal } +type nonceCacheKey struct { + tsk types.TipSetKey + addr address.Address +} + type msgSet struct { msgs map[uint64]*types.SignedMessage nextNonce uint64 @@ -196,10 +203,10 @@ func ComputeMinRBF(curPrem abi.TokenAmount) abi.TokenAmount { return types.BigAdd(minPrice, types.NewInt(1)) } -func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSepc *api.MessageSendSpec) { +func CapGasFee(mff dtypes.DefaultMaxFeeFunc, msg *types.Message, sendSpec *api.MessageSendSpec) { var maxFee abi.TokenAmount - if sendSepc != nil { - maxFee = sendSepc.MaxFee + if sendSpec != nil { + maxFee = sendSpec.MaxFee } if maxFee.Int == nil || maxFee.Equals(big.Zero()) { mf, err := mff() @@ -358,11 +365,12 @@ func (ms *msgSet) toSlice() []*types.SignedMessage { return set } -func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { +func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName dtypes.NetworkName, j journal.Journal) (*MessagePool, error) { cache, _ := lru.New2Q(build.BlsSignatureCacheSize) verifcache, _ := lru.New2Q(build.VerifSigCacheSize) + noncecache, _ := lru.New(256) - cfg, err := loadConfig(ds) + cfg, err := loadConfig(ctx, ds) if err != nil { return nil, xerrors.Errorf("error loading mpool config: %w", err) } @@ -386,6 +394,7 @@ func New(api Provider, ds dtypes.MetadataDS, us stmgr.UpgradeSchedule, netName d pruneCooldown: make(chan struct{}, 1), blsSigCache: cache, sigValCache: verifcache, + nonceCache: noncecache, changes: lps.New(50), localMsgs: namespace.Wrap(ds, datastore.NewKey(localMsgsDs)), api: api, @@ -601,7 +610,7 @@ func (mp *MessagePool) addLocal(ctx context.Context, m *types.SignedMessage) err return xerrors.Errorf("error serializing message: %w", err) } - if err := 
mp.localMsgs.Put(datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { + if err := mp.localMsgs.Put(ctx, datastore.NewKey(string(m.Cid().Bytes())), msgb); err != nil { return xerrors.Errorf("persisting local message: %w", err) } @@ -909,12 +918,12 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st mp.blsSigCache.Add(m.Cid(), m.Signature) } - if _, err := mp.api.PutMessage(m); err != nil { + if _, err := mp.api.PutMessage(ctx, m); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } - if _, err := mp.api.PutMessage(&m.Message); err != nil { + if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil { log.Warnf("mpooladd cs.PutMessage failed: %s", err) return err } @@ -1016,11 +1025,23 @@ func (mp *MessagePool) getStateNonce(ctx context.Context, addr address.Address, done := metrics.Timer(ctx, metrics.MpoolGetNonceDuration) defer done() + nk := nonceCacheKey{ + tsk: ts.Key(), + addr: addr, + } + + n, ok := mp.nonceCache.Get(nk) + if ok { + return n.(uint64), nil + } + act, err := mp.api.GetActorAfter(addr, ts) if err != nil { return 0, err } + mp.nonceCache.Add(nk, act.Nonce) + return act.Nonce, nil } @@ -1207,7 +1228,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a var merr error for _, ts := range revert { - pts, err := mp.api.LoadTipSet(ts.Parents()) + pts, err := mp.api.LoadTipSet(ctx, ts.Parents()) if err != nil { log.Errorf("error loading reverted tipset parent: %s", err) merr = multierror.Append(merr, err) @@ -1216,7 +1237,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = pts - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1232,7 +1253,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a mp.curTs = ts 
for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) log.Errorf("error retrieving messages for block: %s", xerr) @@ -1338,7 +1359,7 @@ func (mp *MessagePool) HeadChange(ctx context.Context, revert []*types.TipSet, a return merr } -func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { +func (mp *MessagePool) runHeadChange(ctx context.Context, from *types.TipSet, to *types.TipSet, rmsgs map[address.Address]map[uint64]*types.SignedMessage) error { add := func(m *types.SignedMessage) { s, ok := rmsgs[m.Message.From] if !ok { @@ -1360,7 +1381,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs } - revert, apply, err := store.ReorgOps(mp.api.LoadTipSet, from, to) + revert, apply, err := store.ReorgOps(ctx, mp.api.LoadTipSet, from, to) if err != nil { return xerrors.Errorf("failed to compute reorg ops for mpool pending messages: %w", err) } @@ -1368,7 +1389,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs var merr error for _, ts := range revert { - msgs, err := mp.MessagesForBlocks(ts.Blocks()) + msgs, err := mp.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { log.Errorf("error retrieving messages for reverted block: %s", err) merr = multierror.Append(merr, err) @@ -1382,7 +1403,7 @@ func (mp *MessagePool) runHeadChange(from *types.TipSet, to *types.TipSet, rmsgs for _, ts := range apply { for _, b := range ts.Blocks() { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { xerr := xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) 
log.Errorf("error retrieving messages for block: %s", xerr) @@ -1407,11 +1428,11 @@ type statBucket struct { msgs map[uint64]*types.SignedMessage } -func (mp *MessagePool) MessagesForBlocks(blks []*types.BlockHeader) ([]*types.SignedMessage, error) { +func (mp *MessagePool) MessagesForBlocks(ctx context.Context, blks []*types.BlockHeader) ([]*types.SignedMessage, error) { out := make([]*types.SignedMessage, 0) for _, b := range blks { - bmsgs, smsgs, err := mp.api.MessagesForBlock(b) + bmsgs, smsgs, err := mp.api.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for apply block %s(height %d) (msgroot = %s): %w", b.Cid(), b.Height, b.Messages, err) } @@ -1477,7 +1498,7 @@ func (mp *MessagePool) Updates(ctx context.Context) (<-chan api.MpoolUpdate, err } func (mp *MessagePool) loadLocal(ctx context.Context) error { - res, err := mp.localMsgs.Query(query.Query{}) + res, err := mp.localMsgs.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query local messages: %w", err) } @@ -1525,7 +1546,7 @@ func (mp *MessagePool) Clear(ctx context.Context, local bool) { if ok { for _, m := range mset.msgs { - err := mp.localMsgs.Delete(datastore.NewKey(string(m.Cid().Bytes()))) + err := mp.localMsgs.Delete(ctx, datastore.NewKey(string(m.Cid().Bytes()))) if err != nil { log.Warnf("error deleting local message: %s", err) } diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index 4a2bbfe94..6bd60da34 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -1,3 +1,4 @@ +//stm: #unit package messagepool import ( @@ -103,7 +104,7 @@ func (tma *testMpoolAPI) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return tma.tipsets[0] } -func (tma *testMpoolAPI) PutMessage(m types.ChainMsg) (cid.Cid, error) { +func (tma *testMpoolAPI) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { return cid.Undef, nil } @@ -164,16 +165,16 @@ func 
(tma *testMpoolAPI) StateAccountKeyAtFinality(ctx context.Context, addr add return addr, nil } -func (tma *testMpoolAPI) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { +func (tma *testMpoolAPI) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { return nil, tma.bmsgs[h.Cid()], nil } -func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { +func (tma *testMpoolAPI) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { if len(ts.Blocks()) != 1 { panic("cant deal with multiblock tipsets in this test") } - bm, sm, err := tma.MessagesForBlock(ts.Blocks()[0]) + bm, sm, err := tma.MessagesForBlock(ctx, ts.Blocks()[0]) if err != nil { return nil, err } @@ -190,7 +191,7 @@ func (tma *testMpoolAPI) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, return out, nil } -func (tma *testMpoolAPI) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (tma *testMpoolAPI) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { for _, ts := range tma.tipsets { if types.CidArrsEqual(tsk.Cids(), ts.Cids()) { return ts, nil @@ -206,6 +207,7 @@ func (tma *testMpoolAPI) ChainComputeBaseFee(ctx context.Context, ts *types.TipS func assertNonce(t *testing.T, mp *MessagePool, addr address.Address, val uint64) { t.Helper() + //stm: @CHAIN_MEMPOOL_GET_NONCE_001 n, err := mp.GetNonce(context.TODO(), addr, types.EmptyTSK) if err != nil { t.Fatal(err) @@ -233,7 +235,7 @@ func TestMessagePool(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -277,7 +279,7 @@ func TestCheckMessageBig(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", 
nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) assert.NoError(t, err) to := mock.Address(1001) @@ -340,7 +342,7 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -366,8 +368,10 @@ func TestMessagePoolMessagesInEachBlock(t *testing.T) { tma.applyBlock(t, a) tsa := mock.TipSet(a) + //stm: @CHAIN_MEMPOOL_PENDING_001 _, _ = mp.Pending(context.TODO()) + //stm: @CHAIN_MEMPOOL_SELECT_001 selm, _ := mp.SelectMessages(context.Background(), tsa, 1) if len(selm) == 0 { t.Fatal("should have returned the rest of the messages") @@ -389,7 +393,7 @@ func TestRevertMessages(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -428,6 +432,7 @@ func TestRevertMessages(t *testing.T) { assertNonce(t, mp, sender, 4) + //stm: @CHAIN_MEMPOOL_PENDING_001 p, _ := mp.Pending(context.TODO()) fmt.Printf("%+v\n", p) if len(p) != 3 { @@ -452,7 +457,7 @@ func TestPruningSimple(t *testing.T) { ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -486,6 +491,7 @@ func TestPruningSimple(t *testing.T) { mp.Prune() + //stm: @CHAIN_MEMPOOL_PENDING_001 msgs, _ := mp.Pending(context.TODO()) if len(msgs) != 5 { t.Fatal("expected only 5 messages in pool, got: ", len(msgs)) @@ -496,7 +502,7 @@ func TestLoadLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, 
filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -528,6 +534,7 @@ func TestLoadLocal(t *testing.T) { msgs := make(map[cid.Cid]struct{}) for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + //stm: @CHAIN_MEMPOOL_PUSH_001 cid, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) @@ -539,11 +546,12 @@ func TestLoadLocal(t *testing.T) { t.Fatal(err) } - mp, err = New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err = New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } + //stm: @CHAIN_MEMPOOL_PENDING_001 pmsgs, _ := mp.Pending(context.TODO()) if len(msgs) != len(pmsgs) { t.Fatalf("expected %d messages, but got %d", len(msgs), len(pmsgs)) @@ -568,7 +576,7 @@ func TestClearAll(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -599,6 +607,7 @@ func TestClearAll(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + //stm: @CHAIN_MEMPOOL_PUSH_001 _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) @@ -610,8 +619,10 @@ func TestClearAll(t *testing.T) { mustAdd(t, mp, m) } + //stm: @CHAIN_MEMPOOL_CLEAR_001 mp.Clear(context.Background(), true) + //stm: @CHAIN_MEMPOOL_PENDING_001 pending, _ := mp.Pending(context.TODO()) if len(pending) > 0 { t.Fatalf("cleared the mpool, but got %d pending messages", len(pending)) @@ -622,7 +633,7 @@ func TestClearNonLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err 
:= New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -654,6 +665,7 @@ func TestClearNonLocal(t *testing.T) { gasLimit := gasguess.Costs[gasguess.CostKey{Code: builtin2.StorageMarketActorCodeID, M: 2}] for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + //stm: @CHAIN_MEMPOOL_PUSH_001 _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) @@ -665,8 +677,10 @@ func TestClearNonLocal(t *testing.T) { mustAdd(t, mp, m) } + //stm: @CHAIN_MEMPOOL_CLEAR_001 mp.Clear(context.Background(), false) + //stm: @CHAIN_MEMPOOL_PENDING_001 pending, _ := mp.Pending(context.TODO()) if len(pending) != 10 { t.Fatalf("expected 10 pending messages, but got %d instead", len(pending)) @@ -683,7 +697,7 @@ func TestUpdates(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } @@ -724,6 +738,7 @@ func TestUpdates(t *testing.T) { for i := 0; i < 10; i++ { m := makeTestMessage(w1, a1, a2, uint64(i), gasLimit, uint64(i+1)) + //stm: @CHAIN_MEMPOOL_PUSH_001 _, err := mp.Push(context.TODO(), m) if err != nil { t.Fatal(err) diff --git a/chain/messagepool/provider.go b/chain/messagepool/provider.go index 0f904c52c..6929f0078 100644 --- a/chain/messagepool/provider.go +++ b/chain/messagepool/provider.go @@ -23,13 +23,13 @@ var ( type Provider interface { SubscribeHeadChanges(func(rev, app []*types.TipSet) error) *types.TipSet - PutMessage(m types.ChainMsg) (cid.Cid, error) + PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) PubSubPublish(string, []byte) error GetActorAfter(address.Address, *types.TipSet) (*types.Actor, error) StateAccountKeyAtFinality(context.Context, 
address.Address, *types.TipSet) (address.Address, error) - MessagesForBlock(*types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) - MessagesForTipset(*types.TipSet) ([]types.ChainMsg, error) - LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) + MessagesForBlock(context.Context, *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) + MessagesForTipset(context.Context, *types.TipSet) ([]types.ChainMsg, error) + LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) IsLite() bool } @@ -66,8 +66,8 @@ func (mpp *mpoolProvider) SubscribeHeadChanges(cb func(rev, app []*types.TipSet) return mpp.sm.ChainStore().GetHeaviestTipSet() } -func (mpp *mpoolProvider) PutMessage(m types.ChainMsg) (cid.Cid, error) { - return mpp.sm.ChainStore().PutMessage(m) +func (mpp *mpoolProvider) PutMessage(ctx context.Context, m types.ChainMsg) (cid.Cid, error) { + return mpp.sm.ChainStore().PutMessage(ctx, m) } func (mpp *mpoolProvider) PubSubPublish(k string, v []byte) error { @@ -103,16 +103,18 @@ func (mpp *mpoolProvider) StateAccountKeyAtFinality(ctx context.Context, addr ad return mpp.sm.ResolveToKeyAddressAtFinality(ctx, addr, ts) } -func (mpp *mpoolProvider) MessagesForBlock(h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - return mpp.sm.ChainStore().MessagesForBlock(h) +func (mpp *mpoolProvider) MessagesForBlock(ctx context.Context, h *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { + // Mpool only handles non-cross messages. Disregard cross-messages in block. 
+ b, s, _, err := mpp.sm.ChainStore().MessagesForBlock(ctx, h) + return b, s, err } -func (mpp *mpoolProvider) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - return mpp.sm.ChainStore().MessagesForTipset(ts) +func (mpp *mpoolProvider) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + return mpp.sm.ChainStore().MessagesForTipset(ctx, ts) } -func (mpp *mpoolProvider) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { - return mpp.sm.ChainStore().LoadTipSet(tsk) +func (mpp *mpoolProvider) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { + return mpp.sm.ChainStore().LoadTipSet(ctx, tsk) } func (mpp *mpoolProvider) ChainComputeBaseFee(ctx context.Context, ts *types.TipSet) (types.BigInt, error) { diff --git a/chain/messagepool/pruning.go b/chain/messagepool/pruning.go index c10239b8e..d405afb65 100644 --- a/chain/messagepool/pruning.go +++ b/chain/messagepool/pruning.go @@ -49,7 +49,7 @@ func (mp *MessagePool) pruneMessages(ctx context.Context, ts *types.TipSet) erro } baseFeeLowerBound := getBaseFeeLowerBound(baseFee, baseFeeLowerBoundFactor) - pending, _ := mp.getPendingMessages(ts, ts) + pending, _ := mp.getPendingMessages(ctx, ts, ts) // protected actors -- not pruned protected := make(map[address.Address]struct{}) diff --git a/chain/messagepool/repub.go b/chain/messagepool/repub.go index 4323bdee1..d92b5bd58 100644 --- a/chain/messagepool/repub.go +++ b/chain/messagepool/repub.go @@ -121,7 +121,7 @@ loop: // we can't fit the current chain but there is gas to spare // trim it and push it down - chain.Trim(gasLimit, mp, baseFee) + chain.Trim(gasLimit, repubMsgLimit, mp, baseFee) for j := i; j < len(chains)-1; j++ { if chains[j].Before(chains[j+1]) { break @@ -131,6 +131,10 @@ loop: } count := 0 + if len(msgs) > repubMsgLimit { + msgs = msgs[:repubMsgLimit] + } + log.Infof("republishing %d messages", len(msgs)) for _, m := range msgs { mb, err := m.Serialize() diff --git 
a/chain/messagepool/repub_test.go b/chain/messagepool/repub_test.go index fa27d68ed..de32eaa6b 100644 --- a/chain/messagepool/repub_test.go +++ b/chain/messagepool/repub_test.go @@ -25,7 +25,7 @@ func TestRepubMessages(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "mptest", nil) if err != nil { t.Fatal(err) } diff --git a/chain/messagepool/selection.go b/chain/messagepool/selection.go index acff7c4cf..633e9b23f 100644 --- a/chain/messagepool/selection.go +++ b/chain/messagepool/selection.go @@ -7,6 +7,10 @@ import ( "sort" "time" + cbg "github.com/whyrusleeping/cbor-gen" + + "github.com/filecoin-project/go-state-types/crypto" + "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -34,9 +38,10 @@ type msgChain struct { merged bool next *msgChain prev *msgChain + sigType crypto.SigType } -func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) (msgs []*types.SignedMessage, err error) { +func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { mp.curTsLk.Lock() defer mp.curTsLk.Unlock() @@ -46,24 +51,156 @@ func (mp *MessagePool) SelectMessages(ctx context.Context, ts *types.TipSet, tq // if the ticket quality is high enough that the first block has higher probability // than any other block, then we don't bother with optimal selection because the // first block will always have higher effective performance + var sm *selectedMessages + var err error if tq > 0.84 { - msgs, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) + sm, err = mp.selectMessagesGreedy(ctx, mp.curTs, ts) } else { - msgs, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) + sm, err = mp.selectMessagesOptimal(ctx, mp.curTs, ts, tq) } if err != nil { return nil, err } - if len(msgs) > 
build.BlockMessageLimit { - msgs = msgs[:build.BlockMessageLimit] + if sm == nil { + return nil, nil + } + + // one last sanity check + if len(sm.msgs) > build.BlockMessageLimit { + log.Errorf("message selection chose too many messages %d > %d", len(sm.msgs), build.BlockMessageLimit) + sm.msgs = sm.msgs[:build.BlockMessageLimit] + } + + return sm.msgs, nil +} + +type selectedMessages struct { + msgs []*types.SignedMessage + gasLimit int64 + secpLimit int + blsLimit int +} + +// returns false if chain can't be added due to block constraints +func (sm *selectedMessages) tryToAdd(mc *msgChain) bool { + l := len(mc.msgs) + + if build.BlockMessageLimit < l+len(sm.msgs) || sm.gasLimit < mc.gasLimit { + return false } - return msgs, nil + if mc.sigType == crypto.SigTypeBLS { + if sm.blsLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.blsLimit -= l + sm.gasLimit -= mc.gasLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if sm.secpLimit < l { + return false + } + + sm.msgs = append(sm.msgs, mc.msgs...) 
+ sm.secpLimit -= l + sm.gasLimit -= mc.gasLimit + } + + // don't add the weird sigType msg, but otherwise proceed + return true } -func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) ([]*types.SignedMessage, error) { +// returns false if messages can't be added due to block constraints +// will trim / invalidate chain as appropriate +func (sm *selectedMessages) tryToAddWithDeps(mc *msgChain, mp *MessagePool, baseFee types.BigInt) bool { + // compute the dependencies that must be merged and the gas limit including deps + chainGasLimit := mc.gasLimit + chainMsgLimit := len(mc.msgs) + depGasLimit := int64(0) + depMsgLimit := 0 + smMsgLimit := 0 + + if mc.sigType == crypto.SigTypeBLS { + smMsgLimit = sm.blsLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + smMsgLimit = sm.secpLimit + } else { + return false + } + + if smMsgLimit > build.BlockMessageLimit-len(sm.msgs) { + smMsgLimit = build.BlockMessageLimit - len(sm.msgs) + } + + var chainDeps []*msgChain + for curChain := mc.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { + chainDeps = append(chainDeps, curChain) + chainGasLimit += curChain.gasLimit + chainMsgLimit += len(curChain.msgs) + depGasLimit += curChain.gasLimit + depMsgLimit += len(curChain.msgs) + } + + // the chain doesn't fit as-is, so trim / invalidate it and return false + if chainGasLimit > sm.gasLimit || chainMsgLimit > smMsgLimit { + + // it doesn't all fit; now we have to take into account the dependent chains before + // making a decision about trimming or invalidating. + // if the dependencies exceed the block limits, then we must invalidate the chain + // as it can never be included. 
+ // Otherwise we can just trim and continue + if depGasLimit > sm.gasLimit || depMsgLimit >= smMsgLimit { + mc.Invalidate() + } else { + // dependencies fit, just trim it + mc.Trim(sm.gasLimit-depGasLimit, smMsgLimit-depMsgLimit, mp, baseFee) + } + + return false + } + + // the chain fits! include it together with all dependencies + for i := len(chainDeps) - 1; i >= 0; i-- { + curChain := chainDeps[i] + curChain.merged = true + sm.msgs = append(sm.msgs, curChain.msgs...) + } + + mc.merged = true + + sm.msgs = append(sm.msgs, mc.msgs...) + sm.gasLimit -= chainGasLimit + + if mc.sigType == crypto.SigTypeBLS { + sm.blsLimit -= chainMsgLimit + } else if mc.sigType == crypto.SigTypeSecp256k1 { + sm.secpLimit -= chainMsgLimit + } + + return true +} + +func (sm *selectedMessages) trimChain(mc *msgChain, mp *MessagePool, baseFee types.BigInt) { + msgLimit := build.BlockMessageLimit - len(sm.msgs) + if mc.sigType == crypto.SigTypeBLS { + if msgLimit > sm.blsLimit { + msgLimit = sm.blsLimit + } + } else if mc.sigType == crypto.SigTypeSecp256k1 { + if msgLimit > sm.secpLimit { + msgLimit = sm.secpLimit + } + } + + if mc.gasLimit > sm.gasLimit || len(mc.msgs) > msgLimit { + mc.Trim(sm.gasLimit, msgLimit, mp, baseFee) + } +} + +func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *types.TipSet, tq float64) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -73,7 +210,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0. Load messages from the target tipset; if it is the same as the current tipset in // the mpool, then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -89,10 +226,10 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 0b. 
Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { return result, nil } @@ -117,19 +254,21 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ return result, nil } - // 3. Parition chains into blocks (without trimming) + // 3. Partition chains into blocks (without trimming) // we use the full blockGasLimit (as opposed to the residual gas limit from the - // priority message selection) as we have to account for what other miners are doing + // priority message selection) as we have to account for what other block providers are doing nextChain := 0 partitions := make([][]*msgChain, MaxBlocks) for i := 0; i < MaxBlocks && nextChain < len(chains); i++ { gasLimit := int64(build.BlockGasLimit) + msgLimit := build.BlockMessageLimit for nextChain < len(chains) { chain := chains[nextChain] nextChain++ partitions[i] = append(partitions[i], chain) gasLimit -= chain.gasLimit - if gasLimit < minGas { + msgLimit -= len(chain.msgs) + if gasLimit < minGas || msgLimit <= 0 { break } } @@ -158,7 +297,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ }) // 6. 
Merge the head chains to produce the list of messages selected for inclusion - // subject to the residual gas limit + // subject to the residual block limits // When a chain is merged in, all its previous dependent chains *must* also be // merged in or we'll have a broken block startMerge := time.Now() @@ -174,35 +313,16 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - } - - // does it all fit in the block? - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - // adjust the effective pefromance for all subsequent chains + if result.tryToAddWithDeps(chain, mp, baseFee) { + // adjust the effective performance for all subsequent chains if next := chain.next; next != nil && next.effPerf > 0 { next.effPerf += next.parentOffset for next = next.next; next != nil && next.effPerf > 0; next = next.next { next.setEffPerf() } } - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit - // resort to account for already merged chains and effective performance adjustments + // re-sort to account for already merged chains and effective performance adjustments // the sort *must* be stable or we end up getting negative gasPerfs pushed up. 
sort.SliceStable(chains[i+1:], func(i, j int) bool { return chains[i].BeforeEffective(chains[j]) @@ -211,7 +331,7 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ continue } - // we can't fit this chain and its dependencies because of block gasLimit -- we are + // we can't fit this chain and its dependencies because of block limits -- we are // at the edge last = i break @@ -222,18 +342,22 @@ func (mp *MessagePool) selectMessagesOptimal(ctx context.Context, curTs, ts *typ // 7. We have reached the edge of what can fit wholesale; if we still hae available // gasLimit to pack some more chains, then trim the last chain and push it down. - // Trimming invalidaates subsequent dependent chains so that they can't be selected + // Trimming invalidates subsequent dependent chains so that they can't be selected // as their dependency cannot be (fully) included. // We do this in a loop because the blocker might have been inordinately large and // we might have to do it multiple times to satisfy tail packing startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { - // trim if necessary - if chains[last].gasLimit > gasLimit { - chains[last].Trim(gasLimit, mp, baseFee) + for result.gasLimit >= minGas && last < len(chains) { + + if !chains[last].valid { + last++ + continue tailLoop } + // trim if necessary + result.trimChain(chains[last], mp, baseFee) + // push down if it hasn't been invalidated if chains[last].valid { for i := last; i < len(chains)-1; i++ { @@ -245,7 +369,7 @@ tailLoop: } // select the next (valid and fitting) chain and its dependencies for inclusion - for i, chain := range chains[last:] { + for _, chain := range chains[last:] { // has the chain been invalidated? 
if !chain.valid { continue @@ -261,45 +385,10 @@ tailLoop: break tailLoop } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // does it all fit in the bock - if chainGasLimit <= gasLimit { - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - } - - chain.merged = true - result = append(result, chain.msgs...) - gasLimit -= chainGasLimit + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // it doesn't all fit; now we have to take into account the dependent chains before - // making a decision about trimming or invalidating. - // if the dependencies exceed the gas limit, then we must invalidate the chain - // as it can never be included. 
- // Otherwise we can just trim and continue - if depGasLimit > gasLimit { - chain.Invalidate() - last += i + 1 - continue tailLoop - } - - // dependencies fit, just trim it - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - last += i continue tailLoop } @@ -311,17 +400,17 @@ tailLoop: log.Infow("pack tail chains done", "took", dt) } - // if we have gasLimit to spare, pick some random (non-negative) chains to fill the block - // we pick randomly so that we minimize the probability of duplication among all miners - if gasLimit >= minGas { - randomCount := 0 + // if we have room to spare, pick some random (non-negative) chains to fill the block + // we pick randomly so that we minimize the probability of duplication among all block producers + if result.gasLimit >= minGas && len(result.msgs) <= build.BlockMessageLimit { + preRandomLength := len(result.msgs) startRandom := time.Now() shuffleChains(chains) for _, chain := range chains { // have we filled the block - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) >= build.BlockMessageLimit { break } @@ -335,59 +424,31 @@ tailLoop: continue } - // compute the dependencies that must be merged and the gas limit including deps - chainGasLimit := chain.gasLimit - depGasLimit := int64(0) - var chainDeps []*msgChain - for curChain := chain.prev; curChain != nil && !curChain.merged; curChain = curChain.prev { - chainDeps = append(chainDeps, curChain) - chainGasLimit += curChain.gasLimit - depGasLimit += curChain.gasLimit - } - - // do the deps fit? if the deps won't fit, invalidate the chain - if depGasLimit > gasLimit { - chain.Invalidate() + if result.tryToAddWithDeps(chain, mp, baseFee) { continue } - // do they fit as is? 
if it doesn't, trim to make it fit if possible - if chainGasLimit > gasLimit { - chain.Trim(gasLimit-depGasLimit, mp, baseFee) - - if !chain.valid { - continue - } - } - - // include it together with all dependencies - for i := len(chainDeps) - 1; i >= 0; i-- { - curChain := chainDeps[i] - curChain.merged = true - result = append(result, curChain.msgs...) - randomCount += len(curChain.msgs) + if chain.valid { + // chain got trimmed on the previous call to tryToAddWithDeps, can now be included + result.tryToAddWithDeps(chain, mp, baseFee) + continue } - - chain.merged = true - result = append(result, chain.msgs...) - randomCount += len(chain.msgs) - gasLimit -= chainGasLimit } if dt := time.Since(startRandom); dt > time.Millisecond { log.Infow("pack random tail chains done", "took", dt) } - if randomCount > 0 { + if len(result.msgs) != preRandomLength { log.Warnf("optimal selection failed to pack a block; picked %d messages with random selection", - randomCount) + len(result.msgs)-preRandomLength) } } return result, nil } -func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) ([]*types.SignedMessage, error) { +func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *types.TipSet) (*selectedMessages, error) { start := time.Now() baseFee, err := mp.api.ChainComputeBaseFee(context.TODO(), ts) @@ -397,7 +458,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0. Load messages for the target tipset; if it is the same as the current tipset in the mpool // then this is just the pending messages - pending, err := mp.getPendingMessages(curTs, ts) + pending, err := mp.getPendingMessages(ctx, curTs, ts) if err != nil { return nil, err } @@ -413,10 +474,10 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // 0b. 
Select all priority messages that fit in the block minGas := int64(gasguess.MinGas) - result, gasLimit := mp.selectPriorityMessages(ctx, pending, baseFee, ts) + result := mp.selectPriorityMessages(ctx, pending, baseFee, ts) // have we filled the block? - if gasLimit < minGas { + if result.gasLimit < minGas || len(result.msgs) > build.BlockMessageLimit { return result, nil } @@ -442,7 +503,7 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // 3. Merge the head chains to produce the list of messages selected for inclusion, subject to - // the block gas limit. + // the block gas and message limits. startMerge := time.Now() last := len(chains) for i, chain := range chains { @@ -452,13 +513,12 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type } // does it fit in the block? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } - // we can't fit this chain because of block gasLimit -- we are at the edge + // we can't fit this chain because of block limits -- we are at the edge last = i break } @@ -474,9 +534,9 @@ func (mp *MessagePool) selectMessagesGreedy(ctx context.Context, curTs, ts *type // have to do it multiple times to satisfy tail packing. startTail := time.Now() tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim - chains[last].Trim(gasLimit, mp, baseFee) + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -501,9 +561,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) 
+ if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -523,7 +582,7 @@ tailLoop: return result, nil } -func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) ([]*types.SignedMessage, int64) { +func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[address.Address]map[uint64]*types.SignedMessage, baseFee types.BigInt, ts *types.TipSet) *selectedMessages { start := time.Now() defer func() { if dt := time.Since(start); dt > time.Millisecond { @@ -531,8 +590,12 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } }() mpCfg := mp.getConfig() - result := make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow) - gasLimit := int64(build.BlockGasLimit) + result := &selectedMessages{ + msgs: make([]*types.SignedMessage, 0, mpCfg.SizeLimitLow), + gasLimit: int64(build.BlockGasLimit), + blsLimit: cbg.MaxLength, + secpLimit: cbg.MaxLength, + } minGas := int64(gasguess.MinGas) // 1. Get priority actor chains @@ -542,7 +605,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a pk, err := mp.resolveToKey(ctx, actor) if err != nil { log.Debugf("mpooladdlocal failed to resolve sender: %s", err) - return nil, gasLimit + return result } mset, ok := pending[pk] @@ -554,9 +617,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a chains = append(chains, next...) } } - if len(chains) == 0 { - return nil, gasLimit + return result } // 2. Sort the chains @@ -566,7 +628,7 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a if len(chains) != 0 && chains[0].gasPerf < 0 { log.Warnw("all priority messages in mpool have negative gas performance", "bestGasPerf", chains[0].gasPerf) - return nil, gasLimit + return result } // 3. 
Merge chains until the block limit, as long as they have non-negative gas performance @@ -576,9 +638,8 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a break } - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -588,9 +649,10 @@ func (mp *MessagePool) selectPriorityMessages(ctx context.Context, pending map[a } tailLoop: - for gasLimit >= minGas && last < len(chains) { + for result.gasLimit >= minGas && last < len(chains) { // trim, discarding negative performing messages - chains[last].Trim(gasLimit, mp, baseFee) + + result.trimChain(chains[last], mp, baseFee) // push down if it hasn't been invalidated if chains[last].valid { @@ -615,9 +677,8 @@ tailLoop: } // does it fit in the bock? - if chain.gasLimit <= gasLimit { - gasLimit -= chain.gasLimit - result = append(result, chain.msgs...) + if result.tryToAdd(chain) { + // there was room, we added the chain, keep going continue } @@ -631,10 +692,10 @@ tailLoop: break } - return result, gasLimit + return result } -func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { +func (mp *MessagePool) getPendingMessages(ctx context.Context, curTs, ts *types.TipSet) (map[address.Address]map[uint64]*types.SignedMessage, error) { start := time.Now() result := make(map[address.Address]map[uint64]*types.SignedMessage) @@ -670,7 +731,7 @@ func (mp *MessagePool) getPendingMessages(curTs, ts *types.TipSet) (map[address. 
return result, nil } - if err := mp.runHeadChange(curTs, ts, result); err != nil { + if err := mp.runHeadChange(ctx, curTs, ts, result); err != nil { return nil, xerrors.Errorf("failed to process difference between mpool head and given head: %w", err) } @@ -778,11 +839,16 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 return nil } + // if we have more messages from this sender than can fit in a block, drop the extra ones + if len(msgs) > build.BlockMessageLimit { + msgs = msgs[:build.BlockMessageLimit] + } + // ok, now we can construct the chains using the messages we have // invariant: each chain has a bigger gasPerf than the next -- otherwise they can be merged // and increase the gasPerf of the first chain // We do this in two passes: - // - in the first pass we create chains that aggreagate messages with non-decreasing gasPerf + // - in the first pass we create chains that aggregate messages with non-decreasing gasPerf // - in the second pass we merge chains to maintain the invariant. 
var chains []*msgChain var curChain *msgChain @@ -794,6 +860,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 chain.gasLimit = m.Message.GasLimit chain.gasPerf = mp.getGasPerf(chain.gasReward, chain.gasLimit) chain.valid = true + chain.sigType = m.Signature.Type return chain } @@ -808,7 +875,7 @@ func (mp *MessagePool) createMessageChains(actor address.Address, mset map[uint6 gasLimit := curChain.gasLimit + m.Message.GasLimit gasPerf := mp.getGasPerf(gasReward, gasLimit) - // try to add the message to the current chain -- if it decreases the gasPerf, then make a + // try to add the message to the current chain -- if it decreases the gasPerf, then make a // new chain if gasPerf < curChain.gasPerf { chains = append(chains, curChain) @@ -868,9 +935,9 @@ func (mc *msgChain) Before(other *msgChain) bool { (mc.gasPerf == other.gasPerf && mc.gasReward.Cmp(other.gasReward) > 0) } -func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) { +func (mc *msgChain) Trim(gasLimit int64, msgLimit int, mp *MessagePool, baseFee types.BigInt) { i := len(mc.msgs) - 1 - for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0) { + for i >= 0 && (mc.gasLimit > gasLimit || mc.gasPerf < 0 || i >= msgLimit) { gasReward := mp.getGasReward(mc.msgs[i], baseFee) mc.gasReward = new(big.Int).Sub(mc.gasReward, gasReward) mc.gasLimit -= mc.msgs[i].Message.GasLimit @@ -893,6 +960,7 @@ func (mc *msgChain) Trim(gasLimit int64, mp *MessagePool, baseFee types.BigInt) mc.msgs = mc.msgs[:i+1] } + // TODO: if the trim above is a no-op, this (may) needlessly invalidates the next chain if mc.next != nil { mc.next.Invalidate() mc.next = nil diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index 0f8fd8ee6..2ae99cd77 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -13,6 +13,10 @@ import ( "sort" "testing" + "github.com/filecoin-project/go-state-types/crypto" + + cbg 
"github.com/whyrusleeping/cbor-gen" + "github.com/filecoin-project/go-address" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" @@ -61,7 +65,7 @@ func makeTestMessage(w *wallet.LocalWallet, from, to address.Address, nonce uint func makeTestMpool() (*MessagePool, *testMpoolAPI) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() - mp, err := New(tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) + mp, err := New(context.Background(), tma, ds, filcns.DefaultUpgradeSchedule(), "test", nil) if err != nil { panic(err) } @@ -527,7 +531,7 @@ func TestBasicMessageSelection(t *testing.T) { } } -func TestMessageSelectionTrimming(t *testing.T) { +func TestMessageSelectionTrimmingGas(t *testing.T) { mp, tma := makeTestMpool() // the actors @@ -577,17 +581,210 @@ func TestMessageSelectionTrimming(t *testing.T) { expected := int(build.BlockGasLimit / gasLimit) if len(msgs) != expected { - t.Fatalf("expected %d messages, bug got %d", expected, len(msgs)) + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + +} + +func TestMessageSelectionTrimmingMsgsBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + + // create a larger than selectable chain + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + expected := cbg.MaxLength + if 
len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + mGasLimit := int64(0) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + } + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + +} + +func TestMessageSelectionTrimmingMsgsTwoSendersBasic(t *testing.T) { + mp, tma := makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 larger than selectable chains + for i := 0; i < build.BlockMessageLimit; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + } + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + + mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) + for _, m := range msgs { + mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ + } + + if mGasLimit > build.BlockGasLimit { + t.Fatal("selected messages gas limit exceeds block gas limit!") + } + + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + if counts[crypto.SigTypeBLS] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } +} + +func TestMessageSelectionTrimmingMsgsTwoSendersAdvanced(t *testing.T) { + mp, tma := 
makeTestMpool() + + // the actors + w1, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a1, err := w1.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + w2, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a2, err := w2.WalletNew(context.Background(), types.KTBLS) + if err != nil { + t.Fatal(err) + } + + block := tma.nextBlock() + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + tma.setBalance(a1, 1) // in FIL + tma.setBalance(a2, 1) // in FIL + + // create 2 almost max-length chains of equal value + i := 0 + for i = 0; i < cbg.MaxLength-1; i++ { + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 100) + mustAdd(t, mp, m) + // a2's messages are preferred + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + } + + // a1's 8192th message is worth more than a2's + m := makeTestMessage(w1, a1, a2, uint64(i), 300000, 1000) + mustAdd(t, mp, m) + + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 100) + mustAdd(t, mp, m) + + i++ + + // a2's (unselectable) 8193rd message is worth SO MUCH + m = makeTestMessage(w2, a2, a1, uint64(i), 300000, 1000000) + mustAdd(t, mp, m) + + msgs, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) } mGasLimit := int64(0) + counts := make(map[crypto.SigType]uint) for _, m := range msgs { mGasLimit += m.Message.GasLimit + counts[m.Signature.Type]++ } + if mGasLimit > build.BlockGasLimit { t.Fatal("selected messages gas limit exceeds block gas limit!") } + expected := build.BlockMessageLimit + if len(msgs) != expected { + t.Fatalf("expected %d messages, but got %d", expected, len(msgs)) + } + + // we should have taken the secp chain + if counts[crypto.SigTypeSecp256k1] != cbg.MaxLength { + t.Fatalf("expected %d bls messages, but got %d", cbg.MaxLength, len(msgs)) + } } func TestPriorityMessageSelection(t *testing.T) { @@ -978,7 +1175,7 @@ func 
TestOptimalMessageSelection2(t *testing.T) { func TestOptimalMessageSelection3(t *testing.T) { // this test uses 10 actors sending a block of messages to each other, with the the first // actors paying higher gas premium than the subsequent actors. - // We select with a low ticket quality; the chain depenent merging algorithm should pick + // We select with a low ticket quality; the chain dependent merging algorithm should pick // messages from the median actor from the start mp, tma := makeTestMpool() @@ -1109,11 +1306,13 @@ func testCompetitiveMessageSelection(t *testing.T, rng *rand.Rand, getPremium fu logging.SetLogLevel("messagepool", "error") // 1. greedy selection - greedyMsgs, err := mp.selectMessagesGreedy(context.Background(), ts, ts) + gm, err := mp.selectMessagesGreedy(context.Background(), ts, ts) if err != nil { t.Fatal(err) } + greedyMsgs := gm.msgs + totalGreedyCapacity := 0.0 totalGreedyReward := 0.0 totalOptimalCapacity := 0.0 diff --git a/chain/messagesigner/messagesigner.go b/chain/messagesigner/messagesigner.go index 063d1aa7d..e2229bb51 100644 --- a/chain/messagesigner/messagesigner.go +++ b/chain/messagesigner/messagesigner.go @@ -84,7 +84,7 @@ func (ms *MessageSigner) SignMessage(ctx context.Context, msg *types.Message, cb } // If the callback executed successfully, write the nonce to the datastore - if err := ms.saveNonce(msg.From, nonce); err != nil { + if err := ms.saveNonce(ctx, msg.From, nonce); err != nil { return nil, xerrors.Errorf("failed to save nonce: %w", err) } @@ -105,7 +105,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // Get the next nonce for this address from the datastore addrNonceKey := ms.dstoreKey(addr) - dsNonceBytes, err := ms.ds.Get(addrNonceKey) + dsNonceBytes, err := ms.ds.Get(ctx, addrNonceKey) switch { case xerrors.Is(err, datastore.ErrNotFound): @@ -139,7 +139,7 @@ func (ms *MessageSigner) nextNonce(ctx context.Context, addr address.Address) (u // saveNonce increments the 
nonce for this address and writes it to the // datastore -func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { +func (ms *MessageSigner) saveNonce(ctx context.Context, addr address.Address, nonce uint64) error { // Increment the nonce nonce++ @@ -150,7 +150,7 @@ func (ms *MessageSigner) saveNonce(addr address.Address, nonce uint64) error { if err != nil { return xerrors.Errorf("failed to marshall nonce: %w", err) } - err = ms.ds.Put(addrNonceKey, buf.Bytes()) + err = ms.ds.Put(ctx, addrNonceKey, buf.Bytes()) if err != nil { return xerrors.Errorf("failed to write nonce to datastore: %w", err) } diff --git a/chain/messagesigner/messagesigner_test.go b/chain/messagesigner/messagesigner_test.go index 20d9af38b..00a09fc95 100644 --- a/chain/messagesigner/messagesigner_test.go +++ b/chain/messagesigner/messagesigner_test.go @@ -1,3 +1,4 @@ +//stm: #unit package messagesigner import ( @@ -60,6 +61,7 @@ func TestMessageSignerSignMessage(t *testing.T) { to2, err := w.WalletNew(ctx, types.KTSecp256k1) require.NoError(t, err) + //stm: @CHAIN_MESSAGE_SIGNER_NEW_SIGNER_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_001, @CHAIN_MESSAGE_SIGNER_SIGN_MESSAGE_005 type msgSpec struct { msg *types.Message mpoolNonce [1]uint64 diff --git a/chain/rand/rand.go b/chain/rand/rand.go index 90e9a514b..427648f2a 100644 --- a/chain/rand/rand.go +++ b/chain/rand/rand.go @@ -4,6 +4,8 @@ import ( "context" "encoding/binary" + "github.com/filecoin-project/go-state-types/network" + logging "github.com/ipfs/go-log/v2" "github.com/filecoin-project/lotus/chain/beacon" @@ -48,7 +50,7 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx context.Context, round abi.Ch defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -70,12 +72,12 @@ func (sr *stateRand) GetBeaconRandomnessTipset(ctx 
context.Context, round abi.Ch return randTs, nil } -func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (sr *stateRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { _, span := trace.StartSpan(ctx, "store.GetChainRandomness") defer span.End() span.AddAttributes(trace.Int64Attribute("round", int64(round))) - ts, err := sr.cs.LoadTipSet(types.NewTipSetKey(sr.blks...)) + ts, err := sr.cs.LoadTipSet(ctx, types.NewTipSetKey(sr.blks...)) if err != nil { return nil, err } @@ -101,38 +103,32 @@ func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainS return DrawRandomness(mtb.Ticket.VRFProof, pers, round, entropy) } +type NetworkVersionGetter func(context.Context, abi.ChainEpoch) network.Version + type stateRand struct { - cs *store.ChainStore - blks []cid.Cid - beacon beacon.Schedule + cs *store.ChainStore + blks []cid.Cid + beacon beacon.Schedule + networkVersionGetter NetworkVersionGetter } -func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule) vm.Rand { +func NewStateRand(cs *store.ChainStore, blks []cid.Cid, b beacon.Schedule, networkVersionGetter NetworkVersionGetter) vm.Rand { return &stateRand{ - cs: cs, - blks: blks, - beacon: b, + cs: cs, + blks: blks, + beacon: b, + networkVersionGetter: networkVersionGetter, } } // network v0-12 -func (sr *stateRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, true) -} - -// network v13 and on -func (sr *stateRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return sr.GetChainRandomness(ctx, pers, round, entropy, false) -} - -// network 
v0-12 -func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, true) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -143,13 +139,13 @@ func (sr *stateRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.Doma } // network v13 -func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, round, false) if err != nil { return nil, err } - be, err := sr.cs.GetLatestBeaconEntry(randTs) + be, err := sr.cs.GetLatestBeaconEntry(ctx, randTs) if err != nil { return nil, err } @@ -160,9 +156,9 @@ func (sr *stateRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.Doma } // network v14 and on -func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (sr *stateRand) getBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { if filecoinEpoch < 0 { - return sr.GetBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) } be, err := sr.extractBeaconEntryForEpoch(ctx, filecoinEpoch) @@ -174,6 +170,28 @@ func (sr *stateRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.Doma return 
DrawRandomness(be.Data, pers, filecoinEpoch, entropy) } +func (sr *stateRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + nv := sr.networkVersionGetter(ctx, filecoinEpoch) + + if nv >= network.Version13 { + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, false) + } + + return sr.getChainRandomness(ctx, pers, filecoinEpoch, entropy, true) +} + +func (sr *stateRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) { + nv := sr.networkVersionGetter(ctx, filecoinEpoch) + + if nv >= network.Version14 { + return sr.getBeaconRandomnessV3(ctx, pers, filecoinEpoch, entropy) + } else if nv == network.Version13 { + return sr.getBeaconRandomnessV2(ctx, pers, filecoinEpoch, entropy) + } else { + return sr.getBeaconRandomnessV1(ctx, pers, filecoinEpoch, entropy) + } +} + func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpoch abi.ChainEpoch) (*types.BeaconEntry, error) { randTs, err := sr.GetBeaconRandomnessTipset(ctx, filecoinEpoch, false) if err != nil { @@ -190,7 +208,7 @@ func (sr *stateRand) extractBeaconEntryForEpoch(ctx context.Context, filecoinEpo } } - next, err := sr.cs.LoadTipSet(randTs.Parents()) + next, err := sr.cs.LoadTipSet(ctx, randTs.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for beacon entry: %w", err) } diff --git a/chain/rand/rand_test.go b/chain/rand/rand_test.go index 5e5dae3f1..b5e2482b7 100644 --- a/chain/rand/rand_test.go +++ b/chain/rand/rand_test.go @@ -1,3 +1,4 @@ +//stm:#unit package rand_test import ( @@ -55,11 +56,13 @@ func TestNullRandomnessV1(t *testing.T) { randEpoch := ts.TipSet.TipSet().Height() - 2 + //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V1_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_02 rand1, err := 
cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) if err != nil { t.Fatal(err) } + //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01 bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(beforeNullHeight)+offset) select { @@ -68,6 +71,7 @@ func TestNullRandomnessV1(t *testing.T) { t.Fatal(resp.Err) } + //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01 rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) if err != nil { t.Fatal(err) @@ -131,11 +135,13 @@ func TestNullRandomnessV2(t *testing.T) { randEpoch := ts.TipSet.TipSet().Height() - 2 + //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V2_01 rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) if err != nil { t.Fatal(err) } + //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01 bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(ts.TipSet.TipSet().Height())+offset) select { @@ -144,6 +150,7 @@ func TestNullRandomnessV2(t *testing.T) { t.Fatal(resp.Err) } + //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01, @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_TIPSET_03 // note that the randEpoch passed to DrawRandomness is still randEpoch (not the latest ts height) rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) if err != nil { @@ -212,11 +219,13 @@ func TestNullRandomnessV3(t *testing.T) { randEpoch := ts.TipSet.TipSet().Height() - 2 + //stm: @BLOCKCHAIN_RAND_GET_BEACON_RANDOMNESS_V3_01, @BLOCKCHAIN_RAND_EXTRACT_BEACON_ENTRY_FOR_EPOCH_01 rand1, err := cg.StateManager().GetRandomnessFromBeacon(ctx, pers, randEpoch, entropy, ts.TipSet.TipSet().Key()) if err != nil { t.Fatal(err) } + //stm: @BLOCKCHAIN_BEACON_GET_BEACON_FOR_EPOCH_01 bch := cg.BeaconSchedule().BeaconForEpoch(randEpoch).Entry(ctx, uint64(randEpoch)+offset) select { @@ -225,6 +234,7 @@ func TestNullRandomnessV3(t *testing.T) { 
t.Fatal(resp.Err) } + //stm: @BLOCKCHAIN_RAND_DRAW_RANDOMNESS_01 rand2, err := rand.DrawRandomness(resp.Entry.Data, pers, randEpoch, entropy) if err != nil { t.Fatal(err) diff --git a/chain/state/statetree.go b/chain/state/statetree.go index b4323c04b..9a518a622 100644 --- a/chain/state/statetree.go +++ b/chain/state/statetree.go @@ -152,7 +152,16 @@ func VersionForNetwork(ver network.Version) (types.StateTreeVersion, error) { return types.StateTreeVersion2, nil case network.Version12: return types.StateTreeVersion3, nil - case network.Version13, network.Version14: + + /* inline-gen template + {{$lastNv := .latestNetworkVersion}} + case{{range .networkVersions}} {{if (ge . 13.)}} network.Version{{.}}{{if (lt . $lastNv)}},{{end}}{{end}}{{end}}: + + /* inline-gen start */ + + case network.Version13, network.Version14, network.Version15: + + /* inline-gen end */ return types.StateTreeVersion4, nil default: panic(fmt.Sprintf("unsupported network version %d", ver)) diff --git a/chain/stmgr/actors.go b/chain/stmgr/actors.go index 4d016b7ab..52773e1e4 100644 --- a/chain/stmgr/actors.go +++ b/chain/stmgr/actors.go @@ -14,6 +14,7 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" cid "github.com/ipfs/go-cid" + "golang.org/x/xerrors" "github.com/filecoin-project/lotus/api" @@ -116,7 +117,7 @@ func MinerSectorInfo(ctx context.Context, sm *StateManager, maddr address.Addres return mas.GetSector(sid) } -func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.SectorInfo, error) { +func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwrapper.Verifier, sm *StateManager, st cid.Cid, maddr address.Address, rand abi.PoStRandomness) ([]builtin.ExtendedSectorInfo, error) { act, err := sm.LoadActorRaw(ctx, maddr, st) if err != nil { return nil, xerrors.Errorf("failed to 
load miner actor: %w", err) @@ -202,12 +203,13 @@ func GetSectorsForWinningPoSt(ctx context.Context, nv network.Version, pv ffiwra return nil, xerrors.Errorf("loading proving sectors: %w", err) } - out := make([]builtin.SectorInfo, len(sectors)) + out := make([]builtin.ExtendedSectorInfo, len(sectors)) for i, sinfo := range sectors { - out[i] = builtin.SectorInfo{ + out[i] = builtin.ExtendedSectorInfo{ SealProof: sinfo.SealProof, SectorNumber: sinfo.SectorNumber, SealedCID: sinfo.SealedCID, + SectorKey: sinfo.SectorKeyCID, } } @@ -300,12 +302,12 @@ func ListMinerActors(ctx context.Context, sm *StateManager, ts *types.TipSet) ([ } func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule, tsk types.TipSetKey, round abi.ChainEpoch, maddr address.Address, pv ffiwrapper.Verifier) (*api.MiningBaseInfo, error) { - ts, err := sm.ChainStore().LoadTipSet(tsk) + ts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset for mining base: %w", err) } - prev, err := sm.ChainStore().GetLatestBeaconEntry(ts) + prev, err := sm.ChainStore().GetLatestBeaconEntry(ctx, ts) if err != nil { if os.Getenv("LOTUS_IGNORE_DRAND") != "_yes_" { return nil, xerrors.Errorf("failed to get latest beacon entry: %w", err) @@ -357,7 +359,7 @@ func MinerGetBaseInfo(ctx context.Context, sm *StateManager, bcs beacon.Schedule return nil, xerrors.Errorf("failed to get randomness for winning post: %w", err) } - nv := sm.GetNtwkVersion(ctx, ts.Height()) + nv := sm.GetNetworkVersion(ctx, ts.Height()) sectors, err := GetSectorsForWinningPoSt(ctx, nv, pv, sm, lbst, maddr, prand) if err != nil { @@ -419,7 +421,7 @@ func MinerEligibleToMine(ctx context.Context, sm *StateManager, addr address.Add hmp, err := minerHasMinPower(ctx, sm, addr, lookbackTs) // TODO: We're blurring the lines between a "runtime network version" and a "Lotus upgrade epoch", is that unavoidable? 
- if sm.GetNtwkVersion(ctx, baseTs.Height()) <= network.Version3 { + if sm.GetNetworkVersion(ctx, baseTs.Height()) <= network.Version3 { return hmp, err } diff --git a/chain/stmgr/call.go b/chain/stmgr/call.go index 7cc50e710..1d913cc81 100644 --- a/chain/stmgr/call.go +++ b/chain/stmgr/call.go @@ -40,7 +40,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = sm.cs.GetHeaviestTipSet() // Search back till we find a height with no fork, or we reach the beginning. for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -51,7 +51,7 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parent tipset: %w", err) } @@ -75,12 +75,12 @@ func (sm *StateManager) Call(ctx context.Context, msg *types.Message, ts *types. vmopt := &vm.VMOpts{ StateBase: bstate, Epoch: pheight + 1, - Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon), + Rand: rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion), Bstore: sm.cs.StateBlockstore(), Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, pheight+1), BaseFee: types.NewInt(0), LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -155,7 +155,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri // height to have no fork, because we'll run it inside this // function before executing the given message. 
for ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -166,7 +166,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri ts = pts } } else if ts.Height() > 0 { - pts, err := sm.cs.GetTipSetFromKey(ts.Parents()) + pts, err := sm.cs.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("failed to find a non-forking epoch: %w", err) } @@ -186,7 +186,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri return nil, fmt.Errorf("failed to handle fork: %w", err) } - r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon) + r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) if span.IsRecordingEvents() { span.AddAttributes( @@ -204,7 +204,7 @@ func (sm *StateManager) CallWithGas(ctx context.Context, msg *types.Message, pri Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, ts.Height()+1), BaseFee: ts.Blocks()[0].ParentBaseFee, LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -275,7 +275,7 @@ func (sm *StateManager) Replay(ctx context.Context, ts *types.TipSet, mcid cid.C // message to find finder.mcid = mcid - _, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, &finder) + _, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, sm.cr, ts, &finder) if err != nil && !xerrors.Is(err, errHaltExecution) { return nil, nil, xerrors.Errorf("unexpected error during execution: %w", err) } diff --git a/chain/stmgr/execute.go b/chain/stmgr/execute.go index 901d71068..35aee1887 100644 --- a/chain/stmgr/execute.go +++ b/chain/stmgr/execute.go @@ -60,7 +60,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c return ts.Blocks()[0].ParentStateRoot, 
ts.Blocks()[0].ParentMessageReceipts, nil } - st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, ts, sm.tsExecMonitor) + st, rec, err = sm.tsExec.ExecuteTipSet(ctx, sm, sm.cr, ts, sm.tsExecMonitor) if err != nil { return cid.Undef, cid.Undef, err } @@ -69,7 +69,7 @@ func (sm *StateManager) TipSetState(ctx context.Context, ts *types.TipSet) (st c } func (sm *StateManager) ExecutionTraceWithMonitor(ctx context.Context, ts *types.TipSet, em ExecMonitor) (cid.Cid, error) { - st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, ts, em) + st, _, err := sm.tsExec.ExecuteTipSet(ctx, sm, sm.cr, ts, em) return st, err } diff --git a/chain/stmgr/forks.go b/chain/stmgr/forks.go index 454f781c4..a83ffdf7a 100644 --- a/chain/stmgr/forks.go +++ b/chain/stmgr/forks.go @@ -8,6 +8,8 @@ import ( "sync" "time" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" + "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -15,8 +17,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/network" - "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" - "github.com/filecoin-project/lotus/chain/actors/adt" "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" @@ -211,7 +211,7 @@ func (sm *StateManager) hasExpensiveFork(height abi.ChainEpoch) bool { return ok } -func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv10.MemMigrationCache, ts *types.TipSet) { +func runPreMigration(ctx context.Context, sm *StateManager, fn PreMigrationFunc, cache *nv15.MemMigrationCache, ts *types.TipSet) { height := ts.Height() parent := ts.ParentState() diff --git a/chain/stmgr/forks_test.go b/chain/stmgr/forks_test.go index 4fad1e4fc..ab33dfb7b 100644 --- a/chain/stmgr/forks_test.go +++ b/chain/stmgr/forks_test.go @@ -122,7 +122,7 @@ func TestForkHeightTriggers(t *testing.T) { } sm, err 
:= NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), filcns.NewTipSetExecutor(), nil, cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, @@ -265,7 +265,7 @@ func testForkRefuseCall(t *testing.T, nullsBefore, nullsAfter int) { var migrationCount int sm, err := NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), filcns.NewTipSetExecutor(), nil, cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Expensive: true, Height: testForkHeight, @@ -400,7 +400,7 @@ func TestForkPreMigration(t *testing.T) { counter := make(chan struct{}, 10) sm, err := NewStateManager( - cg.ChainStore(), filcns.NewTipSetExecutor(), cg.StateManager().VMSys(), UpgradeSchedule{{ + cg.ChainStore(), filcns.NewTipSetExecutor(), nil, cg.StateManager().VMSys(), UpgradeSchedule{{ Network: network.Version1, Height: testForkHeight, Migration: func(ctx context.Context, sm *StateManager, cache MigrationCache, cb ExecMonitor, diff --git a/chain/stmgr/read.go b/chain/stmgr/read.go index bc259f227..bca32429b 100644 --- a/chain/stmgr/read.go +++ b/chain/stmgr/read.go @@ -13,8 +13,8 @@ import ( "github.com/filecoin-project/lotus/chain/types" ) -func (sm *StateManager) ParentStateTsk(tsk types.TipSetKey) (*state.StateTree, error) { - ts, err := sm.cs.GetTipSetFromKey(tsk) +func (sm *StateManager) ParentStateTsk(ctx context.Context, tsk types.TipSetKey) (*state.StateTree, error) { + ts, err := sm.cs.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -57,8 +57,8 @@ func (sm *StateManager) LoadActor(_ context.Context, addr address.Address, ts *t return state.GetActor(addr) } -func (sm *StateManager) LoadActorTsk(_ context.Context, addr address.Address, 
tsk types.TipSetKey) (*types.Actor, error) { - state, err := sm.ParentStateTsk(tsk) +func (sm *StateManager) LoadActorTsk(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*types.Actor, error) { + state, err := sm.ParentStateTsk(ctx, tsk) if err != nil { return nil, err } diff --git a/chain/stmgr/searchwait.go b/chain/stmgr/searchwait.go index 45c98a855..7e6de91d4 100644 --- a/chain/stmgr/searchwait.go +++ b/chain/stmgr/searchwait.go @@ -20,7 +20,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid ctx, cancel := context.WithCancel(ctx) defer cancel() - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } @@ -40,7 +40,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid return nil, nil, cid.Undef, fmt.Errorf("expected current head on SHC stream (got %s)", head[0].Type) } - r, foundMsg, err := sm.tipsetExecutedMessage(head[0].Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head[0].Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -93,7 +93,7 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid if candidateTs != nil && val.Val.Height() >= candidateTs.Height()+abi.ChainEpoch(confidence) { return candidateTs, candidateRcp, candidateFm, nil } - r, foundMsg, err := sm.tipsetExecutedMessage(val.Val, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, val.Val, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -130,12 +130,12 @@ func (sm *StateManager) WaitForMessage(ctx context.Context, mcid cid.Cid, confid } func (sm *StateManager) SearchForMessage(ctx context.Context, head *types.TipSet, mcid cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*types.TipSet, 
*types.MessageReceipt, cid.Cid, error) { - msg, err := sm.cs.GetCMessage(mcid) + msg, err := sm.cs.GetCMessage(ctx, mcid) if err != nil { return nil, nil, cid.Undef, fmt.Errorf("failed to load message: %w", err) } - r, foundMsg, err := sm.tipsetExecutedMessage(head, mcid, msg.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, head, mcid, msg.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, err } @@ -201,7 +201,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet return nil, nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(cur.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("failed to load tipset during msg wait searchback: %w", err) } @@ -214,7 +214,7 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet // check that between cur and parent tipset the nonce fell into range of our message if actorNoExist || (curActor.Nonce > mNonce && act.Nonce <= mNonce) { - r, foundMsg, err := sm.tipsetExecutedMessage(cur, m.Cid(), m.VMMessage(), allowReplaced) + r, foundMsg, err := sm.tipsetExecutedMessage(ctx, cur, m.Cid(), m.VMMessage(), allowReplaced) if err != nil { return nil, nil, cid.Undef, xerrors.Errorf("checking for message execution during lookback: %w", err) } @@ -229,18 +229,18 @@ func (sm *StateManager) searchBackForMsg(ctx context.Context, from *types.TipSet } } -func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { +func (sm *StateManager) tipsetExecutedMessage(ctx context.Context, ts *types.TipSet, msg cid.Cid, vmm *types.Message, allowReplaced bool) (*types.MessageReceipt, cid.Cid, error) { // The genesis block did not execute any messages if ts.Height() == 0 { return nil, cid.Undef, nil } - pts, err := sm.cs.LoadTipSet(ts.Parents()) + pts, err := sm.cs.LoadTipSet(ctx, 
ts.Parents()) if err != nil { return nil, cid.Undef, err } - cm, err := sm.cs.MessagesForTipset(pts) + cm, err := sm.cs.MessagesForTipset(ctx, pts) if err != nil { return nil, cid.Undef, err } @@ -267,7 +267,7 @@ func (sm *StateManager) tipsetExecutedMessage(ts *types.TipSet, msg cid.Cid, vmm } } - pr, err := sm.cs.GetParentReceipt(ts.Blocks()[0], i) + pr, err := sm.cs.GetParentReceipt(ctx, ts.Blocks()[0], i) if err != nil { return nil, cid.Undef, err } diff --git a/chain/stmgr/searchwait_test.go b/chain/stmgr/searchwait_test.go index 1e4776ff7..b8cd7ddcf 100644 --- a/chain/stmgr/searchwait_test.go +++ b/chain/stmgr/searchwait_test.go @@ -75,7 +75,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(rmb) + err = cg.Blockstore().Put(ctx, rmb) if err != nil { t.Fatal(err) } @@ -117,7 +117,7 @@ func TestSearchForMessageReplacements(t *testing.T) { t.Fatal(err) } - err = cg.Blockstore().Put(nrmb) + err = cg.Blockstore().Put(ctx, nrmb) if err != nil { t.Fatal(err) } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index a3f17cd41..2c9b2f787 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go @@ -4,6 +4,9 @@ import ( "context" "sync" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" + "github.com/filecoin-project/lotus/chain/rand" "github.com/filecoin-project/lotus/chain/beacon" @@ -18,10 +21,6 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" - // Used for genesis. 
- msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" - "github.com/filecoin-project/specs-actors/v3/actors/migration/nv10" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/paych" @@ -30,6 +29,9 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" + + // Used for genesis. + msig0 "github.com/filecoin-project/specs-actors/actors/builtin/multisig" ) const LookbackNoLimit = api.LookbackNoLimit @@ -53,12 +55,12 @@ type versionSpec struct { type migration struct { upgrade MigrationFunc preMigrations []PreMigration - cache *nv10.MemMigrationCache + cache *nv15.MemMigrationCache } type Executor interface { NewActorRegistry() *vm.ActorRegistry - ExecuteTipSet(ctx context.Context, sm *StateManager, ts *types.TipSet, em ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) + ExecuteTipSet(ctx context.Context, sm *StateManager, cr *resolver.Resolver, ts *types.TipSet, em ExecMonitor) (stateroot cid.Cid, rectsroot cid.Cid, err error) } type StateManager struct { @@ -95,6 +97,8 @@ type StateManager struct { tsExec Executor tsExecMonitor ExecMonitor beacon beacon.Schedule + + cr *resolver.Resolver } // Caches a single state tree @@ -103,7 +107,7 @@ type treeCache struct { tree *state.StateTree } -func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) { +func NewStateManager(cs *store.ChainStore, exec Executor, cr *resolver.Resolver, sys vm.SyscallBuilder, us UpgradeSchedule, beacon beacon.Schedule) (*StateManager, error) { // If we have upgrades, make sure they're in-order and make sense. 
if err := us.Validate(); err != nil { return nil, err @@ -121,7 +125,7 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, migration := &migration{ upgrade: upgrade.Migration, preMigrations: upgrade.PreMigrations, - cache: nv10.NewMemMigrationCache(), + cache: nv15.NewMemMigrationCache(), } stateMigrations[upgrade.Height] = migration } @@ -134,9 +138,6 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, }) lastVersion = upgrade.Network } - } else { - // Otherwise, go directly to the latest version. - lastVersion = build.NewestNetworkVersion } return &StateManager{ @@ -147,6 +148,7 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, newVM: vm.NewVM, Syscalls: sys, cs: cs, + cr: cr, tsExec: exec, stCache: make(map[string][]cid.Cid), beacon: beacon, @@ -158,8 +160,8 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, }, nil } -func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) { - sm, err := NewStateManager(cs, exec, sys, us, b) +func NewStateManagerWithUpgradeScheduleAndMonitor(cs *store.ChainStore, exec Executor, cr *resolver.Resolver, sys vm.SyscallBuilder, us UpgradeSchedule, b beacon.Schedule, em ExecMonitor) (*StateManager, error) { + sm, err := NewStateManager(cs, exec, cr, sys, us, b) if err != nil { return nil, err } @@ -323,7 +325,7 @@ func (sm *StateManager) LookupID(ctx context.Context, addr address.Address, ts * func (sm *StateManager) ValidateChain(ctx context.Context, ts *types.TipSet) error { tschain := []*types.TipSet{ts} for ts.Height() != 0 { - next, err := sm.cs.LoadTipSet(ts.Parents()) + next, err := sm.cs.LoadTipSet(ctx, ts.Parents()) if err != nil { return err } @@ -359,7 +361,7 @@ func (sm *StateManager) VMConstructor() func(context.Context, *vm.VMOpts) (*vm.V } } -func (sm 
*StateManager) GetNtwkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { +func (sm *StateManager) GetNetworkVersion(ctx context.Context, height abi.ChainEpoch) network.Version { // The epochs here are the _last_ epoch for every version, or -1 if the // version is disabled. for _, spec := range sm.networkVersions { @@ -375,36 +377,24 @@ func (sm *StateManager) VMSys() vm.SyscallBuilder { } func (sm *StateManager) GetRandomnessFromBeacon(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().GetTipSetFromKey(tsk) + pts, err := sm.ChainStore().GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } - r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) - rnv := sm.GetNtwkVersion(ctx, randEpoch) - - if rnv >= network.Version14 { - return r.GetBeaconRandomnessV3(ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - return r.GetBeaconRandomnessV2(ctx, personalization, randEpoch, entropy) - } + r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion) - return r.GetBeaconRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetBeaconRandomness(ctx, personalization, randEpoch, entropy) } func (sm *StateManager) GetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { - pts, err := sm.ChainStore().LoadTipSet(tsk) + pts, err := sm.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset key: %w", err) } - r := rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon) - rnv := sm.GetNtwkVersion(ctx, randEpoch) - - if rnv >= network.Version13 { - return r.GetChainRandomnessV2(ctx, personalization, randEpoch, entropy) - } + r := 
rand.NewStateRand(sm.ChainStore(), pts.Cids(), sm.beacon, sm.GetNetworkVersion) - return r.GetChainRandomnessV1(ctx, personalization, randEpoch, entropy) + return r.GetChainRandomness(ctx, personalization, randEpoch, entropy) } diff --git a/chain/stmgr/supply.go b/chain/stmgr/supply.go index c9475a51e..a5ac9749b 100644 --- a/chain/stmgr/supply.go +++ b/chain/stmgr/supply.go @@ -24,6 +24,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" + rewact "github.com/filecoin-project/lotus/chain/consensus/actors/reward" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/types" ) @@ -31,7 +32,7 @@ import ( // sets up information about the vesting schedule func (sm *StateManager) setupGenesisVestingSchedule(ctx context.Context) error { - gb, err := sm.cs.GetGenesis() + gb, err := sm.cs.GetGenesis(ctx) if err != nil { return xerrors.Errorf("getting genesis block: %w", err) } @@ -246,12 +247,13 @@ func GetFilMined(ctx context.Context, st *state.StateTree) (abi.TokenAmount, err return big.Zero(), xerrors.Errorf("failed to load reward actor state: %w", err) } - rst, err := reward.Load(adt.WrapStore(ctx, st.Store), ractor) + var rst rewact.State + store := adt.WrapStore(ctx, st.Store) + err = store.Get(store.Context(), ractor.Head, &rst) if err != nil { return big.Zero(), err } - - return rst.TotalStoragePowerReward() + return rst.TotalStoragePowerReward, nil } func getFilMarketLocked(ctx context.Context, st *state.StateTree) (abi.TokenAmount, error) { diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index ce4f60105..2a84c777b 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -79,7 +79,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, // future. It's not guaranteed to be accurate... but that's fine. 
} - r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon) + r := rand.NewStateRand(sm.cs, ts.Cids(), sm.beacon, sm.GetNetworkVersion) vmopt := &vm.VMOpts{ StateBase: base, Epoch: height, @@ -88,7 +88,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, Actors: sm.tsExec.NewActorRegistry(), Syscalls: sm.Syscalls, CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, height), BaseFee: ts.Blocks()[0].ParentBaseFee, LookbackState: LookbackStateGetterForTipset(sm, ts), } @@ -128,7 +128,7 @@ func LookbackStateGetterForTipset(sm *StateManager, ts *types.TipSet) vm.Lookbac func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types.TipSet, round abi.ChainEpoch) (*types.TipSet, cid.Cid, error) { var lbr abi.ChainEpoch - lb := policy.GetWinningPoStSectorSetLookback(sm.GetNtwkVersion(ctx, round)) + lb := policy.GetWinningPoStSectorSetLookback(sm.GetNetworkVersion(ctx, round)) if round > lb { lbr = round - lb } @@ -155,7 +155,7 @@ func GetLookbackTipSetForRound(ctx context.Context, sm *StateManager, ts *types. } - lbts, err := sm.ChainStore().GetTipSetFromKey(nextTs.Parents()) + lbts, err := sm.ChainStore().GetTipSetFromKey(ctx, nextTs.Parents()) if err != nil { return nil, cid.Undef, xerrors.Errorf("failed to resolve lookback tipset: %w", err) } diff --git a/chain/store/basefee.go b/chain/store/basefee.go index 33367abcc..05fb5a1ee 100644 --- a/chain/store/basefee.go +++ b/chain/store/basefee.go @@ -58,7 +58,8 @@ func (cs *ChainStore) ComputeBaseFee(ctx context.Context, ts *types.TipSet) (abi seen := make(map[cid.Cid]struct{}) for _, b := range ts.Blocks() { - msg1, msg2, err := cs.MessagesForBlock(b) + // NOTE XXX: Cross-messages doesn't account for basefee? 
+ msg1, msg2, _, err := cs.MessagesForBlock(ctx, b) if err != nil { return zero, xerrors.Errorf("error getting messages for: %s: %w", b.Cid(), err) } diff --git a/chain/store/checkpoint_test.go b/chain/store/checkpoint_test.go index 81bbab6ea..73b45f3ad 100644 --- a/chain/store/checkpoint_test.go +++ b/chain/store/checkpoint_test.go @@ -10,6 +10,8 @@ import ( ) func TestChainCheckpoint(t *testing.T) { + ctx := context.Background() + cg, err := gen.NewGenerator() if err != nil { t.Fatal(err) @@ -27,11 +29,11 @@ func TestChainCheckpoint(t *testing.T) { cs := cg.ChainStore() checkpoint := last - checkpointParents, err := cs.GetTipSetFromKey(checkpoint.Parents()) + checkpointParents, err := cs.GetTipSetFromKey(ctx, checkpoint.Parents()) require.NoError(t, err) // Set the head to the block before the checkpoint. - err = cs.SetHead(checkpointParents) + err = cs.SetHead(ctx, checkpointParents) require.NoError(t, err) // Verify it worked. @@ -39,11 +41,11 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpointParents)) // Try to set the checkpoint in the future, it should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Then move the head back. - err = cs.SetHead(checkpoint) + err = cs.SetHead(ctx, checkpoint) require.NoError(t, err) // Verify it worked. @@ -51,7 +53,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // And checkpoint it. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.NoError(t, err) // Let the second miner miner mine a fork @@ -70,7 +72,7 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(checkpoint)) // Remove the checkpoint. - err = cs.RemoveCheckpoint() + err = cs.RemoveCheckpoint(ctx) require.NoError(t, err) // Now switch to the other fork. 
@@ -80,10 +82,10 @@ func TestChainCheckpoint(t *testing.T) { require.True(t, head.Equals(last)) // Setting a checkpoint on the other fork should fail. - err = cs.SetCheckpoint(checkpoint) + err = cs.SetCheckpoint(ctx, checkpoint) require.Error(t, err) // Setting a checkpoint on this fork should succeed. - err = cs.SetCheckpoint(checkpointParents) + err = cs.SetCheckpoint(ctx, checkpointParents) require.NoError(t, err) } diff --git a/chain/store/index.go b/chain/store/index.go index 324fb7a63..f5bbbd438 100644 --- a/chain/store/index.go +++ b/chain/store/index.go @@ -31,7 +31,7 @@ type ChainIndex struct { skipLength abi.ChainEpoch } -type loadTipSetFunc func(types.TipSetKey) (*types.TipSet, error) +type loadTipSetFunc func(context.Context, types.TipSetKey) (*types.TipSet, error) func NewChainIndex(lts loadTipSetFunc) *ChainIndex { sc, _ := lru.NewARC(DefaultChainIndexCacheSize) @@ -49,12 +49,12 @@ type lbEntry struct { target types.TipSetKey } -func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) GetTipsetByHeight(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { if from.Height()-to <= ci.skipLength { - return ci.walkBack(from, to) + return ci.walkBack(ctx, from, to) } - rounded, err := ci.roundDown(from) + rounded, err := ci.roundDown(ctx, from) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t for { cval, ok := ci.skipCache.Get(cur) if !ok { - fc, err := ci.fillCache(cur) + fc, err := ci.fillCache(ctx, cur) if err != nil { return nil, err } @@ -74,19 +74,19 @@ func (ci *ChainIndex) GetTipsetByHeight(_ context.Context, from *types.TipSet, t if lbe.ts.Height() == to || lbe.parentHeight < to { return lbe.ts, nil } else if to > lbe.targetHeight { - return ci.walkBack(lbe.ts, to) + return ci.walkBack(ctx, lbe.ts, to) } cur = lbe.target } } -func (ci *ChainIndex) 
GetTipsetByHeightWithoutCache(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { - return ci.walkBack(from, to) +func (ci *ChainIndex) GetTipsetByHeightWithoutCache(ctx context.Context, from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { + return ci.walkBack(ctx, from, to) } -func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { - ts, err := ci.loadTipSet(tsk) +func (ci *ChainIndex) fillCache(ctx context.Context, tsk types.TipSetKey) (*lbEntry, error) { + ts, err := ci.loadTipSet(ctx, tsk) if err != nil { return nil, err } @@ -101,7 +101,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { // will either be equal to ts.Height, or at least > ts.Parent.Height() rheight := ci.roundHeight(ts.Height()) - parent, err := ci.loadTipSet(ts.Parents()) + parent, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } @@ -115,7 +115,7 @@ func (ci *ChainIndex) fillCache(tsk types.TipSetKey) (*lbEntry, error) { if parent.Height() < rheight { skipTarget = parent } else { - skipTarget, err = ci.walkBack(parent, rheight) + skipTarget, err = ci.walkBack(ctx, parent, rheight) if err != nil { return nil, xerrors.Errorf("fillCache walkback: %w", err) } @@ -137,10 +137,10 @@ func (ci *ChainIndex) roundHeight(h abi.ChainEpoch) abi.ChainEpoch { return (h / ci.skipLength) * ci.skipLength } -func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { +func (ci *ChainIndex) roundDown(ctx context.Context, ts *types.TipSet) (*types.TipSet, error) { target := ci.roundHeight(ts.Height()) - rounded, err := ci.walkBack(ts, target) + rounded, err := ci.walkBack(ctx, ts, target) if err != nil { return nil, err } @@ -148,7 +148,7 @@ func (ci *ChainIndex) roundDown(ts *types.TipSet) (*types.TipSet, error) { return rounded, nil } -func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.TipSet, error) { +func (ci *ChainIndex) walkBack(ctx context.Context, from *types.TipSet, to 
abi.ChainEpoch) (*types.TipSet, error) { if to > from.Height() { return nil, xerrors.Errorf("looking for tipset with height greater than start point") } @@ -160,7 +160,7 @@ func (ci *ChainIndex) walkBack(from *types.TipSet, to abi.ChainEpoch) (*types.Ti ts := from for { - pts, err := ci.loadTipSet(ts.Parents()) + pts, err := ci.loadTipSet(ctx, ts.Parents()) if err != nil { return nil, err } diff --git a/chain/store/index_test.go b/chain/store/index_test.go index 9bc31e5a8..b7f1d570f 100644 --- a/chain/store/index_test.go +++ b/chain/store/index_test.go @@ -35,7 +35,7 @@ func TestIndexSeeks(t *testing.T) { cs := store.NewChainStore(nbs, nbs, syncds.MutexWrap(datastore.NewMapDatastore()), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - _, err = cs.Import(bytes.NewReader(gencar)) + _, err = cs.Import(ctx, bytes.NewReader(gencar)) if err != nil { t.Fatal(err) } @@ -44,7 +44,7 @@ func TestIndexSeeks(t *testing.T) { if err := cs.PutTipSet(ctx, mock.TipSet(gen)); err != nil { t.Fatal(err) } - assert.NoError(t, cs.SetGenesis(gen)) + assert.NoError(t, cs.SetGenesis(ctx, gen)) // Put 113 blocks from genesis for i := 0; i < 113; i++ { diff --git a/chain/store/messages.go b/chain/store/messages.go index 07ce83458..8a1426c11 100644 --- a/chain/store/messages.go +++ b/chain/store/messages.go @@ -23,25 +23,25 @@ type storable interface { ToStorageBlock() (block.Block, error) } -func PutMessage(bs bstore.Blockstore, m storable) (cid.Cid, error) { +func PutMessage(ctx context.Context, bs bstore.Blockstore, m storable) (cid.Cid, error) { b, err := m.ToStorageBlock() if err != nil { return cid.Undef, err } - if err := bs.Put(b); err != nil { + if err := bs.Put(ctx, b); err != nil { return cid.Undef, err } return b.Cid(), nil } -func (cs *ChainStore) PutMessage(m storable) (cid.Cid, error) { - return PutMessage(cs.chainBlockstore, m) +func (cs *ChainStore) PutMessage(ctx context.Context, m storable) (cid.Cid, error) { + return PutMessage(ctx, cs.chainBlockstore, m) } -func 
(cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { - m, err := cs.GetMessage(c) +func (cs *ChainStore) GetCMessage(ctx context.Context, c cid.Cid) (types.ChainMsg, error) { + m, err := cs.GetMessage(ctx, c) if err == nil { return m, nil } @@ -49,21 +49,21 @@ func (cs *ChainStore) GetCMessage(c cid.Cid) (types.ChainMsg, error) { log.Warnf("GetCMessage: unexpected error getting unsigned message: %s", err) } - return cs.GetSignedMessage(c) + return cs.GetSignedMessage(ctx, c) } -func (cs *ChainStore) GetMessage(c cid.Cid) (*types.Message, error) { +func (cs *ChainStore) GetMessage(ctx context.Context, c cid.Cid) (*types.Message, error) { var msg *types.Message - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeMessage(b) return err }) return msg, err } -func (cs *ChainStore) GetSignedMessage(c cid.Cid) (*types.SignedMessage, error) { +func (cs *ChainStore) GetSignedMessage(ctx context.Context, c cid.Cid) (*types.SignedMessage, error) { var msg *types.SignedMessage - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { msg, err = types.DecodeSignedMessage(b) return err }) @@ -101,9 +101,10 @@ type BlockMessages struct { Miner address.Address BlsMessages []types.ChainMsg SecpkMessages []types.ChainMsg + CrossMessages []types.ChainMsg } -func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, error) { +func (cs *ChainStore) BlockMsgsForTipset(ctx context.Context, ts *types.TipSet) ([]BlockMessages, error) { // returned BlockMessages match block order in tipset applied := make(map[address.Address]uint64) @@ -142,7 +143,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err var out []BlockMessages for _, b := range ts.Blocks() { - bms, sms, err := cs.MessagesForBlock(b) + bms, sms, cms, err := 
cs.MessagesForBlock(ctx, b) if err != nil { return nil, xerrors.Errorf("failed to get messages for block: %w", err) } @@ -151,6 +152,7 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err Miner: b.Miner, BlsMessages: make([]types.ChainMsg, 0, len(bms)), SecpkMessages: make([]types.ChainMsg, 0, len(sms)), + CrossMessages: make([]types.ChainMsg, 0, len(sms)), } for _, bmsg := range bms { @@ -175,14 +177,29 @@ func (cs *ChainStore) BlockMsgsForTipset(ts *types.TipSet) ([]BlockMessages, err } } + for _, crossmsg := range cms { + // NOTE: Cross-msgs don't follow the same cross message selection strategy as + // plain messages so there is no selection needed and can be appended directly. + // no message selection here. + // b, err := selectMsg(crossmsg.VMMessage()) + // if err != nil { + // return nil, xerrors.Errorf("failed to decide whether to select message for block: %w", err) + // } + // + // if b { + // bm.CrossMessages = append(bm.CrossMessages, crossmsg) + // } + bm.CrossMessages = append(bm.CrossMessages, crossmsg) + } + out = append(out, bm) } return out, nil } -func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, error) { - bmsgs, err := cs.BlockMsgsForTipset(ts) +func (cs *ChainStore) MessagesForTipset(ctx context.Context, ts *types.TipSet) ([]types.ChainMsg, error) { + bmsgs, err := cs.BlockMsgsForTipset(ctx, ts) if err != nil { return nil, err } @@ -204,60 +221,71 @@ func (cs *ChainStore) MessagesForTipset(ts *types.TipSet) ([]types.ChainMsg, err type mmCids struct { bls []cid.Cid secpk []cid.Cid + cross []cid.Cid } -func (cs *ChainStore) ReadMsgMetaCids(mmc cid.Cid) ([]cid.Cid, []cid.Cid, error) { +func (cs *ChainStore) ReadMsgMetaCids(ctx context.Context, mmc cid.Cid) ([]cid.Cid, []cid.Cid, []cid.Cid, error) { o, ok := cs.mmCache.Get(mmc) if ok { mmcids := o.(*mmCids) - return mmcids.bls, mmcids.secpk, nil + return mmcids.bls, mmcids.secpk, mmcids.cross, nil } cst := 
cbor.NewCborStore(cs.chainLocalBlockstore) var msgmeta types.MsgMeta - if err := cst.Get(context.TODO(), mmc, &msgmeta); err != nil { - return nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) + if err := cst.Get(ctx, mmc, &msgmeta); err != nil { + return nil, nil, nil, xerrors.Errorf("failed to load msgmeta (%s): %w", mmc, err) } blscids, err := cs.readAMTCids(msgmeta.BlsMessages) if err != nil { - return nil, nil, xerrors.Errorf("loading bls message cids for block: %w", err) + return nil, nil, nil, xerrors.Errorf("loading bls message cids for block: %w", err) } secpkcids, err := cs.readAMTCids(msgmeta.SecpkMessages) if err != nil { - return nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err) + return nil, nil, nil, xerrors.Errorf("loading secpk message cids for block: %w", err) + } + + crosscids, err := cs.readAMTCids(msgmeta.CrossMessages) + if err != nil { + return nil, nil, nil, xerrors.Errorf("loading cross message cids for block: %w", err) } cs.mmCache.Add(mmc, &mmCids{ bls: blscids, secpk: secpkcids, + cross: crosscids, }) - return blscids, secpkcids, nil + return blscids, secpkcids, crosscids, nil } -func (cs *ChainStore) MessagesForBlock(b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, error) { - blscids, secpkcids, err := cs.ReadMsgMetaCids(b.Messages) +func (cs *ChainStore) MessagesForBlock(ctx context.Context, b *types.BlockHeader) ([]*types.Message, []*types.SignedMessage, []*types.Message, error) { + blscids, secpkcids, crosscids, err := cs.ReadMsgMetaCids(ctx, b.Messages) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - blsmsgs, err := cs.LoadMessagesFromCids(blscids) + blsmsgs, err := cs.LoadMessagesFromCids(ctx, blscids) if err != nil { - return nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) + return nil, nil, nil, xerrors.Errorf("loading bls messages for block: %w", err) } - secpkmsgs, err := cs.LoadSignedMessagesFromCids(secpkcids) + secpkmsgs, 
err := cs.LoadSignedMessagesFromCids(ctx, secpkcids) if err != nil { - return nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) + return nil, nil, nil, xerrors.Errorf("loading secpk messages for block: %w", err) } - return blsmsgs, secpkmsgs, nil + crossmsgs, err := cs.LoadMessagesFromCids(ctx, crosscids) + if err != nil { + return nil, nil, nil, xerrors.Errorf("loading cross messages messages for block: %w", err) + } + + return blsmsgs, secpkmsgs, crossmsgs, nil } -func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.MessageReceipt, error) { - ctx := context.TODO() +func (cs *ChainStore) GetParentReceipt(ctx context.Context, b *types.BlockHeader, i int) (*types.MessageReceipt, error) { // block headers use adt0, for now. a, err := blockadt.AsArray(cs.ActorStore(ctx), b.ParentMessageReceipts) if err != nil { @@ -274,10 +302,10 @@ func (cs *ChainStore) GetParentReceipt(b *types.BlockHeader, i int) (*types.Mess return &r, nil } -func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, error) { +func (cs *ChainStore) LoadMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.Message, error) { msgs := make([]*types.Message, 0, len(cids)) for i, c := range cids { - m, err := cs.GetMessage(c) + m, err := cs.GetMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } @@ -288,10 +316,10 @@ func (cs *ChainStore) LoadMessagesFromCids(cids []cid.Cid) ([]*types.Message, er return msgs, nil } -func (cs *ChainStore) LoadSignedMessagesFromCids(cids []cid.Cid) ([]*types.SignedMessage, error) { +func (cs *ChainStore) LoadSignedMessagesFromCids(ctx context.Context, cids []cid.Cid) ([]*types.SignedMessage, error) { msgs := make([]*types.SignedMessage, 0, len(cids)) for i, c := range cids { - m, err := cs.GetSignedMessage(c) + m, err := cs.GetSignedMessage(ctx, c) if err != nil { return nil, xerrors.Errorf("failed to get message: (%s):%d: %w", c, i, err) } diff 
--git a/chain/store/snapshot.go b/chain/store/snapshot.go index 1d4ce3758..61fa8bdc8 100644 --- a/chain/store/snapshot.go +++ b/chain/store/snapshot.go @@ -30,7 +30,7 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo unionBs := bstore.Union(cs.stateBlockstore, cs.chainBlockstore) return cs.WalkSnapshot(ctx, ts, inclRecentRoots, skipOldMsgs, true, func(c cid.Cid) error { - blk, err := unionBs.Get(c) + blk, err := unionBs.Get(ctx, c) if err != nil { return xerrors.Errorf("writing object to car, bs.Get: %w", err) } @@ -43,18 +43,18 @@ func (cs *ChainStore) Export(ctx context.Context, ts *types.TipSet, inclRecentRo }) } -func (cs *ChainStore) Import(r io.Reader) (*types.TipSet, error) { +func (cs *ChainStore) Import(ctx context.Context, r io.Reader) (*types.TipSet, error) { // TODO: writing only to the state blockstore is incorrect. // At this time, both the state and chain blockstores are backed by the // universal store. When we physically segregate the stores, we will need // to route state objects to the state blockstore, and chain objects to // the chain blockstore. 
- header, err := car.LoadCar(cs.StateBlockstore(), r) + header, err := car.LoadCar(ctx, cs.StateBlockstore(), r) if err != nil { return nil, xerrors.Errorf("loadcar failed: %w", err) } - root, err := cs.LoadTipSet(types.NewTipSetKey(header.Roots...)) + root, err := cs.LoadTipSet(ctx, types.NewTipSetKey(header.Roots...)) if err != nil { return nil, xerrors.Errorf("failed to load root tipset from chainfile: %w", err) } @@ -82,7 +82,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return err } - data, err := cs.chainBlockstore.Get(blk) + data, err := cs.chainBlockstore.Get(ctx, blk) if err != nil { return xerrors.Errorf("getting block: %w", err) } @@ -102,7 +102,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe var cids []cid.Cid if !skipOldMsgs || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.Messages) { - mcids, err := recurseLinks(cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) + mcids, err := recurseLinks(ctx, cs.chainBlockstore, walked, b.Messages, []cid.Cid{b.Messages}) if err != nil { return xerrors.Errorf("recursing messages failed: %w", err) } @@ -123,7 +123,7 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe if b.Height == 0 || b.Height > ts.Height()-inclRecentRoots { if walked.Visit(b.ParentStateRoot) { - cids, err := recurseLinks(cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) + cids, err := recurseLinks(ctx, cs.stateBlockstore, walked, b.ParentStateRoot, []cid.Cid{b.ParentStateRoot}) if err != nil { return xerrors.Errorf("recursing genesis state failed: %w", err) } @@ -168,12 +168,12 @@ func (cs *ChainStore) WalkSnapshot(ctx context.Context, ts *types.TipSet, inclRe return nil } -func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) ([]cid.Cid, error) { +func recurseLinks(ctx context.Context, bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid.Cid) 
([]cid.Cid, error) { if root.Prefix().Codec != cid.DagCBOR { return in, nil } - data, err := bs.Get(root) + data, err := bs.Get(ctx, root) if err != nil { return nil, xerrors.Errorf("recurse links get (%s) failed: %w", root, err) } @@ -192,7 +192,7 @@ func recurseLinks(bs bstore.Blockstore, walked *cid.Set, root cid.Cid, in []cid. in = append(in, c) var err error - in, err = recurseLinks(bs, walked, c, in) + in, err = recurseLinks(ctx, bs, walked, c, in) if err != nil { rerr = err } diff --git a/chain/store/store.go b/chain/store/store.go index f99c7f649..78254cd41 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -207,17 +207,17 @@ func (cs *ChainStore) Close() error { return nil } -func (cs *ChainStore) Load() error { - if err := cs.loadHead(); err != nil { +func (cs *ChainStore) Load(ctx context.Context) error { + if err := cs.loadHead(ctx); err != nil { return err } - if err := cs.loadCheckpoint(); err != nil { + if err := cs.loadCheckpoint(ctx); err != nil { return err } return nil } -func (cs *ChainStore) loadHead() error { - head, err := cs.metadataDs.Get(chainHeadKey) +func (cs *ChainStore) loadHead(ctx context.Context) error { + head, err := cs.metadataDs.Get(ctx, chainHeadKey) if err == dstore.ErrNotFound { log.Warn("no previous chain state found") return nil @@ -231,7 +231,7 @@ func (cs *ChainStore) loadHead() error { return xerrors.Errorf("failed to unmarshal stored chain head: %w", err) } - ts, err := cs.LoadTipSet(types.NewTipSetKey(tscids...)) + ts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(tscids...)) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -241,8 +241,8 @@ func (cs *ChainStore) loadHead() error { return nil } -func (cs *ChainStore) loadCheckpoint() error { - tskBytes, err := cs.metadataDs.Get(checkpointKey) +func (cs *ChainStore) loadCheckpoint(ctx context.Context) error { + tskBytes, err := cs.metadataDs.Get(ctx, checkpointKey) if err == dstore.ErrNotFound { return nil } @@ -256,7 +256,7 @@ func (cs 
*ChainStore) loadCheckpoint() error { return err } - ts, err := cs.LoadTipSet(tsk) + ts, err := cs.LoadTipSet(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } @@ -266,13 +266,13 @@ func (cs *ChainStore) loadCheckpoint() error { return nil } -func (cs *ChainStore) writeHead(ts *types.TipSet) error { +func (cs *ChainStore) writeHead(ctx context.Context, ts *types.TipSet) error { data, err := json.Marshal(ts.Cids()) if err != nil { return xerrors.Errorf("failed to marshal tipset: %w", err) } - if err := cs.metadataDs.Put(chainHeadKey, data); err != nil { + if err := cs.metadataDs.Put(ctx, chainHeadKey, data); err != nil { return xerrors.Errorf("failed to write chain head to datastore: %w", err) } @@ -341,13 +341,13 @@ func (cs *ChainStore) SubscribeHeadChanges(f ReorgNotifee) { func (cs *ChainStore) IsBlockValidated(ctx context.Context, blkid cid.Cid) (bool, error) { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - return cs.metadataDs.Has(key) + return cs.metadataDs.Has(ctx, key) } func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Put(key, []byte{0}); err != nil { + if err := cs.metadataDs.Put(ctx, key, []byte{0}); err != nil { return xerrors.Errorf("cache block validation: %w", err) } @@ -357,34 +357,34 @@ func (cs *ChainStore) MarkBlockAsValidated(ctx context.Context, blkid cid.Cid) e func (cs *ChainStore) UnmarkBlockAsValidated(ctx context.Context, blkid cid.Cid) error { key := blockValidationCacheKeyPrefix.Instance(blkid.String()) - if err := cs.metadataDs.Delete(key); err != nil { + if err := cs.metadataDs.Delete(ctx, key); err != nil { return xerrors.Errorf("removing from valid block cache: %w", err) } return nil } -func (cs *ChainStore) SetGenesis(b *types.BlockHeader) error { +func (cs *ChainStore) SetGenesis(ctx context.Context, b *types.BlockHeader) error { ts, err := 
types.NewTipSet([]*types.BlockHeader{b}) if err != nil { return err } - if err := cs.PutTipSet(context.TODO(), ts); err != nil { + if err := cs.PutTipSet(ctx, ts); err != nil { return err } - return cs.metadataDs.Put(dstore.NewKey("0"), b.Cid().Bytes()) + return cs.metadataDs.Put(ctx, dstore.NewKey("0"), b.Cid().Bytes()) } func (cs *ChainStore) PutTipSet(ctx context.Context, ts *types.TipSet) error { for _, b := range ts.Blocks() { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } } - expanded, err := cs.expandTipset(ts.Blocks()[0]) + expanded, err := cs.expandTipset(ctx, ts.Blocks()[0]) if err != nil { return xerrors.Errorf("errored while expanding tipset: %w", err) } @@ -435,7 +435,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // difference between 'bootstrap sync' and 'caught up' sync, we need // some other heuristic. - exceeds, err := cs.exceedsForkLength(cs.heaviest, ts) + exceeds, err := cs.exceedsForkLength(ctx, cs.heaviest, ts) if err != nil { return err } @@ -458,7 +458,7 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS // FIXME: We may want to replace some of the logic in `syncFork()` with this. // `syncFork()` counts the length on both sides of the fork at the moment (we // need to settle on that) but here we just enforce it on the `synced` side. -func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, error) { +func (cs *ChainStore) exceedsForkLength(ctx context.Context, synced, external *types.TipSet) (bool, error) { if synced == nil || external == nil { // FIXME: If `cs.heaviest` is nil we should just bypass the entire // `MaybeTakeHeavierTipSet` logic (instead of each of the called @@ -482,7 +482,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // length). 
return true, nil } - external, err = cs.LoadTipSet(external.Parents()) + external, err = cs.LoadTipSet(ctx, external.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in external chain: %w", err) } @@ -505,7 +505,7 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // there is no common ancestor. return true, nil } - synced, err = cs.LoadTipSet(synced.Parents()) + synced, err = cs.LoadTipSet(ctx, synced.Parents()) if err != nil { return false, xerrors.Errorf("failed to load parent tipset in synced chain: %w", err) } @@ -521,17 +521,17 @@ func (cs *ChainStore) exceedsForkLength(synced, external *types.TipSet) (bool, e // CAUTION: Use it only for testing, such as to teleport the chain to a // particular tipset to carry out a benchmark, verification, etc. on a chain // segment. -func (cs *ChainStore) ForceHeadSilent(_ context.Context, ts *types.TipSet) error { +func (cs *ChainStore) ForceHeadSilent(ctx context.Context, ts *types.TipSet) error { log.Warnf("(!!!) forcing a new head silently; new head: %s", ts) cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } cs.heaviest = ts - err := cs.writeHead(ts) + err := cs.writeHead(ctx, ts) if err != nil { err = xerrors.Errorf("failed to write chain head: %s", err) } @@ -561,7 +561,7 @@ func (cs *ChainStore) reorgWorker(ctx context.Context, initialNotifees []ReorgNo notifees = append(notifees, n) case r := <-out: - revert, apply, err := cs.ReorgOps(r.old, r.new) + revert, apply, err := cs.ReorgOps(ctx, r.old, r.new) if err != nil { log.Error("computing reorg ops failed: ", err) continue @@ -646,7 +646,7 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) log.Infof("New heaviest tipset! 
%s (height=%d)", ts.Cids(), ts.Height()) cs.heaviest = ts - if err := cs.writeHead(ts); err != nil { + if err := cs.writeHead(ctx, ts); err != nil { log.Errorf("failed to write chain head: %s", err) return nil } @@ -656,14 +656,14 @@ func (cs *ChainStore) takeHeaviestTipSet(ctx context.Context, ts *types.TipSet) // FlushValidationCache removes all results of block validation from the // chain metadata store. Usually the first step after a new chain import. -func (cs *ChainStore) FlushValidationCache() error { - return FlushValidationCache(cs.metadataDs) +func (cs *ChainStore) FlushValidationCache(ctx context.Context) error { + return FlushValidationCache(ctx, cs.metadataDs) } -func FlushValidationCache(ds dstore.Batching) error { +func FlushValidationCache(ctx context.Context, ds dstore.Batching) error { log.Infof("clearing block validation cache...") - dsWalk, err := ds.Query(query.Query{ + dsWalk, err := ds.Query(ctx, query.Query{ // Potential TODO: the validation cache is not a namespace on its own // but is rather constructed as prefixed-key `foo:bar` via .Instance(), which // in turn does not work with the filter, which can match only on `foo/bar` @@ -683,7 +683,7 @@ func FlushValidationCache(ds dstore.Batching) error { return xerrors.Errorf("failed to run key listing query: %w", err) } - batch, err := ds.Batch() + batch, err := ds.Batch(ctx) if err != nil { return xerrors.Errorf("failed to open a DS batch: %w", err) } @@ -692,11 +692,11 @@ func FlushValidationCache(ds dstore.Batching) error { for _, k := range allKeys { if strings.HasPrefix(k.Key, blockValidationCacheKeyPrefix.String()) { delCnt++ - batch.Delete(dstore.RawKey(k.Key)) // nolint:errcheck + batch.Delete(ctx, dstore.RawKey(k.Key)) // nolint:errcheck } } - if err := batch.Commit(); err != nil { + if err := batch.Commit(ctx); err != nil { return xerrors.Errorf("failed to commit the DS batch: %w", err) } @@ -709,24 +709,24 @@ func FlushValidationCache(ds dstore.Batching) error { // This should only 
be called if something is broken and needs fixing. // // This function will bypass and remove any checkpoints. -func (cs *ChainStore) SetHead(ts *types.TipSet) error { +func (cs *ChainStore) SetHead(ctx context.Context, ts *types.TipSet) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - if err := cs.removeCheckpoint(); err != nil { + if err := cs.removeCheckpoint(ctx); err != nil { return err } return cs.takeHeaviestTipSet(context.TODO(), ts) } // RemoveCheckpoint removes the current checkpoint. -func (cs *ChainStore) RemoveCheckpoint() error { +func (cs *ChainStore) RemoveCheckpoint(ctx context.Context) error { cs.heaviestLk.Lock() defer cs.heaviestLk.Unlock() - return cs.removeCheckpoint() + return cs.removeCheckpoint(ctx) } -func (cs *ChainStore) removeCheckpoint() error { - if err := cs.metadataDs.Delete(checkpointKey); err != nil { +func (cs *ChainStore) removeCheckpoint(ctx context.Context) error { + if err := cs.metadataDs.Delete(ctx, checkpointKey); err != nil { return err } cs.checkpoint = nil @@ -736,7 +736,7 @@ func (cs *ChainStore) removeCheckpoint() error { // SetCheckpoint will set a checkpoint past which the chainstore will not allow forks. // // NOTE: Checkpoints cannot be set beyond ForkLengthThreshold epochs in the past. 
-func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { +func (cs *ChainStore) SetCheckpoint(ctx context.Context, ts *types.TipSet) error { tskBytes, err := json.Marshal(ts.Key()) if err != nil { return err @@ -755,7 +755,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { } if !ts.Equals(cs.heaviest) { - anc, err := cs.IsAncestorOf(ts, cs.heaviest) + anc, err := cs.IsAncestorOf(ctx, ts, cs.heaviest) if err != nil { return xerrors.Errorf("cannot determine whether checkpoint tipset is in main-chain: %w", err) } @@ -764,7 +764,7 @@ func (cs *ChainStore) SetCheckpoint(ts *types.TipSet) error { return xerrors.Errorf("cannot mark tipset as checkpoint, since it isn't in the main-chain: %w", err) } } - err = cs.metadataDs.Put(checkpointKey, tskBytes) + err = cs.metadataDs.Put(ctx, checkpointKey, tskBytes) if err != nil { return err } @@ -781,9 +781,9 @@ func (cs *ChainStore) GetCheckpoint() *types.TipSet { } // Contains returns whether our BlockStore has all blocks in the supplied TipSet. -func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { +func (cs *ChainStore) Contains(ctx context.Context, ts *types.TipSet) (bool, error) { for _, c := range ts.Cids() { - has, err := cs.chainBlockstore.Has(c) + has, err := cs.chainBlockstore.Has(ctx, c) if err != nil { return false, err } @@ -797,16 +797,16 @@ func (cs *ChainStore) Contains(ts *types.TipSet) (bool, error) { // GetBlock fetches a BlockHeader with the supplied CID. It returns // blockstore.ErrNotFound if the block was not found in the BlockStore. 
-func (cs *ChainStore) GetBlock(c cid.Cid) (*types.BlockHeader, error) { +func (cs *ChainStore) GetBlock(ctx context.Context, c cid.Cid) (*types.BlockHeader, error) { var blk *types.BlockHeader - err := cs.chainLocalBlockstore.View(c, func(b []byte) (err error) { + err := cs.chainLocalBlockstore.View(ctx, c, func(b []byte) (err error) { blk, err = types.DecodeBlock(b) return err }) return blk, err } -func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) LoadTipSet(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { v, ok := cs.tsCache.Get(tsk) if ok { return v.(*types.TipSet), nil @@ -819,7 +819,7 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { for i, c := range cids { i, c := i, c eg.Go(func() error { - b, err := cs.GetBlock(c) + b, err := cs.GetBlock(ctx, c) if err != nil { return xerrors.Errorf("get block %s: %w", c, err) } @@ -844,14 +844,14 @@ func (cs *ChainStore) LoadTipSet(tsk types.TipSetKey) (*types.TipSet, error) { } // IsAncestorOf returns true if 'a' is an ancestor of 'b' -func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { +func (cs *ChainStore) IsAncestorOf(ctx context.Context, a, b *types.TipSet) (bool, error) { if b.Height() <= a.Height() { return false, nil } cur := b for !a.Equals(cur) && cur.Height() > a.Height() { - next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return false, err } @@ -862,13 +862,13 @@ func (cs *ChainStore) IsAncestorOf(a, b *types.TipSet) (bool, error) { return cur.Equals(a), nil } -func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, error) { - l, _, err := cs.ReorgOps(a, b) +func (cs *ChainStore) NearestCommonAncestor(ctx context.Context, a, b *types.TipSet) (*types.TipSet, error) { + l, _, err := cs.ReorgOps(ctx, a, b) if err != nil { return nil, err } - return cs.LoadTipSet(l[len(l)-1].Parents()) + return 
cs.LoadTipSet(ctx, l[len(l)-1].Parents()) } // ReorgOps takes two tipsets (which can be at different heights), and walks @@ -879,11 +879,11 @@ func (cs *ChainStore) NearestCommonAncestor(a, b *types.TipSet) (*types.TipSet, // ancestor. // // If an error happens along the way, we return the error with nil slices. -func (cs *ChainStore) ReorgOps(a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { - return ReorgOps(cs.LoadTipSet, a, b) +func (cs *ChainStore) ReorgOps(ctx context.Context, a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { + return ReorgOps(ctx, cs.LoadTipSet, a, b) } -func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { +func ReorgOps(ctx context.Context, lts func(ctx context.Context, _ types.TipSetKey) (*types.TipSet, error), a, b *types.TipSet) ([]*types.TipSet, []*types.TipSet, error) { left := a right := b @@ -891,7 +891,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS for !left.Equals(right) { if left.Height() > right.Height() { leftChain = append(leftChain, left) - par, err := lts(left.Parents()) + par, err := lts(ctx, left.Parents()) if err != nil { return nil, nil, err } @@ -899,7 +899,7 @@ func ReorgOps(lts func(types.TipSetKey) (*types.TipSet, error), a, b *types.TipS left = par } else { rightChain = append(rightChain, right) - par, err := lts(right.Parents()) + par, err := lts(ctx, right.Parents()) if err != nil { log.Infof("failed to fetch right.Parents: %s", err) return nil, nil, err @@ -921,7 +921,7 @@ func (cs *ChainStore) GetHeaviestTipSet() (ts *types.TipSet) { return } -func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { +func (cs *ChainStore) AddToTipSetTracker(ctx context.Context, b *types.BlockHeader) error { cs.tstLk.Lock() defer cs.tstLk.Unlock() @@ -931,7 +931,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { log.Debug("tried to add block to tipset 
tracker that was already there") return nil } - h, err := cs.GetBlock(oc) + h, err := cs.GetBlock(ctx, oc) if err == nil && h != nil { if h.Miner == b.Miner { log.Warnf("Have multiple blocks from miner %s at height %d in our tipset cache %s-%s", b.Miner, b.Height, b.Cid(), h.Cid()) @@ -960,7 +960,7 @@ func (cs *ChainStore) AddToTipSetTracker(b *types.BlockHeader) error { return nil } -func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { +func (cs *ChainStore) PersistBlockHeaders(ctx context.Context, b ...*types.BlockHeader) error { sbs := make([]block.Block, len(b)) for i, header := range b { @@ -982,13 +982,13 @@ func (cs *ChainStore) PersistBlockHeaders(b ...*types.BlockHeader) error { end = len(b) } - err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(sbs[start:end])) + err = multierr.Append(err, cs.chainLocalBlockstore.PutMany(ctx, sbs[start:end])) } return err } -func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) { +func (cs *ChainStore) expandTipset(ctx context.Context, b *types.BlockHeader) (*types.TipSet, error) { // Hold lock for the whole function for now, if it becomes a problem we can // fix pretty easily cs.tstLk.Lock() @@ -1007,7 +1007,7 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) continue } - h, err := cs.GetBlock(bhc) + h, err := cs.GetBlock(ctx, bhc) if err != nil { return nil, xerrors.Errorf("failed to load block (%s) for tipset expansion: %w", bhc, err) } @@ -1029,11 +1029,11 @@ func (cs *ChainStore) expandTipset(b *types.BlockHeader) (*types.TipSet, error) } func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) error { - if err := cs.PersistBlockHeaders(b); err != nil { + if err := cs.PersistBlockHeaders(ctx, b); err != nil { return err } - ts, err := cs.expandTipset(b) + ts, err := cs.expandTipset(ctx, b) if err != nil { return err } @@ -1045,8 +1045,8 @@ func (cs *ChainStore) AddBlock(ctx context.Context, b *types.BlockHeader) 
error return nil } -func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { - data, err := cs.metadataDs.Get(dstore.NewKey("0")) +func (cs *ChainStore) GetGenesis(ctx context.Context) (*types.BlockHeader, error) { + data, err := cs.metadataDs.Get(ctx, dstore.NewKey("0")) if err != nil { return nil, err } @@ -1056,22 +1056,22 @@ func (cs *ChainStore) GetGenesis() (*types.BlockHeader, error) { return nil, err } - return cs.GetBlock(c) + return cs.GetBlock(ctx, c) } // GetPath returns the sequence of atomic head change operations that // need to be applied in order to switch the head of the chain from the `from` // tipset to the `to` tipset. func (cs *ChainStore) GetPath(ctx context.Context, from types.TipSetKey, to types.TipSetKey) ([]*api.HeadChange, error) { - fts, err := cs.LoadTipSet(from) + fts, err := cs.LoadTipSet(ctx, from) if err != nil { return nil, xerrors.Errorf("loading from tipset %s: %w", from, err) } - tts, err := cs.LoadTipSet(to) + tts, err := cs.LoadTipSet(ctx, to) if err != nil { return nil, xerrors.Errorf("loading to tipset %s: %w", to, err) } - revert, apply, err := cs.ReorgOps(fts, tts) + revert, apply, err := cs.ReorgOps(ctx, fts, tts) if err != nil { return nil, xerrors.Errorf("error getting tipset branches: %w", err) } @@ -1108,11 +1108,11 @@ func (cs *ChainStore) ActorStore(ctx context.Context) adt.Store { return ActorStore(ctx, cs.stateBlockstore) } -func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { +func (cs *ChainStore) TryFillTipSet(ctx context.Context, ts *types.TipSet) (*FullTipSet, error) { var out []*types.FullBlock for _, b := range ts.Blocks() { - bmsgs, smsgs, err := cs.MessagesForBlock(b) + bmsgs, smsgs, crossmsg, err := cs.MessagesForBlock(ctx, b) if err != nil { // TODO: check for 'not found' errors, and only return nil if this // is actually a 'not found' error @@ -1123,6 +1123,7 @@ func (cs *ChainStore) TryFillTipSet(ts *types.TipSet) (*FullTipSet, error) { Header: b, BlsMessages: bmsgs, 
SecpkMessages: smsgs, + CrossMessages: crossmsg, } out = append(out, fb) @@ -1154,7 +1155,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t if lbts.Height() < h { log.Warnf("chain index returned the wrong tipset at height %d, using slow retrieval", h) - lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ts, h) + lbts, err = cs.cindex.GetTipsetByHeightWithoutCache(ctx, ts, h) if err != nil { return nil, err } @@ -1164,7 +1165,7 @@ func (cs *ChainStore) GetTipsetByHeight(ctx context.Context, h abi.ChainEpoch, t return lbts, nil } - return cs.LoadTipSet(lbts.Parents()) + return cs.LoadTipSet(ctx, lbts.Parents()) } func (cs *ChainStore) Weight(ctx context.Context, hts *types.TipSet) (types.BigInt, error) { // todo remove @@ -1190,14 +1191,14 @@ func breakWeightTie(ts1, ts2 *types.TipSet) bool { return false } -func (cs *ChainStore) GetTipSetFromKey(tsk types.TipSetKey) (*types.TipSet, error) { +func (cs *ChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSetKey) (*types.TipSet, error) { if tsk.IsEmpty() { return cs.GetHeaviestTipSet(), nil } - return cs.LoadTipSet(tsk) + return cs.LoadTipSet(ctx, tsk) } -func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry, error) { +func (cs *ChainStore) GetLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1209,7 +1210,7 @@ func (cs *ChainStore) GetLatestBeaconEntry(ts *types.TipSet) (*types.BeaconEntry return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - next, err := cs.LoadTipSet(cur.Parents()) + next, err := cs.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } diff --git a/chain/store/store_test.go b/chain/store/store_test.go index 2004b266c..6cf23d4d3 100644 --- a/chain/store/store_test.go +++ 
b/chain/store/store_test.go @@ -109,7 +109,7 @@ func TestChainExportImport(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } @@ -144,12 +144,12 @@ func TestChainExportImportFull(t *testing.T) { cs := store.NewChainStore(nbs, nbs, datastore.NewMapDatastore(), filcns.Weight, nil) defer cs.Close() //nolint:errcheck - root, err := cs.Import(buf) + root, err := cs.Import(context.TODO(), buf) if err != nil { t.Fatal(err) } - err = cs.SetHead(last) + err = cs.SetHead(context.Background(), last) if err != nil { t.Fatal(err) } @@ -158,7 +158,7 @@ func TestChainExportImportFull(t *testing.T) { t.Fatal("imported chain differed from exported chain") } - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule()) + sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, nil, filcns.DefaultUpgradeSchedule(), cg.BeaconSchedule()) if err != nil { t.Fatal(err) } diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 2e962a249..ca48c13d2 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -82,6 +82,12 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha return } + crossmsgs, err := FetchCrossMessagesByCids(ctx, ses, blk.CrossMessages) + if err != nil { + log.Errorf("failed to fetch all cross messages for block received over pubsub: %s; source: %s", err, src) + return + } + took := build.Clock.Since(start) log.Debugw("new block over pubsub", "cid", blk.Header.Cid(), "source", msg.GetFrom(), "msgfetch", took) if took > 3*time.Second { @@ -99,6 +105,7 @@ func HandleIncomingBlocks(ctx context.Context, bsub *pubsub.Subscription, s *cha Header: blk.Header, BlsMessages: bmsgs, SecpkMessages: smsgs, + CrossMessages: crossmsgs, }) { cmgr.TagPeer(msg.ReceivedFrom, 
"blkprop", 5) } @@ -121,7 +128,7 @@ func FetchMessagesByCids( out[i] = msg return nil - }) + }, false) if err != nil { return nil, err } @@ -144,7 +151,34 @@ func FetchSignedMessagesByCids( out[i] = smsg return nil - }) + }, false) + if err != nil { + return nil, err + } + return out, nil +} + +// CrossMessages allow duplicate messages as there may be messages with the +// the same parameter inside the same CrossMsgMetas. The original messages +// will have different nonce and this is the way we discern betweeen them. +// See how CrossMsgs are executed or `sca_apply.go` for more information. +// FIXME: Duplicate of above. +func FetchCrossMessagesByCids( + ctx context.Context, + bserv bserv.BlockGetter, + cids []cid.Cid, +) ([]*types.Message, error) { + out := make([]*types.Message, len(cids)) + + err := fetchCids(ctx, bserv, cids, func(i int, b blocks.Block) error { + msg, err := types.DecodeMessage(b.RawData()) + if err != nil { + return err + } + + out[i] = msg + return nil + }, true) if err != nil { return nil, err } @@ -160,6 +194,7 @@ func fetchCids( bserv bserv.BlockGetter, cids []cid.Cid, cb func(int, blocks.Block) error, + allowDuplicates bool, ) error { ctx, cancel := context.WithCancel(ctx) @@ -172,7 +207,7 @@ func fetchCids( } cidIndex[c] = i } - if len(cids) != len(cidIndex) { + if !allowDuplicates && len(cids) != len(cidIndex) { return fmt.Errorf("duplicate CIDs in fetchCids input") } diff --git a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go index 215439209..1a3ab2785 100644 --- a/chain/sub/incoming_test.go +++ b/chain/sub/incoming_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sub import ( @@ -49,6 +50,7 @@ func TestFetchCidsWithDedup(t *testing.T) { } g := &getter{msgs} + //stm: @CHAIN_INCOMING_FETCH_MESSAGES_BY_CID_001 // the cids have a duplicate res, err := FetchMessagesByCids(context.TODO(), g, append(cids, cids[0])) diff --git a/chain/sync.go b/chain/sync.go index 34867b136..8955441a3 100644 --- a/chain/sync.go +++ b/chain/sync.go @@ 
-119,8 +119,8 @@ type SyncManagerCtor func(syncFn SyncFunc) SyncManager type Genesis *types.TipSet -func LoadGenesis(sm *stmgr.StateManager) (Genesis, error) { - gen, err := sm.ChainStore().GetGenesis() +func LoadGenesis(ctx context.Context, sm *stmgr.StateManager) (Genesis, error) { + gen, err := sm.ChainStore().GetGenesis(ctx) if err != nil { return nil, xerrors.Errorf("getting genesis block: %w", err) } @@ -227,7 +227,7 @@ func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore - if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, fts.TipSet().Blocks()...); err != nil { log.Warn("failed to persist incoming block header: ", err) return false } @@ -286,6 +286,8 @@ func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHe // messages within this block. If validation passes, it stores the messages in // the underlying IPLD block store. func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { + ctx := context.Background() + // TODO: Should we consider cross-messages for the BlockMessageLimit? 
if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) } @@ -299,10 +301,10 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { blockstore := bstore.NewMemory() cst := cbor.NewCborStore(blockstore) - var bcids, scids []cid.Cid + var bcids, scids, crosscids []cid.Cid for _, m := range fblk.BlsMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } @@ -310,15 +312,23 @@ func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { } for _, m := range fblk.SecpkMessages { - c, err := store.PutMessage(blockstore, m) + c, err := store.PutMessage(ctx, blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } scids = append(scids, c) } + for _, m := range fblk.CrossMessages { + c, err := store.PutMessage(ctx, blockstore, m) + if err != nil { + return xerrors.Errorf("putting cross message to blockstore after msgmeta computation: %w", err) + } + crosscids = append(crosscids, c) + } + // Compute the root CID of the combined message trie. 
- smroot, err := computeMsgMeta(cst, bcids, scids) + smroot, err := computeMsgMeta(cst, bcids, scids, crosscids) if err != nil { return xerrors.Errorf("validating msgmeta, compute failed: %w", err) } @@ -360,7 +370,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { // TODO: should probably expose better methods on the blockstore for this operation var blks []blocks.Block for c := range cids { - b, err := from.Get(c) + b, err := from.Get(ctx, c) if err != nil { return err } @@ -368,7 +378,7 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { blks = append(blks, b) } - if err := to.PutMany(blks); err != nil { + if err := to.PutMany(ctx, blks); err != nil { return err } @@ -379,12 +389,12 @@ func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { // either validate it here, or ensure that its validated elsewhere (maybe make // sure the blocksync code checks it?) // maybe this code should actually live in blocksync?? 
-func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) { - if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) { +func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, allcross []*types.Message, bmi, smi, crossmi [][]uint64) (*store.FullTipSet, error) { + if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) || len(ts.Blocks()) != len(crossmi) { return nil, fmt.Errorf("msgincl length didnt match tipset size") } - if err := checkMsgMeta(ts, allbmsgs, allsmsgs, bmi, smi); err != nil { + if err := checkMsgMeta(ts, allbmsgs, allsmsgs, allcross, bmi, smi, crossmi); err != nil { return nil, err } @@ -401,10 +411,16 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types bmsgs = append(bmsgs, allbmsgs[m]) } + var crossmsgs []*types.Message + for _, m := range crossmi[bi] { + crossmsgs = append(crossmsgs, allcross[m]) + } + fb := &types.FullBlock{ Header: b, BlsMessages: bmsgs, SecpkMessages: smsgs, + CrossMessages: crossmsgs, } fts.Blocks = append(fts.Blocks, fb) @@ -415,11 +431,12 @@ func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types // computeMsgMeta computes the root CID of the combined arrays of message CIDs // of both types (BLS and Secpk). 
-func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) { +func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids, crossCids []cid.Cid) (cid.Cid, error) { // block headers use adt0 store := blockadt.WrapStore(context.TODO(), bs) bmArr := blockadt.MakeEmptyArray(store) smArr := blockadt.MakeEmptyArray(store) + crossArr := blockadt.MakeEmptyArray(store) for i, m := range bmsgCids { c := cbg.CborCid(m) @@ -435,6 +452,13 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e } } + for i, m := range crossCids { + c := cbg.CborCid(m) + if err := crossArr.Set(uint64(i), &c); err != nil { + return cid.Undef, err + } + } + bmroot, err := bmArr.Root() if err != nil { return cid.Undef, err @@ -445,14 +469,18 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e return cid.Undef, err } + crossroot, err := crossArr.Root() + if err != nil { + return cid.Undef, err + } + + // FIXME: This needs to change if we want the chain to be backward-compatible, + // as the chain currently has blocks that don't have msgMeta. mrcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, + CrossMessages: crossroot, }) - if err != nil { - return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err) - } - return mrcid, nil } @@ -463,7 +491,7 @@ func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, e // {hint/usage} This is used from the HELLO protocol, to fetch the greeting // peer's heaviest tipset if we don't have it. 
func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { - if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { + if fts, err := syncer.tryLoadFullTipSet(ctx, tsk); err == nil { return fts, nil } @@ -474,15 +502,15 @@ func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipS // tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full // representation of it containing FullBlocks. If ALL blocks are not found // locally, it errors entirely with blockstore.ErrNotFound. -func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { - ts, err := syncer.store.LoadTipSet(tsk) +func (syncer *Syncer) tryLoadFullTipSet(ctx context.Context, tsk types.TipSetKey) (*store.FullTipSet, error) { + ts, err := syncer.store.LoadTipSet(ctx, tsk) if err != nil { return nil, err } fts := &store.FullTipSet{} for _, b := range ts.Blocks() { - bmsgs, smsgs, err := syncer.store.MessagesForBlock(b) + bmsgs, smsgs, crossmsg, err := syncer.store.MessagesForBlock(ctx, b) if err != nil { return nil, err } @@ -491,6 +519,7 @@ func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, Header: b, BlsMessages: bmsgs, SecpkMessages: smsgs, + CrossMessages: crossmsg, } fts.Blocks = append(fts.Blocks, fb) } @@ -583,7 +612,7 @@ func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, return xerrors.Errorf("validating block %s: %w", b.Cid(), err) } - if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { + if err := syncer.sm.ChainStore().AddToTipSetTracker(ctx, b.Header); err != nil { return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) } return nil @@ -755,7 +784,7 @@ loop: } // If, for some reason, we have a suffix of the chain locally, handle that here - ts, err := syncer.store.LoadTipSet(at) + ts, err := syncer.store.LoadTipSet(ctx, at) if err == nil { acceptedBlocks = 
append(acceptedBlocks, at.Cids()...) @@ -838,7 +867,7 @@ loop: return blockSet, nil } - knownParent, err := syncer.store.LoadTipSet(known.Parents()) + knownParent, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -892,7 +921,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, err } - nts, err := syncer.store.LoadTipSet(known.Parents()) + nts, err := syncer.store.LoadTipSet(ctx, known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } @@ -928,7 +957,7 @@ func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, know return nil, ErrForkCheckpoint } - nts, err = syncer.store.LoadTipSet(nts.Parents()) + nts, err = syncer.store.LoadTipSet(ctx, nts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next local tipset: %w", err) } @@ -965,7 +994,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) for i := len(headers) - 1; i >= 0; { - fts, err := syncer.store.TryFillTipSet(headers[i]) + fts, err := syncer.store.TryFillTipSet(ctx, headers[i]) if err != nil { return err } @@ -998,7 +1027,7 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS this := headers[i-bsi] bstip := bstout[len(bstout)-(bsi+1)] - fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes) + fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.Cross, bstip.BlsIncludes, bstip.SecpkIncludes, bstip.CrossIncludes) if err != nil { log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i, "height", this.Height(), @@ -1025,8 +1054,9 @@ func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipS return nil } -func checkMsgMeta(ts *types.TipSet, allbmsgs 
[]*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) error { +func checkMsgMeta(ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, allcross []*types.Message, bmi, smi, crossmi [][]uint64) error { for bi, b := range ts.Blocks() { + // TODO XXX: Should we account cross-messages for the blockMessageLimt?? if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit { return fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc) } @@ -1041,7 +1071,12 @@ func checkMsgMeta(ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types bmsgCids = append(bmsgCids, allbmsgs[m].Cid()) } - mrcid, err := computeMsgMeta(cbor.NewCborStore(bstore.NewMemory()), bmsgCids, smsgCids) + var crossCids []cid.Cid + for _, m := range crossmi[bi] { + crossCids = append(crossCids, allcross[m].Cid()) + } + + mrcid, err := computeMsgMeta(cbor.NewCborStore(bstore.NewMemory()), bmsgCids, smsgCids, crossCids) if err != nil { return err } @@ -1095,7 +1130,7 @@ func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet isGood := true for index, ts := range headers[nextI:lastI] { cm := result[index] - if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.BlsIncludes, cm.SecpkIncludes); err != nil { + if err := checkMsgMeta(ts, cm.Bls, cm.Secpk, cm.Cross, cm.BlsIncludes, cm.SecpkIncludes, cm.CrossIncludes); err != nil { log.Errorf("fetched messages not as expected: %s", err) isGood = false break @@ -1138,7 +1173,7 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co for _, m := range bst.Bls { //log.Infof("putting BLS message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("BLS message processing failed: %w", err) } @@ -1148,11 +1183,20 @@ func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.Co return 
xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) } //log.Infof("putting secp256k1 message: %s", m.Cid()) - if _, err := store.PutMessage(bs, m); err != nil { + if _, err := store.PutMessage(ctx, bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("secp256k1 message processing failed: %w", err) } } + for _, m := range bst.Cross { + //log.Infof("putting Cross message: %s", m.Cid()) + if _, err := store.PutMessage(ctx, bs, m); err != nil { + log.Errorf("failed to persist messages: %+v", err) + return xerrors.Errorf("Cross message processing failed: %w", err) + } + } + + // TODO: Persist cross-messages, or as they are stored in SCA we don't need to? return nil } @@ -1201,7 +1245,7 @@ func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *t for _, ts := range headers { toPersist = append(toPersist, ts.Blocks()...) } - if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil { + if err := syncer.store.PersistBlockHeaders(ctx, toPersist...); err != nil { err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err) ss.Error(err) return err @@ -1245,7 +1289,7 @@ func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { return bbr.String(), ok } -func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { +func (syncer *Syncer) getLatestBeaconEntry(ctx context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries @@ -1257,7 +1301,7 @@ func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } - next, err := syncer.store.LoadTipSet(cur.Parents()) + next, err := syncer.store.LoadTipSet(ctx, cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for 
latest beacon entry: %w", err) } diff --git a/chain/sync_test.go b/chain/sync_test.go index 4175ff5fa..35566169f 100644 --- a/chain/sync_test.go +++ b/chain/sync_test.go @@ -1,3 +1,4 @@ +//stm: #unit package chain_test import ( @@ -22,6 +23,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -102,7 +104,7 @@ func prepSyncTest(t testing.TB, h int) *syncTestUtil { ctx: ctx, cancel: cancel, - mn: mocknet.New(ctx), + mn: mocknet.New(), g: g, us: filcns.DefaultUpgradeSchedule(), } @@ -156,7 +158,7 @@ func prepSyncTestWithV5Height(t testing.TB, h int, v5height abi.ChainEpoch) *syn ctx: ctx, cancel: cancel, - mn: mocknet.New(ctx), + mn: mocknet.New(), g: g, us: sched, } @@ -205,20 +207,21 @@ func (tu *syncTestUtil) pushFtsAndWait(to int, fts *store.FullTipSet, wait bool) } func (tu *syncTestUtil) pushTsExpectErr(to int, fts *store.FullTipSet, experr bool) { + ctx := context.TODO() for _, fb := range fts.Blocks { var b types.BlockMsg // -1 to match block.Height b.Header = fb.Header for _, msg := range fb.SecpkMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.SecpkMessages = append(b.SecpkMessages, c) } for _, msg := range fb.BlsMessages { - c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(msg) + c, err := tu.nds[to].(*impl.FullNodeAPI).ChainAPI.Chain.PutMessage(ctx, msg) require.NoError(tu.t, err) b.BlsMessages = append(b.BlsMessages, c) @@ -298,7 +301,7 @@ func (tu *syncTestUtil) addSourceNode(gen int) { lastTs := blocks[len(blocks)-1].Blocks for _, lastB := range lastTs { cs := out.(*impl.FullNodeAPI).ChainAPI.Chain - require.NoError(tu.t, 
cs.AddToTipSetTracker(lastB.Header)) + require.NoError(tu.t, cs.AddToTipSetTracker(context.Background(), lastB.Header)) err = cs.AddBlock(tu.ctx, lastB.Header) require.NoError(tu.t, err) } @@ -460,6 +463,8 @@ func (tu *syncTestUtil) waitUntilSyncTarget(to int, target *types.TipSet) { } func TestSyncSimple(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 50 tu := prepSyncTest(t, H) @@ -476,6 +481,8 @@ func TestSyncSimple(t *testing.T) { } func TestSyncMining(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 50 tu := prepSyncTest(t, H) @@ -498,6 +505,8 @@ func TestSyncMining(t *testing.T) { } func TestSyncBadTimestamp(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 50 tu := prepSyncTest(t, H) @@ -542,7 +551,7 @@ func (wpp badWpp) GenerateCandidates(context.Context, abi.PoStRandomness, uint64 return []uint64{1}, nil } -func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (wpp badWpp) ComputeProof(context.Context, []proof7.ExtendedSectorInfo, abi.PoStRandomness, abi.ChainEpoch, network.Version) ([]proof2.PoStProof, error) { return []proof2.PoStProof{ { PoStProof: abi.RegisteredPoStProof_StackedDrgWinning2KiBV1, @@ -552,6 +561,8 @@ func (wpp badWpp) ComputeProof(context.Context, []proof2.SectorInfo, abi.PoStRan } func TestSyncBadWinningPoSt(t 
*testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 15 tu := prepSyncTest(t, H) @@ -581,6 +592,9 @@ func (tu *syncTestUtil) loadChainToNode(to int) { } func TestSyncFork(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -648,6 +662,9 @@ func TestSyncFork(t *testing.T) { // A and B both include _different_ messages from sender X with nonce N (where N is the correct nonce for X). // We can confirm that the state can be correctly computed, and that `MessagesForTipset` behaves as expected. 
func TestDuplicateNonce(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -702,6 +719,7 @@ func TestDuplicateNonce(t *testing.T) { var includedMsg cid.Cid var skippedMsg cid.Cid + //stm: @CHAIN_STATE_SEARCH_MSG_001 r0, err0 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[0][0].Cid(), api.LookbackNoLimit, true) r1, err1 := tu.nds[0].StateSearchMsg(context.TODO(), ts2.TipSet().Key(), msgs[1][0].Cid(), api.LookbackNoLimit, true) @@ -734,7 +752,7 @@ func TestDuplicateNonce(t *testing.T) { t.Fatal("included message should be in exec trace") } - mft, err := tu.g.ChainStore().MessagesForTipset(ts1.TipSet()) + mft, err := tu.g.ChainStore().MessagesForTipset(context.TODO(), ts1.TipSet()) require.NoError(t, err) require.True(t, len(mft) == 1, "only expecting one message for this tipset") require.Equal(t, includedMsg, mft[0].VMMessage().Cid(), "messages for tipset didn't contain expected message") @@ -743,6 +761,9 @@ func TestDuplicateNonce(t *testing.T) { // This test asserts that a block that includes a message with bad nonce can't be synced. A nonce is "bad" if it can't // be applied on the parent state. 
func TestBadNonce(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -790,6 +811,9 @@ func TestBadNonce(t *testing.T) { // One of the messages uses the sender's robust address, the other uses the ID address. // Such a block is invalid and should not sync. func TestMismatchedNoncesRobustID(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001 v5h := abi.ChainEpoch(4) tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) @@ -802,6 +826,7 @@ func TestMismatchedNoncesRobustID(t *testing.T) { require.NoError(t, err) // Produce a message from the banker + //stm: @CHAIN_STATE_LOOKUP_ID_001 makeMsg := func(id bool) *types.SignedMessage { sender := tu.g.Banker() if id { @@ -844,6 +869,9 @@ func TestMismatchedNoncesRobustID(t *testing.T) { // One of the messages uses the sender's robust address, the other uses the ID address. // Such a block is valid and should sync. 
func TestMatchedNoncesRobustID(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001 v5h := abi.ChainEpoch(4) tu := prepSyncTestWithV5Height(t, int(v5h+5), v5h) @@ -856,6 +884,7 @@ func TestMatchedNoncesRobustID(t *testing.T) { require.NoError(t, err) // Produce a message from the banker with specified nonce + //stm: @CHAIN_STATE_LOOKUP_ID_001 makeMsg := func(n uint64, id bool) *types.SignedMessage { sender := tu.g.Banker() if id { @@ -915,6 +944,8 @@ func runSyncBenchLength(b *testing.B, l int) { } func TestSyncInputs(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_VALIDATE_BLOCK_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -942,6 +973,9 @@ func TestSyncInputs(t *testing.T) { } func TestSyncCheckpointHead(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -961,6 +995,7 @@ func TestSyncCheckpointHead(t *testing.T) { a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) tu.waitUntilSyncTarget(p1, a.TipSet()) + //stm: @CHAIN_SYNCER_CHECKPOINT_001 tu.checkpointTs(p1, a.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) @@ -980,15 +1015,20 @@ func TestSyncCheckpointHead(t *testing.T) { tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) require.True(tu.t, 
p1Head.Equals(a.TipSet())) + //stm: @CHAIN_SYNCER_CHECK_BAD_001 tu.assertBad(p1, b.TipSet()) // Should be able to switch forks. + //stm: @CHAIN_SYNCER_CHECKPOINT_001 tu.checkpointTs(p1, b.TipSet().Key()) p1Head = tu.getHead(p1) require.True(tu.t, p1Head.Equals(b.TipSet())) } func TestSyncCheckpointEarlierThanHead(t *testing.T) { + //stm: @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01, @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_SYNC_001, @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001, @CHAIN_SYNCER_STOP_001 H := 10 tu := prepSyncTest(t, H) @@ -1008,6 +1048,7 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { a = tu.mineOnBlock(a, p1, []int{0}, true, false, nil, 0, true) tu.waitUntilSyncTarget(p1, a.TipSet()) + //stm: @CHAIN_SYNCER_CHECKPOINT_001 tu.checkpointTs(p1, a1.TipSet().Key()) require.NoError(t, tu.g.ResyncBankerNonce(a1.TipSet())) @@ -1027,15 +1068,19 @@ func TestSyncCheckpointEarlierThanHead(t *testing.T) { tu.waitUntilNodeHasTs(p1, b.TipSet().Key()) p1Head := tu.getHead(p1) require.True(tu.t, p1Head.Equals(a.TipSet())) + //stm: @CHAIN_SYNCER_CHECK_BAD_001 tu.assertBad(p1, b.TipSet()) // Should be able to switch forks. 
+ //stm: @CHAIN_SYNCER_CHECKPOINT_001 tu.checkpointTs(p1, b.TipSet().Key()) p1Head = tu.getHead(p1) require.True(tu.t, p1Head.Equals(b.TipSet())) } func TestInvalidHeight(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, @CHAIN_SYNCER_START_001 + //stm: @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 H := 50 tu := prepSyncTest(t, H) diff --git a/chain/types/blockheader.go b/chain/types/blockheader.go index d36ee9314..12ea2f782 100644 --- a/chain/types/blockheader.go +++ b/chain/types/blockheader.go @@ -131,6 +131,7 @@ func (blk *BlockHeader) IsValidated() bool { type MsgMeta struct { BlsMessages cid.Cid SecpkMessages cid.Cid + CrossMessages cid.Cid } func (mm *MsgMeta) Cid() cid.Cid { diff --git a/chain/types/blockmsg.go b/chain/types/blockmsg.go index f3114499d..ea608e419 100644 --- a/chain/types/blockmsg.go +++ b/chain/types/blockmsg.go @@ -10,6 +10,7 @@ type BlockMsg struct { Header *BlockHeader BlsMessages []cid.Cid SecpkMessages []cid.Cid + CrossMessages []cid.Cid } func DecodeBlockMsg(b []byte) (*BlockMsg, error) { diff --git a/chain/types/cbor_gen.go b/chain/types/cbor_gen.go index b54c18c07..471a6dd55 100644 --- a/chain/types/cbor_gen.go +++ b/chain/types/cbor_gen.go @@ -928,7 +928,7 @@ func (t *SignedMessage) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufMsgMeta = []byte{130} +var lengthBufMsgMeta = []byte{131} func (t *MsgMeta) MarshalCBOR(w io.Writer) error { if t == nil { @@ -953,6 +953,12 @@ func (t *MsgMeta) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("failed to write cid field t.SecpkMessages: %w", err) } + // t.CrossMessages (cid.Cid) (struct) + + if err := cbg.WriteCidBuf(scratch, w, t.CrossMessages); err != nil { + return xerrors.Errorf("failed to write cid field t.CrossMessages: %w", err) + } + return nil } @@ -970,7 +976,7 @@ func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 2 { + if extra 
!= 3 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -997,6 +1003,18 @@ func (t *MsgMeta) UnmarshalCBOR(r io.Reader) error { t.SecpkMessages = c + } + // t.CrossMessages (cid.Cid) (struct) + + { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.CrossMessages: %w", err) + } + + t.CrossMessages = c + } return nil } @@ -1249,7 +1267,7 @@ func (t *MessageReceipt) UnmarshalCBOR(r io.Reader) error { return nil } -var lengthBufBlockMsg = []byte{131} +var lengthBufBlockMsg = []byte{132} func (t *BlockMsg) MarshalCBOR(w io.Writer) error { if t == nil { @@ -1294,6 +1312,20 @@ func (t *BlockMsg) MarshalCBOR(w io.Writer) error { return xerrors.Errorf("failed writing cid field t.SecpkMessages: %w", err) } } + + // t.CrossMessages ([]cid.Cid) (slice) + if len(t.CrossMessages) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.CrossMessages was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CrossMessages))); err != nil { + return err + } + for _, v := range t.CrossMessages { + if err := cbg.WriteCidBuf(scratch, w, v); err != nil { + return xerrors.Errorf("failed writing cid field t.CrossMessages: %w", err) + } + } return nil } @@ -1311,7 +1343,7 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { return fmt.Errorf("cbor input should be of type array") } - if extra != 3 { + if extra != 4 { return fmt.Errorf("cbor input had wrong number of fields") } @@ -1390,6 +1422,34 @@ func (t *BlockMsg) UnmarshalCBOR(r io.Reader) error { t.SecpkMessages[i] = c } + // t.CrossMessages ([]cid.Cid) (slice) + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.CrossMessages: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.CrossMessages = make([]cid.Cid, extra) + } + + for i := 0; i < 
int(extra); i++ { + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("reading cid field t.CrossMessages failed: %w", err) + } + t.CrossMessages[i] = c + } + return nil } diff --git a/chain/types/fullblock.go b/chain/types/fullblock.go index 5511cea8c..59e19d6b9 100644 --- a/chain/types/fullblock.go +++ b/chain/types/fullblock.go @@ -6,6 +6,7 @@ type FullBlock struct { Header *BlockHeader BlsMessages []*Message SecpkMessages []*SignedMessage + CrossMessages []*Message } func (fb *FullBlock) Cid() cid.Cid { diff --git a/chain/vm/gas.go b/chain/vm/gas.go index 206a55d36..e75c86b9f 100644 --- a/chain/vm/gas.go +++ b/chain/vm/gas.go @@ -3,14 +3,16 @@ package vm import ( "fmt" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" addr "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" - "github.com/filecoin-project/lotus/build" - vmr5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-cid" + + "github.com/filecoin-project/lotus/build" ) type GasCharge struct { @@ -73,13 +75,16 @@ type Pricelist interface { OnVerifySignature(sigType crypto.SigType, planTextSize int) (GasCharge, error) OnHashing(dataSize int) GasCharge OnComputeUnsealedSectorCid(proofType abi.RegisteredSealProof, pieces []abi.PieceInfo) GasCharge - OnVerifySeal(info proof5.SealVerifyInfo) GasCharge - OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge - OnVerifyPost(info proof5.WindowPoStVerifyInfo) GasCharge + OnVerifySeal(info proof7.SealVerifyInfo) GasCharge + OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge + OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge + OnVerifyPost(info 
proof7.WindowPoStVerifyInfo) GasCharge OnVerifyConsensusFault() GasCharge } -var prices = map[abi.ChainEpoch]Pricelist{ +// Prices are the price lists per starting epoch. Public for testing purposes +// (concretely to allow the test vector runner to rebase prices). +var Prices = map[abi.ChainEpoch]Pricelist{ abi.ChainEpoch(0): &pricelistV0{ computeGasMulti: 1, storageGasMulti: 1000, @@ -204,6 +209,8 @@ var prices = map[abi.ChainEpoch]Pricelist{ }, verifyPostDiscount: false, verifyConsensusFault: 495422, + + verifyReplicaUpdate: 36316136, }, } @@ -212,8 +219,8 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { // since we are storing the prices as map or epoch to price // we need to get the price with the highest epoch that is lower or equal to the `epoch` arg bestEpoch := abi.ChainEpoch(0) - bestPrice := prices[bestEpoch] - for e, pl := range prices { + bestPrice := Prices[bestEpoch] + for e, pl := range Prices { // if `e` happened after `bestEpoch` and `e` is earlier or equal to the target `epoch` if e > bestEpoch && e <= epoch { bestEpoch = e @@ -227,7 +234,7 @@ func PricelistByEpoch(epoch abi.ChainEpoch) Pricelist { } type pricedSyscalls struct { - under vmr5.Syscalls + under vmr.Syscalls pl Pricelist chargeGas func(GasCharge) } @@ -261,7 +268,7 @@ func (ps pricedSyscalls) ComputeUnsealedSectorCID(reg abi.RegisteredSealProof, p } // Verifies a sector seal proof. -func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { +func (ps pricedSyscalls) VerifySeal(vi proof7.SealVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifySeal(vi)) defer ps.chargeGas(gasOnActorExec) @@ -269,7 +276,7 @@ func (ps pricedSyscalls) VerifySeal(vi proof5.SealVerifyInfo) error { } // Verifies a proof of spacetime. 
-func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { +func (ps pricedSyscalls) VerifyPoSt(vi proof7.WindowPoStVerifyInfo) error { ps.chargeGas(ps.pl.OnVerifyPost(vi)) defer ps.chargeGas(gasOnActorExec) @@ -286,14 +293,14 @@ func (ps pricedSyscalls) VerifyPoSt(vi proof5.WindowPoStVerifyInfo) error { // the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the // blocks in the parent of h2 (i.e. h2's grandparent). // Returns nil and an error if the headers don't prove a fault. -func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr5.ConsensusFault, error) { +func (ps pricedSyscalls) VerifyConsensusFault(h1 []byte, h2 []byte, extra []byte) (*vmr.ConsensusFault, error) { ps.chargeGas(ps.pl.OnVerifyConsensusFault()) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyConsensusFault(h1, h2, extra) } -func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealVerifyInfo) (map[address.Address][]bool, error) { +func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof7.SealVerifyInfo) (map[address.Address][]bool, error) { count := int64(0) for _, svis := range inp { count += int64(len(svis)) @@ -307,9 +314,16 @@ func (ps pricedSyscalls) BatchVerifySeals(inp map[address.Address][]proof5.SealV return ps.under.BatchVerifySeals(inp) } -func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) error { +func (ps pricedSyscalls) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) error { ps.chargeGas(ps.pl.OnVerifyAggregateSeals(aggregate)) defer ps.chargeGas(gasOnActorExec) return ps.under.VerifyAggregateSeals(aggregate) } + +func (ps pricedSyscalls) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ps.chargeGas(ps.pl.OnVerifyReplicaUpdate(update)) + defer ps.chargeGas(gasOnActorExec) + + return ps.under.VerifyReplicaUpdate(update) +} diff --git 
a/chain/vm/gas_v0.go b/chain/vm/gas_v0.go index 13c5fdd86..1bda6dfae 100644 --- a/chain/vm/gas_v0.go +++ b/chain/vm/gas_v0.go @@ -3,8 +3,7 @@ package vm import ( "fmt" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -121,6 +120,8 @@ type pricelistV0 struct { verifyPostLookup map[abi.RegisteredPoStProof]scalingCost verifyPostDiscount bool verifyConsensusFault int64 + + verifyReplicaUpdate int64 } var _ Pricelist = (*pricelistV0)(nil) @@ -206,14 +207,14 @@ func (pl *pricelistV0) OnComputeUnsealedSectorCid(proofType abi.RegisteredSealPr } // OnVerifySeal -func (pl *pricelistV0) OnVerifySeal(info proof2.SealVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifySeal(info proof7.SealVerifyInfo) GasCharge { // TODO: this needs more cost tunning, check with @lotus // this is not used return newGasCharge("OnVerifySeal", pl.verifySealBase, 0) } // OnVerifyAggregateSeals -func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) GasCharge { +func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) GasCharge { proofType := aggregate.SealProof perProof, ok := pl.verifyAggregateSealPer[proofType] if !ok { @@ -228,8 +229,13 @@ func (pl *pricelistV0) OnVerifyAggregateSeals(aggregate proof5.AggregateSealVeri return newGasCharge("OnVerifyAggregateSeals", perProof*num+step.Lookup(num), 0) } +// OnVerifyReplicaUpdate +func (pl *pricelistV0) OnVerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) GasCharge { + return newGasCharge("OnVerifyReplicaUpdate", pl.verifyReplicaUpdate, 0) +} + // OnVerifyPost -func (pl *pricelistV0) OnVerifyPost(info proof2.WindowPoStVerifyInfo) GasCharge { +func (pl *pricelistV0) OnVerifyPost(info 
proof7.WindowPoStVerifyInfo) GasCharge { sectorSize := "unknown" var proofType abi.RegisteredPoStProof diff --git a/chain/vm/invoker.go b/chain/vm/invoker.go index 3c8a924f6..decb11250 100644 --- a/chain/vm/invoker.go +++ b/chain/vm/invoker.go @@ -16,7 +16,7 @@ import ( cbg "github.com/whyrusleeping/cbor-gen" "golang.org/x/xerrors" - vmr "github.com/filecoin-project/specs-actors/v5/actors/runtime" + vmr "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" diff --git a/chain/vm/invoker_test.go b/chain/vm/invoker_test.go index 8499f001a..fb9910ecd 100644 --- a/chain/vm/invoker_test.go +++ b/chain/vm/invoker_test.go @@ -1,7 +1,6 @@ package vm import ( - "context" "fmt" "io" "testing" @@ -136,9 +135,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { - return network.Version0 - }}, + vm: &VM{networkVersion: network.Version0}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { @@ -149,9 +146,7 @@ func TestInvokerBasic(t *testing.T) { { _, aerr := code[1](&Runtime{ - vm: &VM{ntwkVersion: func(ctx context.Context, epoch abi.ChainEpoch) network.Version { - return network.Version7 - }}, + vm: &VM{networkVersion: network.Version7}, Message: &basicRtMessage{}, }, []byte{99}) if aerrors.IsFatal(aerr) { diff --git a/chain/vm/mkactor.go b/chain/vm/mkactor.go index b75f290dc..40abc1834 100644 --- a/chain/vm/mkactor.go +++ b/chain/vm/mkactor.go @@ -14,12 +14,21 @@ import ( "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" + /* inline-gen template + {{range .actorVersions}} + builtin{{.}} "github.com/filecoin-project/specs-actors{{import .}}actors/builtin"{{end}} + + /* inline-gen start */ + builtin0 "github.com/filecoin-project/specs-actors/actors/builtin" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" builtin3 
"github.com/filecoin-project/specs-actors/v3/actors/builtin" builtin4 "github.com/filecoin-project/specs-actors/v4/actors/builtin" builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" builtin6 "github.com/filecoin-project/specs-actors/v6/actors/builtin" + builtin7 "github.com/filecoin-project/specs-actors/v7/actors/builtin" + + /* inline-gen end */ "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/aerrors" @@ -40,6 +49,11 @@ func init() { var EmptyObjectCid cid.Cid +func (vm *VM) CreateAccountActor(ctx context.Context, msg *types.Message, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) { + rt := vm.makeRuntime(ctx, msg, nil) + return TryCreateAccountActor(rt, addr) +} + // TryCreateAccountActor creates account actors from only BLS/SECP256K1 addresses. func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, address.Address, aerrors.ActorError) { if err := rt.chargeGasSafe(PricelistByEpoch(rt.height).OnCreateActor()); err != nil { @@ -60,7 +74,7 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add return nil, address.Undef, aerrors.Escalate(err, "unsupported network version") } - act, aerr := makeActor(av, addr) + act, aerr := makeAccountActor(av, addr) if aerr != nil { return nil, address.Undef, aerr } @@ -87,7 +101,7 @@ func TryCreateAccountActor(rt *Runtime, addr address.Address) (*types.Actor, add return act, addrID, nil } -func makeActor(ver actors.Version, addr address.Address) (*types.Actor, aerrors.ActorError) { +func makeAccountActor(ver actors.Version, addr address.Address) (*types.Actor, aerrors.ActorError) { switch addr.Protocol() { case address.BLS, address.SECP256K1: return newAccountActor(ver), nil @@ -104,6 +118,12 @@ func newAccountActor(ver actors.Version) *types.Actor { // TODO: ActorsUpgrade use a global actor registry? 
var code cid.Cid switch ver { + /* inline-gen template + {{range .actorVersions}} + case actors.Version{{.}}: + code = builtin{{.}}.AccountActorCodeID{{end}} + /* inline-gen start */ + case actors.Version0: code = builtin0.AccountActorCodeID case actors.Version2: @@ -116,6 +136,9 @@ func newAccountActor(ver actors.Version) *types.Actor { code = builtin5.AccountActorCodeID case actors.Version6: code = builtin6.AccountActorCodeID + case actors.Version7: + code = builtin7.AccountActorCodeID + /* inline-gen end */ default: panic("unsupported actors version") } diff --git a/chain/vm/runtime.go b/chain/vm/runtime.go index b3e013e5a..e6e1413a2 100644 --- a/chain/vm/runtime.go +++ b/chain/vm/runtime.go @@ -5,6 +5,7 @@ import ( "context" "encoding/binary" "fmt" + "os" gruntime "runtime" "time" @@ -16,8 +17,12 @@ import ( "github.com/filecoin-project/go-state-types/network" rtt "github.com/filecoin-project/go-state-types/rt" rt0 "github.com/filecoin-project/specs-actors/actors/runtime" + rt2 "github.com/filecoin-project/specs-actors/v2/actors/runtime" + rt3 "github.com/filecoin-project/specs-actors/v3/actors/runtime" + rt4 "github.com/filecoin-project/specs-actors/v4/actors/runtime" rt5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" rt6 "github.com/filecoin-project/specs-actors/v6/actors/runtime" + rt7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" "github.com/ipfs/go-cid" ipldcbor "github.com/ipfs/go-ipld-cbor" "go.opencensus.io/trace" @@ -52,11 +57,11 @@ func (m *Message) ValueReceived() abi.TokenAmount { } // EnableGasTracing, if true, outputs gas tracing in execution traces. 
-var EnableGasTracing = false +var EnableGasTracing = os.Getenv("LOTUS_VM_ENABLE_GAS_TRACING_VERY_SLOW") == "1" type Runtime struct { - rt5.Message - rt5.Syscalls + rt7.Message + rt7.Syscalls ctx context.Context @@ -87,7 +92,7 @@ func (rt *Runtime) BaseFee() abi.TokenAmount { } func (rt *Runtime) NetworkVersion() network.Version { - return rt.vm.GetNtwkVersion(rt.ctx, rt.CurrEpoch()) + return rt.vm.networkVersion } func (rt *Runtime) TotalFilCircSupply() abi.TokenAmount { @@ -142,7 +147,12 @@ func (rt *Runtime) StorePut(x cbor.Marshaler) cid.Cid { var _ rt0.Runtime = (*Runtime)(nil) var _ rt5.Runtime = (*Runtime)(nil) +var _ rt2.Runtime = (*Runtime)(nil) +var _ rt3.Runtime = (*Runtime)(nil) +var _ rt4.Runtime = (*Runtime)(nil) +var _ rt5.Runtime = (*Runtime)(nil) var _ rt6.Runtime = (*Runtime)(nil) +var _ rt7.Runtime = (*Runtime)(nil) func (rt *Runtime) shimCall(f func() interface{}) (rval []byte, aerr aerrors.ActorError) { defer func() { @@ -214,16 +224,7 @@ func (rt *Runtime) GetActorCodeCID(addr address.Address) (ret cid.Cid, ok bool) } func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - - rnv := rt.vm.ntwkVersion(rt.ctx, randEpoch) - - if rnv >= network.Version13 { - res, err = rt.vm.rand.GetChainRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetChainRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetChainRandomness(rt.ctx, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get ticket randomness: %s", err)) @@ -232,17 +233,7 @@ func (rt *Runtime) GetRandomnessFromTickets(personalization crypto.DomainSeparat } func (rt *Runtime) GetRandomnessFromBeacon(personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte) abi.Randomness { - var err error - var res []byte - - rnv := 
rt.vm.ntwkVersion(rt.ctx, randEpoch) - if rnv >= network.Version14 { - res, err = rt.vm.rand.GetBeaconRandomnessV3(rt.ctx, personalization, randEpoch, entropy) - } else if rnv == network.Version13 { - res, err = rt.vm.rand.GetBeaconRandomnessV2(rt.ctx, personalization, randEpoch, entropy) - } else { - res, err = rt.vm.rand.GetBeaconRandomnessV1(rt.ctx, personalization, randEpoch, entropy) - } + res, err := rt.vm.rand.GetBeaconRandomness(rt.ctx, personalization, randEpoch, entropy) if err != nil { panic(aerrors.Fatalf("could not get beacon randomness: %s", err)) @@ -322,7 +313,7 @@ func (rt *Runtime) DeleteActor(beneficiary address.Address) { } // Transfer the executing actor's balance to the beneficiary - if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance); err != nil { + if err := rt.vm.transfer(rt.Receiver(), beneficiary, act.Balance, rt.NetworkVersion()); err != nil { panic(aerrors.Fatalf("failed to transfer balance to beneficiary actor: %s", err)) } } @@ -382,9 +373,6 @@ func (rt *Runtime) CurrEpoch() abi.ChainEpoch { } func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshaler, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode { - if !rt.allowInternal { - rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed") - } var params []byte if m != nil { buf := new(bytes.Buffer) @@ -393,6 +381,13 @@ func (rt *Runtime) Send(to address.Address, method abi.MethodNum, m cbor.Marshal } params = buf.Bytes() } + return rt.SendWithSerializedParams(to, method, params, value, out) +} + +func (rt *Runtime) SendWithSerializedParams(to address.Address, method abi.MethodNum, params []byte, value abi.TokenAmount, out cbor.Er) exitcode.ExitCode { + if !rt.allowInternal { + rt.Abortf(exitcode.SysErrorIllegalActor, "runtime.Send() is currently disallowed") + } ret, err := rt.internalSend(rt.Receiver(), to, method, value, params) if err != nil { diff --git a/chain/vm/syscalls.go b/chain/vm/syscalls.go index 
0cbefd1fd..cd143279e 100644 --- a/chain/vm/syscalls.go +++ b/chain/vm/syscalls.go @@ -7,6 +7,8 @@ import ( goruntime "runtime" "sync" + proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" "github.com/minio/blake2b-simd" @@ -26,8 +28,8 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" "github.com/filecoin-project/lotus/lib/sigs" - runtime5 "github.com/filecoin-project/specs-actors/v5/actors/runtime" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + runtime7 "github.com/filecoin-project/specs-actors/v7/actors/runtime" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" ) func init() { @@ -36,10 +38,10 @@ func init() { // Actual type is defined in chain/types/vmcontext.go because the VMContext interface is there -type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime5.Syscalls +type SyscallBuilder func(ctx context.Context, rt *Runtime) runtime7.Syscalls func Syscalls(verifier ffiwrapper.Verifier) SyscallBuilder { - return func(ctx context.Context, rt *Runtime) runtime5.Syscalls { + return func(ctx context.Context, rt *Runtime) runtime7.Syscalls { return &syscallShim{ ctx: ctx, @@ -90,7 +92,7 @@ func (ss *syscallShim) HashBlake2b(data []byte) [32]byte { // Checks validity of the submitted consensus fault with the two block headers needed to prove the fault // and an optional extra one to check common ancestry (as needed). // Note that the blocks are ordered: the method requires a.Epoch() <= b.Epoch(). -func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.ConsensusFault, error) { +func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime7.ConsensusFault, error) { // Note that block syntax is not validated. Any validly signed block will be accepted pursuant to the below conditions. 
// Whether or not it could ever have been accepted in a chain is not checked/does not matter here. // for that reason when checking block parent relationships, rather than instantiating a Tipset to do so @@ -133,14 +135,14 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse } // (2) check for the consensus faults themselves - var consensusFault *runtime5.ConsensusFault + var consensusFault *runtime7.ConsensusFault // (a) double-fork mining fault if blockA.Height == blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultDoubleForkMining, + Type: runtime7.ConsensusFaultDoubleForkMining, } } @@ -148,10 +150,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse // strictly speaking no need to compare heights based on double fork mining check above, // but at same height this would be a different fault. if types.CidArrsEqual(blockA.Parents, blockB.Parents) && blockA.Height != blockB.Height { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultTimeOffsetMining, + Type: runtime7.ConsensusFaultTimeOffsetMining, } } @@ -171,10 +173,10 @@ func (ss *syscallShim) VerifyConsensusFault(a, b, extra []byte) (*runtime5.Conse if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { - consensusFault = &runtime5.ConsensusFault{ + consensusFault = &runtime7.ConsensusFault{ Target: blockA.Miner, Epoch: blockB.Height, - Type: runtime5.ConsensusFaultParentGrinding, + Type: runtime7.ConsensusFaultParentGrinding, } } } @@ -243,8 +245,8 @@ func (ss *syscallShim) workerKeyAtLookback(height abi.ChainEpoch) (address.Addre return 
ResolveToKeyAddr(ss.cstate, ss.cst, info.Worker) } -func (ss *syscallShim) VerifyPoSt(proof proof5.WindowPoStVerifyInfo) error { - ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), proof) +func (ss *syscallShim) VerifyPoSt(info proof5.WindowPoStVerifyInfo) error { + ok, err := ss.verifier.VerifyWindowPoSt(context.TODO(), info) if err != nil { return err } @@ -286,6 +288,7 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify if err != nil { return xerrors.Errorf("failed to verify aggregated PoRep: %w", err) } + if !ok { return fmt.Errorf("invalid aggregate proof") } @@ -293,6 +296,19 @@ func (ss *syscallShim) VerifyAggregateSeals(aggregate proof5.AggregateSealVerify return nil } +func (ss *syscallShim) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) error { + ok, err := ss.verifier.VerifyReplicaUpdate(update) + if err != nil { + return xerrors.Errorf("failed to verify replica update: %w", err) + } + + if !ok { + return fmt.Errorf("invalid replica update") + } + + return nil +} + func (ss *syscallShim) VerifySignature(sig crypto.Signature, addr address.Address, input []byte) error { // TODO: in genesis setup, we are currently faking signatures diff --git a/chain/vm/vm.go b/chain/vm/vm.go index 36308fe03..1ab97bc33 100644 --- a/chain/vm/vm.go +++ b/chain/vm/vm.go @@ -82,10 +82,10 @@ type gasChargingBlocks struct { under cbor.IpldBlockstore } -func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { +func (bs *gasChargingBlocks) View(ctx context.Context, c cid.Cid, cb func([]byte) error) error { if v, ok := bs.under.(blockstore.Viewer); ok { bs.chargeGas(bs.pricelist.OnIpldGet()) - return v.View(c, func(b []byte) error { + return v.View(ctx, c, func(b []byte) error { // we have successfully retrieved the value; charge for it, even if the user-provided function fails. 
bs.chargeGas(newGasCharge("OnIpldViewEnd", 0, 0).WithExtra(len(b))) bs.chargeGas(gasOnActorExec) @@ -93,16 +93,16 @@ func (bs *gasChargingBlocks) View(c cid.Cid, cb func([]byte) error) error { }) } // the underlying blockstore doesn't implement the viewer interface, fall back to normal Get behaviour. - blk, err := bs.Get(c) + blk, err := bs.Get(ctx, c) if err == nil && blk != nil { return cb(blk.RawData()) } return err } -func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { +func (bs *gasChargingBlocks) Get(ctx context.Context, c cid.Cid) (block.Block, error) { bs.chargeGas(bs.pricelist.OnIpldGet()) - blk, err := bs.under.Get(c) + blk, err := bs.under.Get(ctx, c) if err != nil { return nil, aerrors.Escalate(err, "failed to get block from blockstore") } @@ -112,10 +112,10 @@ func (bs *gasChargingBlocks) Get(c cid.Cid) (block.Block, error) { return blk, nil } -func (bs *gasChargingBlocks) Put(blk block.Block) error { +func (bs *gasChargingBlocks) Put(ctx context.Context, blk block.Block) error { bs.chargeGas(bs.pricelist.OnIpldPut(len(blk.RawData()))) - if err := bs.under.Put(blk); err != nil { + if err := bs.under.Put(ctx, blk); err != nil { return aerrors.Escalate(err, "failed to write data to disk") } bs.chargeGas(gasOnActorExec) @@ -169,7 +169,7 @@ func (vm *VM) makeRuntime(ctx context.Context, msg *types.Message, parent *Runti } vmm.From = resF - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 { + if vm.networkVersion <= network.Version3 { rt.Message = &vmm } else { resT, _ := rt.ResolveAddress(msg.To) @@ -202,18 +202,17 @@ type ( ) type VM struct { - cstate *state.StateTree - // TODO: Is base actually used? Can we delete it? 
- base cid.Cid + cstate *state.StateTree cst *cbor.BasicIpldStore buf *blockstore.BufferedBlockstore blockHeight abi.ChainEpoch areg *ActorRegistry rand Rand circSupplyCalc CircSupplyCalculator - ntwkVersion NtwkVersionGetter + networkVersion network.Version baseFee abi.TokenAmount lbStateGet LookbackStateGetter + baseCircSupply abi.TokenAmount Syscalls SyscallBuilder } @@ -226,7 +225,7 @@ type VMOpts struct { Actors *ActorRegistry Syscalls SyscallBuilder CircSupplyCalc CircSupplyCalculator - NtwkVersion NtwkVersionGetter // TODO: stebalien: In what cases do we actually need this? It seems like even when creating new networks we want to use the 'global'/build-default version getter + NetworkVersion network.Version BaseFee abi.TokenAmount LookbackState LookbackStateGetter } @@ -239,28 +238,30 @@ func NewVM(ctx context.Context, opts *VMOpts) (*VM, error) { return nil, err } + baseCirc, err := opts.CircSupplyCalc(ctx, opts.Epoch, state) + if err != nil { + return nil, err + } + return &VM{ cstate: state, - base: opts.StateBase, cst: cst, buf: buf, blockHeight: opts.Epoch, areg: opts.Actors, rand: opts.Rand, // TODO: Probably should be a syscall circSupplyCalc: opts.CircSupplyCalc, - ntwkVersion: opts.NtwkVersion, + networkVersion: opts.NetworkVersion, Syscalls: opts.Syscalls, baseFee: opts.BaseFee, + baseCircSupply: baseCirc, lbStateGet: opts.LookbackState, }, nil } type Rand interface { - GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) - GetBeaconRandomnessV3(ctx context.Context, pers 
crypto.DomainSeparationTag, filecoinEpoch abi.ChainEpoch, entropy []byte) ([]byte, error) + GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) + GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) } type ApplyRet struct { @@ -312,7 +313,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, return nil, aerrors.Wrapf(err, "could not create account") } toActor = a - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version3 { + if vm.networkVersion <= network.Version3 { // Leave the rt.Message as is } else { nmsg := Message{ @@ -339,7 +340,7 @@ func (vm *VM) send(ctx context.Context, msg *types.Message, parent *Runtime, defer rt.chargeGasSafe(newGasCharge("OnMethodInvocationDone", 0, 0)) if types.BigCmp(msg.Value, types.NewInt(0)) != 0 { - if err := vm.transfer(msg.From, msg.To, msg.Value); err != nil { + if err := vm.transfer(msg.From, msg.To, msg.Value, vm.networkVersion); err != nil { return nil, aerrors.Wrap(err, "failed to transfer funds") } } @@ -616,7 +617,7 @@ func (vm *VM) ApplyMessage(ctx context.Context, cmsg types.ChainMsg) (*ApplyRet, } func (vm *VM) ShouldBurn(ctx context.Context, st *state.StateTree, msg *types.Message, errcode exitcode.ExitCode) (bool, error) { - if vm.ntwkVersion(ctx, vm.blockHeight) <= network.Version12 { + if vm.networkVersion <= network.Version12 { // Check to see if we should burn funds. We avoid burning on successful // window post. This won't catch _indirect_ window post calls, but this // is the best we can get for now. 
@@ -706,7 +707,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err go func() { for b := range toFlush { - if err := to.PutMany(b); err != nil { + if err := to.PutMany(ctx, b); err != nil { close(freeBufs) errFlushChan <- xerrors.Errorf("batch put in copy: %w", err) return @@ -735,7 +736,7 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } - if err := copyRec(from, to, root, batchCp); err != nil { + if err := copyRec(ctx, from, to, root, batchCp); err != nil { return xerrors.Errorf("copyRec: %w", err) } @@ -760,13 +761,13 @@ func Copy(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid) err return nil } -func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { +func copyRec(ctx context.Context, from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) error) error { if root.Prefix().MhType == 0 { // identity cid, skip return nil } - blk, err := from.Get(root) + blk, err := from.Get(ctx, root) if err != nil { return xerrors.Errorf("get %s failed: %w", root, err) } @@ -791,7 +792,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } else { // If we have an object, we already have its children, skip the object. 
- has, err := to.Has(link) + has, err := to.Has(ctx, link) if err != nil { lerr = xerrors.Errorf("has: %w", err) return @@ -801,7 +802,7 @@ func copyRec(from, to blockstore.Blockstore, root cid.Cid, cp func(block.Block) } } - if err := copyRec(from, to, link, cp); err != nil { + if err := copyRec(ctx, from, to, link, cp); err != nil { lerr = err return } @@ -823,10 +824,6 @@ func (vm *VM) StateTree() types.StateTree { return vm.cstate } -func (vm *VM) SetBlockHeight(h abi.ChainEpoch) { - vm.blockHeight = h -} - func (vm *VM) Invoke(act *types.Actor, rt *Runtime, method abi.MethodNum, params []byte) ([]byte, aerrors.ActorError) { ctx, span := trace.StartSpan(rt.ctx, "vm.Invoke") defer span.End() @@ -854,12 +851,13 @@ func (vm *VM) SetInvoker(i *ActorRegistry) { vm.areg = i } -func (vm *VM) GetNtwkVersion(ctx context.Context, ce abi.ChainEpoch) network.Version { - return vm.ntwkVersion(ctx, ce) -} - func (vm *VM) GetCircSupply(ctx context.Context) (abi.TokenAmount, error) { - return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + // Before v15, this was recalculated on each invocation as the state tree was mutated + if vm.networkVersion <= network.Version14 { + return vm.circSupplyCalc(ctx, vm.blockHeight, vm.cstate) + } + + return vm.baseCircSupply, nil } func (vm *VM) incrementNonce(addr address.Address) error { @@ -869,32 +867,71 @@ func (vm *VM) incrementNonce(addr address.Address) error { }) } -func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.ActorError { - if from == to { - return nil - } +func (vm *VM) transfer(from, to address.Address, amt types.BigInt, networkVersion network.Version) aerrors.ActorError { + var f *types.Actor + var fromID, toID address.Address + var err error + // switching the order around so that transactions for more than the balance sent to self fail + if networkVersion >= network.Version15 { + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative 
value: %s", amt) + } - fromID, err := vm.cstate.LookupID(from) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) - } + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } - toID, err := vm.cstate.LookupID(to) - if err != nil { - return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) - } + f, err = vm.cstate.GetActor(fromID) + if err != nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } - if fromID == toID { - return nil - } + if f.Balance.LessThan(amt) { + return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed, insufficient balance in sender actor: %v", f.Balance) + } - if amt.LessThan(types.NewInt(0)) { - return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) - } + if from == to { + log.Infow("sending to same address: noop", "from/to addr", from) + return nil + } - f, err := vm.cstate.GetActor(fromID) - if err != nil { - return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + log.Infow("sending to same actor ID: noop", "from/to actor", fromID) + return nil + } + } else { + if from == to { + return nil + } + + fromID, err = vm.cstate.LookupID(from) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving sender address: %s", err) + } + + toID, err = vm.cstate.LookupID(to) + if err != nil { + return aerrors.Fatalf("transfer failed when resolving receiver address: %s", err) + } + + if fromID == toID { + return nil + } + + if amt.LessThan(types.NewInt(0)) { + return aerrors.Newf(exitcode.SysErrForbidden, "attempted to transfer negative value: %s", amt) + } + + f, err = vm.cstate.GetActor(fromID) + if err 
!= nil { + return aerrors.Fatalf("transfer failed when retrieving sender actor: %s", err) + } } t, err := vm.cstate.GetActor(toID) @@ -902,17 +939,17 @@ func (vm *VM) transfer(from, to address.Address, amt types.BigInt) aerrors.Actor return aerrors.Fatalf("transfer failed when retrieving receiver actor: %s", err) } - if err := deductFunds(f, amt); err != nil { + if err = deductFunds(f, amt); err != nil { return aerrors.Newf(exitcode.SysErrInsufficientFunds, "transfer failed when deducting funds (%s): %s", types.FIL(amt), err) } depositFunds(t, amt) - if err := vm.cstate.SetActor(fromID, f); err != nil { - return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) + if err = vm.cstate.SetActor(fromID, f); err != nil { + return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) } - if err := vm.cstate.SetActor(toID, t); err != nil { - return aerrors.Fatalf("transfer failed when setting sender actor: %s", err) + if err = vm.cstate.SetActor(toID, t); err != nil { + return aerrors.Fatalf("transfer failed when setting receiver actor: %s", err) } return nil diff --git a/chain/wallet/ledger/ledger.go b/chain/wallet/ledger/ledger.go index eb16f6460..5279389de 100644 --- a/chain/wallet/ledger/ledger.go +++ b/chain/wallet/ledger/ledger.go @@ -39,7 +39,7 @@ type LedgerKeyInfo struct { var _ api.Wallet = (*LedgerWallet)(nil) func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, toSign []byte, meta api.MsgMeta) (*crypto.Signature, error) { - ki, err := lw.getKeyInfo(signer) + ki, err := lw.getKeyInfo(ctx, signer) if err != nil { return nil, err } @@ -80,8 +80,8 @@ func (lw LedgerWallet) WalletSign(ctx context.Context, signer address.Address, t }, nil } -func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) { - kib, err := lw.ds.Get(keyForAddr(addr)) +func (lw LedgerWallet) getKeyInfo(ctx context.Context, addr address.Address) (*LedgerKeyInfo, error) { + kib, err := lw.ds.Get(ctx, 
keyForAddr(addr)) if err != nil { return nil, err } @@ -95,7 +95,7 @@ func (lw LedgerWallet) getKeyInfo(addr address.Address) (*LedgerKeyInfo, error) } func (lw LedgerWallet) WalletDelete(ctx context.Context, k address.Address) error { - return lw.ds.Delete(keyForAddr(k)) + return lw.ds.Delete(ctx, keyForAddr(k)) } func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*types.KeyInfo, error) { @@ -103,7 +103,7 @@ func (lw LedgerWallet) WalletExport(ctx context.Context, k address.Address) (*ty } func (lw LedgerWallet) WalletHas(ctx context.Context, k address.Address) (bool, error) { - _, err := lw.ds.Get(keyForAddr(k)) + _, err := lw.ds.Get(ctx, keyForAddr(k)) if err == nil { return true, nil } @@ -118,10 +118,10 @@ func (lw LedgerWallet) WalletImport(ctx context.Context, kinfo *types.KeyInfo) ( if err := json.Unmarshal(kinfo.PrivateKey, &ki); err != nil { return address.Undef, err } - return lw.importKey(ki) + return lw.importKey(ctx, ki) } -func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { +func (lw LedgerWallet) importKey(ctx context.Context, ki LedgerKeyInfo) (address.Address, error) { if ki.Address == address.Undef { return address.Undef, fmt.Errorf("no address given in imported key info") } @@ -133,7 +133,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { return address.Undef, xerrors.Errorf("marshaling key info: %w", err) } - if err := lw.ds.Put(keyForAddr(ki.Address), bb); err != nil { + if err := lw.ds.Put(ctx, keyForAddr(ki.Address), bb); err != nil { return address.Undef, err } @@ -141,7 +141,7 @@ func (lw LedgerWallet) importKey(ki LedgerKeyInfo) (address.Address, error) { } func (lw LedgerWallet) WalletList(ctx context.Context) ([]address.Address, error) { - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (lw LedgerWallet) 
WalletNew(ctx context.Context, t types.KeyType) (address. t, types.KTSecp256k1Ledger) } - res, err := lw.ds.Query(query.Query{Prefix: dsLedgerPrefix}) + res, err := lw.ds.Query(ctx, query.Query{Prefix: dsLedgerPrefix}) if err != nil { return address.Undef, err } @@ -224,7 +224,7 @@ func (lw LedgerWallet) WalletNew(ctx context.Context, t types.KeyType) (address. lki.Address = a lki.Path = path - return lw.importKey(lki) + return lw.importKey(ctx, lki) } func (lw *LedgerWallet) Get() api.Wallet { diff --git a/cli/backup.go b/cli/backup.go index 856e098dd..4d88d4bbc 100644 --- a/cli/backup.go +++ b/cli/backup.go @@ -66,7 +66,7 @@ func BackupCmd(repoFlag string, rt repo.RepoType, getApi BackupApiFn) *cli.Comma return xerrors.Errorf("opening backup file %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(cctx.Context, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/cli/client.go b/cli/client.go index daaf5f3fe..634bd18e5 100644 --- a/cli/client.go +++ b/cli/client.go @@ -26,7 +26,6 @@ import ( datatransfer "github.com/filecoin-project/go-data-transfer" "github.com/ipfs/go-cid" "github.com/ipfs/go-cidutil/cidenc" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/libp2p/go-libp2p-core/peer" "github.com/multiformats/go-multibase" "github.com/urfave/cli/v2" @@ -93,7 +92,10 @@ var clientCmd = &cli.Command{ WithCategory("data", clientLocalCmd), WithCategory("data", clientStat), WithCategory("retrieval", clientFindCmd), + WithCategory("retrieval", clientQueryRetrievalAskCmd), WithCategory("retrieval", clientRetrieveCmd), + WithCategory("retrieval", clientRetrieveCatCmd), + WithCategory("retrieval", clientRetrieveLsCmd), WithCategory("retrieval", clientCancelRetrievalDealCmd), WithCategory("retrieval", clientListRetrievalsCmd), WithCategory("util", clientCommPCmd), @@ -1029,206 +1031,64 @@ var 
clientFindCmd = &cli.Command{ }, } -const DefaultMaxRetrievePrice = "0.01" - -var clientRetrieveCmd = &cli.Command{ - Name: "retrieve", - Usage: "Retrieve data from network", - ArgsUsage: "[dataCid outputPath]", +var clientQueryRetrievalAskCmd = &cli.Command{ + Name: "retrieval-ask", + Usage: "Get a miner's retrieval ask", + ArgsUsage: "[minerAddress] [data CID]", Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "from", - Usage: "address to send transactions from", - }, - &cli.BoolFlag{ - Name: "car", - Usage: "export to a car file instead of a regular file", - }, - &cli.StringFlag{ - Name: "miner", - Usage: "miner address for retrieval, if not present it'll use local discovery", - }, - &cli.StringFlag{ - Name: "datamodel-path-selector", - Usage: "a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal", - }, - &cli.StringFlag{ - Name: "maxPrice", - Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), - }, - &cli.StringFlag{ - Name: "pieceCid", - Usage: "require data to be retrieved from a specific Piece CID", - }, - &cli.BoolFlag{ - Name: "allow-local", + &cli.Int64Flag{ + Name: "size", + Usage: "data size in bytes", }, }, Action: func(cctx *cli.Context) error { + afmt := NewAppFmt(cctx.App) if cctx.NArg() != 2 { - return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + afmt.Println("Usage: retrieval-ask [minerAddress] [data CID]") + return nil } - fapi, closer, err := GetFullNodeAPI(cctx) + maddr, err := address.NewFromString(cctx.Args().First()) if err != nil { return err } - defer closer() - ctx := ReqContext(cctx) - afmt := NewAppFmt(cctx.App) - var payer address.Address - if cctx.String("from") != "" { - payer, err = address.NewFromString(cctx.String("from")) - } else { - payer, err = fapi.WalletDefaultAddress(ctx) - } + dataCid, err := cid.Parse(cctx.Args().Get(1)) if err != nil { - return err + return fmt.Errorf("parsing data cid: %w", err) } - file, 
err := cid.Parse(cctx.Args().Get(0)) + api, closer, err := GetFullNodeAPI(cctx) if err != nil { return err } + defer closer() + ctx := ReqContext(cctx) - var pieceCid *cid.Cid - if cctx.String("pieceCid") != "" { - parsed, err := cid.Parse(cctx.String("pieceCid")) - if err != nil { - return err - } - pieceCid = &parsed - } - - var order *lapi.RetrievalOrder - if cctx.Bool("allow-local") { - imports, err := fapi.ClientListImports(ctx) - if err != nil { - return err - } - - for _, i := range imports { - if i.Root != nil && i.Root.Equals(file) { - order = &lapi.RetrievalOrder{ - Root: file, - FromLocalCAR: i.CARPath, - - Total: big.Zero(), - UnsealPrice: big.Zero(), - } - break - } - } + ask, err := api.ClientMinerQueryOffer(ctx, maddr, dataCid, nil) + if err != nil { + return err } - if order == nil { - var offer api.QueryOffer - minerStrAddr := cctx.String("miner") - if minerStrAddr == "" { // Local discovery - offers, err := fapi.ClientFindData(ctx, file, pieceCid) - - var cleaned []api.QueryOffer - // filter out offers that errored - for _, o := range offers { - if o.Err == "" { - cleaned = append(cleaned, o) - } - } - - offers = cleaned - - // sort by price low to high - sort.Slice(offers, func(i, j int) bool { - return offers[i].MinPrice.LessThan(offers[j].MinPrice) - }) - if err != nil { - return err - } - - // TODO: parse offer strings from `client find`, make this smarter - if len(offers) < 1 { - fmt.Println("Failed to find file") - return nil - } - offer = offers[0] - } else { // Directed retrieval - minerAddr, err := address.NewFromString(minerStrAddr) - if err != nil { - return err - } - offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) - if err != nil { - return err - } - } - if offer.Err != "" { - return fmt.Errorf("The received offer errored: %s", offer.Err) - } - - maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) - - if cctx.String("maxPrice") != "" { - maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) - if err != nil 
{ - return xerrors.Errorf("parsing maxPrice: %w", err) - } - } + afmt.Printf("Ask: %s\n", maddr) + afmt.Printf("Unseal price: %s\n", types.FIL(ask.UnsealPrice)) + afmt.Printf("Price per byte: %s\n", types.FIL(ask.PricePerByte)) + afmt.Printf("Payment interval: %s\n", types.SizeStr(types.NewInt(ask.PaymentInterval))) + afmt.Printf("Payment interval increase: %s\n", types.SizeStr(types.NewInt(ask.PaymentIntervalIncrease))) - if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { - return xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + size := cctx.Uint64("size") + if size == 0 { + if ask.Size == 0 { + return nil } - - o := offer.Order(payer) - order = &o - } - ref := &lapi.FileRef{ - Path: cctx.Args().Get(1), - IsCAR: cctx.Bool("car"), - } - - if sel := textselector.Expression(cctx.String("datamodel-path-selector")); sel != "" { - order.DatamodelPathSelector = &sel + size = ask.Size + afmt.Printf("Size: %s\n", types.SizeStr(types.NewInt(ask.Size))) } + transferPrice := types.BigMul(ask.PricePerByte, types.NewInt(size)) + totalPrice := types.BigAdd(ask.UnsealPrice, transferPrice) + afmt.Printf("Total price for %d bytes: %s\n", size, types.FIL(totalPrice)) - updates, err := fapi.ClientRetrieveWithEvents(ctx, *order, ref) - if err != nil { - return xerrors.Errorf("error setting up retrieval: %w", err) - } - - var prevStatus retrievalmarket.DealStatus - - for { - select { - case evt, ok := <-updates: - if ok { - afmt.Printf("> Recv: %s, Paid %s, %s (%s)\n", - types.SizeStr(types.NewInt(evt.BytesReceived)), - types.FIL(evt.FundsSpent), - retrievalmarket.ClientEvents[evt.Event], - retrievalmarket.DealStatuses[evt.Status], - ) - prevStatus = evt.Status - } - - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) - } - - if !ok { - if prevStatus == retrievalmarket.DealStatusCompleted { - afmt.Println("Success") - } else { - afmt.Printf("saw final deal state %s instead of expected success state DealStatusCompleted\n", - 
retrievalmarket.DealStatuses[prevStatus]) - } - return nil - } - - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") - } - } + return nil }, } diff --git a/cli/client_retr.go b/cli/client_retr.go new file mode 100644 index 000000000..9b195a5d8 --- /dev/null +++ b/cli/client_retr.go @@ -0,0 +1,602 @@ +package cli + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path" + "sort" + "strings" + "time" + + "github.com/ipfs/go-blockservice" + "github.com/ipfs/go-cid" + offline "github.com/ipfs/go-ipfs-exchange-offline" + "github.com/ipfs/go-merkledag" + carv2 "github.com/ipld/go-car/v2" + "github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime" + "github.com/ipld/go-ipld-prime/codec/dagjson" + basicnode "github.com/ipld/go-ipld-prime/node/basic" + "github.com/ipld/go-ipld-prime/traversal" + "github.com/ipld/go-ipld-prime/traversal/selector" + "github.com/ipld/go-ipld-prime/traversal/selector/builder" + selectorparse "github.com/ipld/go-ipld-prime/traversal/selector/parse" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/multiformats/go-multiaddr" + manet "github.com/multiformats/go-multiaddr/net" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/big" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/markets/utils" + "github.com/filecoin-project/lotus/node/repo" +) + +const DefaultMaxRetrievePrice = "0" + +func retrieve(ctx context.Context, cctx *cli.Context, fapi lapi.FullNode, sel *lapi.Selector, printf func(string, ...interface{})) (*lapi.ExportRef, error) { + var payer address.Address + var err error + if cctx.String("from") != "" { + payer, err = address.NewFromString(cctx.String("from")) + } else { + payer, 
err = fapi.WalletDefaultAddress(ctx) + } + if err != nil { + return nil, err + } + + file, err := cid.Parse(cctx.Args().Get(0)) + if err != nil { + return nil, err + } + + var pieceCid *cid.Cid + if cctx.String("pieceCid") != "" { + parsed, err := cid.Parse(cctx.String("pieceCid")) + if err != nil { + return nil, err + } + pieceCid = &parsed + } + + var eref *lapi.ExportRef + if cctx.Bool("allow-local") { + imports, err := fapi.ClientListImports(ctx) + if err != nil { + return nil, err + } + + for _, i := range imports { + if i.Root != nil && i.Root.Equals(file) { + eref = &lapi.ExportRef{ + Root: file, + FromLocalCAR: i.CARPath, + } + break + } + } + } + + // no local found, so make a retrieval + if eref == nil { + var offer lapi.QueryOffer + minerStrAddr := cctx.String("provider") + if minerStrAddr == "" { // Local discovery + offers, err := fapi.ClientFindData(ctx, file, pieceCid) + + var cleaned []lapi.QueryOffer + // filter out offers that errored + for _, o := range offers { + if o.Err == "" { + cleaned = append(cleaned, o) + } + } + + offers = cleaned + + // sort by price low to high + sort.Slice(offers, func(i, j int) bool { + return offers[i].MinPrice.LessThan(offers[j].MinPrice) + }) + if err != nil { + return nil, err + } + + // TODO: parse offer strings from `client find`, make this smarter + if len(offers) < 1 { + fmt.Println("Failed to find file") + return nil, nil + } + offer = offers[0] + } else { // Directed retrieval + minerAddr, err := address.NewFromString(minerStrAddr) + if err != nil { + return nil, err + } + offer, err = fapi.ClientMinerQueryOffer(ctx, minerAddr, file, pieceCid) + if err != nil { + return nil, err + } + } + if offer.Err != "" { + return nil, fmt.Errorf("offer error: %s", offer.Err) + } + + maxPrice := types.MustParseFIL(DefaultMaxRetrievePrice) + + if cctx.String("maxPrice") != "" { + maxPrice, err = types.ParseFIL(cctx.String("maxPrice")) + if err != nil { + return nil, xerrors.Errorf("parsing maxPrice: %w", err) + } + } + + 
if offer.MinPrice.GreaterThan(big.Int(maxPrice)) { + return nil, xerrors.Errorf("failed to find offer satisfying maxPrice: %s", maxPrice) + } + + o := offer.Order(payer) + o.DataSelector = sel + + subscribeEvents, err := fapi.ClientGetRetrievalUpdates(ctx) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval updates: %w", err) + } + retrievalRes, err := fapi.ClientRetrieve(ctx, o) + if err != nil { + return nil, xerrors.Errorf("error setting up retrieval: %w", err) + } + + start := time.Now() + readEvents: + for { + var evt lapi.RetrievalInfo + select { + case <-ctx.Done(): + return nil, xerrors.New("Retrieval Timed Out") + case evt = <-subscribeEvents: + if evt.ID != retrievalRes.DealID { + // we can't check the deal ID ahead of time because: + // 1. We need to subscribe before retrieving. + // 2. We won't know the deal ID until after retrieving. + continue + } + } + + event := "New" + if evt.Event != nil { + event = retrievalmarket.ClientEvents[*evt.Event] + } + + printf("Recv %s, Paid %s, %s (%s), %s\n", + types.SizeStr(types.NewInt(evt.BytesReceived)), + types.FIL(evt.TotalPaid), + strings.TrimPrefix(event, "ClientEvent"), + strings.TrimPrefix(retrievalmarket.DealStatuses[evt.Status], "DealStatus"), + time.Now().Sub(start).Truncate(time.Millisecond), + ) + + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break readEvents + case retrievalmarket.DealStatusRejected: + return nil, xerrors.Errorf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + return nil, xerrors.Errorf("Retrieval Error: %s", evt.Message) + } + } + + eref = &lapi.ExportRef{ + Root: file, + DealID: retrievalRes.DealID, + } + } + + return eref, nil +} + +var retrFlagsCommon = []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "address to send transactions from", + }, + &cli.StringFlag{ + Name: "provider", + Usage: "provider to use for retrieval, if not present it'll use 
local discovery", + Aliases: []string{"miner"}, + }, + &cli.StringFlag{ + Name: "maxPrice", + Usage: fmt.Sprintf("maximum price the client is willing to consider (default: %s FIL)", DefaultMaxRetrievePrice), + }, + &cli.StringFlag{ + Name: "pieceCid", + Usage: "require data to be retrieved from a specific Piece CID", + }, + &cli.BoolFlag{ + Name: "allow-local", + // todo: default to true? + }, +} + +var clientRetrieveCmd = &cli.Command{ + Name: "retrieve", + Usage: "Retrieve data from network", + ArgsUsage: "[dataCid outputPath]", + Description: `Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. The +selector can be specified as either IPLD datamodel text-path selector, or IPLD +json selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve a first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm... 
my-file.txt +`, + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "car", + Usage: "Export to a car file instead of a regular file", + }, + &cli.StringFlag{ + Name: "data-selector", + Aliases: []string{"datamodel-path-selector"}, + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + &cli.BoolFlag{ + Name: "car-export-merkle-proof", + Usage: "(requires --data-selector and --car) Export data-selector merkle proof", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 2 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + if cctx.Bool("car-export-merkle-proof") { + if !cctx.Bool("car") || !cctx.IsSet("data-selector") { + return ShowHelp(cctx, fmt.Errorf("--car-export-merkle-proof requires --car and --data-selector")) + } + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + var s *lapi.Selector + if sel := lapi.Selector(cctx.String("data-selector")); sel != "" { + s = &sel + } + + eref, err := retrieve(ctx, cctx, fapi, s, afmt.Printf) + if err != nil { + return err + } + + if s != nil { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: s, ExportMerkleProof: cctx.Bool("car-export-merkle-proof")}) + } + + err = fapi.ClientExport(ctx, *eref, lapi.FileRef{ + Path: cctx.Args().Get(1), + IsCAR: cctx.Bool("car"), + }) + if err != nil { + return err + } + afmt.Println("Success") + return nil + }, +} + +func ClientExportStream(apiAddr string, apiAuth http.Header, eref lapi.ExportRef, car bool) (io.ReadCloser, error) { + rj, err := json.Marshal(eref) + if err != nil { + return nil, xerrors.Errorf("marshaling export ref: %w", err) + } + + ma, err := multiaddr.NewMultiaddr(apiAddr) + if err == nil { + _, addr, err := manet.DialArgs(ma) + if err != nil { + return nil, err + } + + // todo: make cliutil helpers for this + apiAddr = "http://" + addr + } + + aa, err := 
url.Parse(apiAddr) + if err != nil { + return nil, xerrors.Errorf("parsing api address: %w", err) + } + switch aa.Scheme { + case "ws": + aa.Scheme = "http" + case "wss": + aa.Scheme = "https" + } + + aa.Path = path.Join(aa.Path, "rest/v0/export") + req, err := http.NewRequest("GET", fmt.Sprintf("%s?car=%t&export=%s", aa, car, url.QueryEscape(string(rj))), nil) + if err != nil { + return nil, err + } + + req.Header = apiAuth + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + em, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, xerrors.Errorf("reading error body: %w", err) + } + + resp.Body.Close() // nolint + return nil, xerrors.Errorf("getting root car: http %d: %s", resp.StatusCode, string(em)) + } + + return resp.Body, nil +} + +var clientRetrieveCatCmd = &cli.Command{ + Name: "cat", + Usage: "Show data from network", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + sel := lapi.Selector(cctx.String("data-selector")) + selp := &sel + if sel == "" { + selp = nil + } + + eref, err := retrieve(ctx, cctx, fapi, selp, afmt.Printf) + if err != nil { + return err + } + + fmt.Println() // separate retrieval events from results + + if sel != "" { + eref.DAGs = append(eref.DAGs, lapi.DagSpec{DataSelector: &sel}) + } + + rc, err := 
ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, false) + if err != nil { + return err + } + defer rc.Close() // nolint + + _, err = io.Copy(os.Stdout, rc) + return err + }, +} + +func pathToSel(psel string, matchTraversal bool, sub builder.SelectorSpec) (lapi.Selector, error) { + rs, err := textselector.SelectorSpecFromPath(textselector.Expression(psel), matchTraversal, sub) + if err != nil { + return "", xerrors.Errorf("failed to parse path-selector: %w", err) + } + + var b bytes.Buffer + if err := dagjson.Encode(rs.Node(), &b); err != nil { + return "", err + } + + return lapi.Selector(b.String()), nil +} + +var clientRetrieveLsCmd = &cli.Command{ + Name: "ls", + Usage: "List object links", + ArgsUsage: "[dataCid]", + Flags: append([]cli.Flag{ + &cli.BoolFlag{ + Name: "ipld", + Usage: "list IPLD datamodel links", + }, + &cli.IntFlag{ + Name: "depth", + Usage: "list links recursively up to the specified depth", + Value: 1, + }, + &cli.StringFlag{ + Name: "data-selector", + Usage: "IPLD datamodel text-path selector, or IPLD json selector", + }, + }, retrFlagsCommon...), + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return ShowHelp(cctx, fmt.Errorf("incorrect number of arguments")) + } + + ainfo, err := GetAPIInfo(cctx, repo.FullNode) + if err != nil { + return xerrors.Errorf("could not get API info: %w", err) + } + + fapi, closer, err := GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + afmt := NewAppFmt(cctx.App) + + dataSelector := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + dataSelector, err = pathToSel(cctx.String("data-selector"), cctx.Bool("ipld"), + ssb.ExploreUnion( + ssb.Matcher(), + ssb.ExploreAll( + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), 
ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ))) + if err != nil { + return xerrors.Errorf("parsing datamodel path: %w", err) + } + } + + eref, err := retrieve(ctx, cctx, fapi, &dataSelector, afmt.Printf) + if err != nil { + return xerrors.Errorf("retrieve: %w", err) + } + + fmt.Println() // separate retrieval events from results + + eref.DAGs = append(eref.DAGs, lapi.DagSpec{ + DataSelector: &dataSelector, + }) + + rc, err := ClientExportStream(ainfo.Addr, ainfo.AuthHeader(), *eref, true) + if err != nil { + return xerrors.Errorf("export: %w", err) + } + defer rc.Close() // nolint + + var memcar bytes.Buffer + _, err = io.Copy(&memcar, rc) + if err != nil { + return err + } + + cbs, err := blockstore.NewReadOnly(&bytesReaderAt{bytes.NewReader(memcar.Bytes())}, nil, + carv2.ZeroLengthSectionAsEOF(true), + blockstore.UseWholeCIDs(true)) + if err != nil { + return xerrors.Errorf("opening car blockstore: %w", err) + } + + roots, err := cbs.Roots() + if err != nil { + return xerrors.Errorf("getting roots: %w", err) + } + + if len(roots) != 1 { + return xerrors.Errorf("expected 1 car root, got %d", len(roots)) + } + dserv := merkledag.NewDAGService(blockservice.New(cbs, offline.Exchange(cbs))) + + if !cctx.Bool("ipld") { + links, err := dserv.GetLinks(ctx, roots[0]) + if err != nil { + return xerrors.Errorf("getting links: %w", err) + } + + for _, link := range links { + fmt.Printf("%s %s\t%d\n", link.Cid, link.Name, link.Size) + } + } else { + jsel := lapi.Selector(fmt.Sprintf(`{"R":{"l":{"depth":%d},":>":{"a":{">":{"|":[{"@":{}},{".":{}}]}}}}}`, cctx.Int("depth"))) + + if cctx.IsSet("data-selector") { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + jsel, err = pathToSel(cctx.String("data-selector"), false, + ssb.ExploreRecursive(selector.RecursionLimitDepth(int64(cctx.Int("depth"))), ssb.ExploreAll(ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreRecursiveEdge()))), + ) + } + + sel, _ := 
selectorparse.ParseJSONSelector(string(jsel)) + + if err := utils.TraverseDag( + ctx, + dserv, + roots[0], + sel, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + fmt.Println(p.Path) + } + return nil + }, + ); err != nil { + return err + } + } + + return err + }, +} + +type bytesReaderAt struct { + btr *bytes.Reader +} + +func (b bytesReaderAt) ReadAt(p []byte, off int64) (n int, err error) { + return b.btr.ReadAt(p, off) +} + +var _ io.ReaderAt = &bytesReaderAt{} diff --git a/cli/filplus.go b/cli/filplus.go index 007071ea2..02aac0b7b 100644 --- a/cli/filplus.go +++ b/cli/filplus.go @@ -220,8 +220,8 @@ var filplusCheckClientCmd = &cli.Command{ } var filplusCheckNotaryCmd = &cli.Command{ - Name: "check-notaries-datacap", - Usage: "check notaries remaining bytes", + Name: "check-notary-datacap", + Usage: "check a notary's remaining bytes", Action: func(cctx *cli.Context) error { if !cctx.Args().Present() { return fmt.Errorf("must specify notary address to check") diff --git a/cli/multisig.go b/cli/multisig.go index 7b93e55f9..0179378a7 100644 --- a/cli/multisig.go +++ b/cli/multisig.go @@ -51,6 +51,7 @@ var multisigCmd = &cli.Command{ msigProposeCmd, msigRemoveProposeCmd, msigApproveCmd, + msigCancelCmd, msigAddProposeCmd, msigAddApproveCmd, msigAddCancelCmd, @@ -159,6 +160,8 @@ var msigCreateCmd = &cli.Command{ msgCid := sm.Cid() + fmt.Println("sent create in message: ", msgCid) + // wait for it to get mined into a block wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -448,7 +451,7 @@ var msigProposeCmd = &cli.Command{ msgCid := sm.Cid() - fmt.Println("send proposal in message: ", msgCid) + fmt.Println("sent proposal in message: ", msgCid) wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) if err != nil { @@ -612,6 +615,131 @@ var msigApproveCmd = &cli.Command{ }, } +var 
msigCancelCmd = &cli.Command{ + Name: "cancel", + Usage: "Cancel a multisig message", + ArgsUsage: " [destination value [methodId methodParams]]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "account to send the cancel message from", + }, + }, + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() < 2 { + return ShowHelp(cctx, fmt.Errorf("must pass at least multisig address and message ID")) + } + + if cctx.Args().Len() > 2 && cctx.Args().Len() < 4 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel ")) + } + + if cctx.Args().Len() > 4 && cctx.Args().Len() != 6 { + return ShowHelp(cctx, fmt.Errorf("usage: msig cancel [ ]")) + } + + srv, err := GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + api := srv.FullNodeAPI() + ctx := ReqContext(cctx) + + msig, err := address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return err + } + + txid, err := strconv.ParseUint(cctx.Args().Get(1), 10, 64) + if err != nil { + return err + } + + var from address.Address + if cctx.IsSet("from") { + f, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + from = f + } else { + defaddr, err := api.WalletDefaultAddress(ctx) + if err != nil { + return err + } + from = defaddr + } + + var msgCid cid.Cid + if cctx.Args().Len() == 2 { + proto, err := api.MsigCancel(ctx, msig, txid, from) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } else { + dest, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + value, err := types.ParseFIL(cctx.Args().Get(3)) + if err != nil { + return err + } + + var method uint64 + var params []byte + if cctx.Args().Len() == 6 { + m, err := strconv.ParseUint(cctx.Args().Get(4), 10, 64) + if err != nil { + return err + } + method = m + + p, err := hex.DecodeString(cctx.Args().Get(5)) + if err != nil { + 
return err + } + params = p + } + + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, dest, types.BigInt(value), from, method, params) + if err != nil { + return err + } + + sm, err := InteractiveSend(ctx, cctx, srv, proto) + if err != nil { + return err + } + + msgCid = sm.Cid() + } + + fmt.Println("sent cancel in message: ", msgCid) + + wait, err := api.StateWaitMsg(ctx, msgCid, uint64(cctx.Int("confidence")), build.Finality, true) + if err != nil { + return err + } + + if wait.Receipt.ExitCode != 0 { + return fmt.Errorf("cancel returned exit %d", wait.Receipt.ExitCode) + } + + return nil + }, +} + var msigRemoveProposeCmd = &cli.Command{ Name: "propose-remove", Usage: "Propose to remove a signer", @@ -1490,7 +1618,7 @@ var msigLockCancelCmd = &cli.Command{ return actErr } - proto, err := api.MsigCancel(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) + proto, err := api.MsigCancelTxnHash(ctx, msig, txid, msig, big.Zero(), from, uint64(multisig.Methods.LockBalance), params) if err != nil { return err } diff --git a/cli/net.go b/cli/net.go index fdd0a13d6..524b0d753 100644 --- a/cli/net.go +++ b/cli/net.go @@ -36,6 +36,8 @@ var NetCmd = &cli.Command{ NetReachability, NetBandwidthCmd, NetBlockCmd, + NetStatCmd, + NetLimitCmd, }, } @@ -606,3 +608,103 @@ var NetBlockListCmd = &cli.Command{ return nil }, } + +var NetStatCmd = &cli.Command{ + Name: "stat", + Usage: "Report resource usage for a scope", + ArgsUsage: "scope", + Description: `Report resource usage for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + - all -- reports the resource usage for all currently active scopes. 
+`, + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + + args := cctx.Args().Slice() + if len(args) != 1 { + return xerrors.Errorf("must specify exactly one scope") + } + scope := args[0] + + result, err := api.NetStat(ctx, scope) + if err != nil { + return err + } + + enc := json.NewEncoder(os.Stdout) + return enc.Encode(result) + }, +} + +var NetLimitCmd = &cli.Command{ + Name: "limit", + Usage: "Get or set resource limits for a scope", + ArgsUsage: "scope [limit]", + Description: `Get or set resource limits for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + + The limit is json-formatted, with the same structure as the limits file. 
+`, + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "set", + Usage: "set the limit for a scope", + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := GetAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := ReqContext(cctx) + args := cctx.Args().Slice() + + if cctx.Bool("set") { + if len(args) != 2 { + return xerrors.Errorf("must specify exactly a scope and a limit") + } + scope := args[0] + limitStr := args[1] + + var limit atypes.NetLimit + err := json.Unmarshal([]byte(limitStr), &limit) + if err != nil { + return xerrors.Errorf("error decoding limit: %w", err) + } + + return api.NetSetLimit(ctx, scope, limit) + + } + + if len(args) != 1 { + return xerrors.Errorf("must specify exactly one scope") + } + scope := args[0] + + result, err := api.NetLimit(ctx, scope) + if err != nil { + return err + } + + enc := json.NewEncoder(os.Stdout) + return enc.Encode(result) + }, +} diff --git a/cli/wait.go b/cli/wait.go index 5fc5fa469..a3c0e511a 100644 --- a/cli/wait.go +++ b/cli/wait.go @@ -1,6 +1,7 @@ package cli import ( + "context" "fmt" "time" @@ -10,8 +11,22 @@ import ( var WaitApiCmd = &cli.Command{ Name: "wait-api", Usage: "Wait for lotus api to come online", + Flags: []cli.Flag{ + &cli.DurationFlag{ + Name: "timeout", + Usage: "duration to wait till fail", + Value: time.Second * 30, + }, + }, Action: func(cctx *cli.Context) error { - for i := 0; i < 30; i++ { + ctx := ReqContext(cctx) + ctx, cancel := context.WithTimeout(ctx, cctx.Duration("timeout")) + defer cancel() + for { + if ctx.Err() != nil { + break + } + api, closer, err := GetAPI(cctx) if err != nil { fmt.Printf("Not online yet... 
(%s)\n", err) @@ -20,8 +35,6 @@ var WaitApiCmd = &cli.Command{ } defer closer() - ctx := ReqContext(cctx) - _, err = api.Version(ctx) if err != nil { return err @@ -29,6 +42,11 @@ var WaitApiCmd = &cli.Command{ return nil } - return fmt.Errorf("timed out waiting for api to come online") + + if ctx.Err() == context.DeadlineExceeded { + return fmt.Errorf("timed out waiting for api to come online") + } + + return ctx.Err() }, } diff --git a/cli/wallet.go b/cli/wallet.go index f0f4e11f9..9faa10677 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -10,6 +10,8 @@ import ( "os" "strings" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/build" "github.com/urfave/cli/v2" @@ -634,14 +636,21 @@ var walletMarketWithdraw = &cli.Command{ return err } - var withdrawn abi.TokenAmount - if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + nv, err := api.StateNetworkVersion(ctx, wait.TipSet) + if err != nil { return err } - fmt.Printf("Successfully withdrew %s FIL\n", withdrawn) - if withdrawn != amt { - fmt.Printf("Note that this is less than the requested amount of %s FIL\n", amt) + if nv >= network.Version14 { + var withdrawn abi.TokenAmount + if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return err + } + + fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn)) + if withdrawn.LessThan(amt) { + fmt.Printf("Note that this is less than the requested amount of %s \n", types.FIL(amt)) + } } return nil diff --git a/cmd/eudico/checkpoint.go b/cmd/eudico/checkpoint.go new file mode 100644 index 000000000..7349f9d9d --- /dev/null +++ b/cmd/eudico/checkpoint.go @@ -0,0 +1,105 @@ +package main + +import ( + "fmt" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/urfave/cli/v2" +) + +var checkpointCmds = &cli.Command{ + Name: "checkpoint", + Usage: "Commands 
related with subneting", + Subcommands: []*cli.Command{ + listCheckpoints, + validateCheckpoints, + }, +} + +var listCheckpoints = &cli.Command{ + Name: "list-checkpoints", + Usage: "list latest checkpoints committed for subnet", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the subnet to list checkpoints from", + Value: address.RootSubnet.String(), + }, + &cli.IntFlag{ + Name: "num", + Usage: "specify the number of checkpoints to list from current tipset (default=10)", + Value: 10, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + // If subnet not set use root. Otherwise, use flag value + var subnet string + if cctx.String("subnet") != address.RootSubnet.String() { + subnet = cctx.String("subnet") + } + + chs, err := api.ListCheckpoints(ctx, address.SubnetID(subnet), cctx.Int("num")) + if err != nil { + return err + } + for _, ch := range chs { + chcid, _ := ch.Cid() + prev, _ := ch.PreviousCheck() + lc := len(ch.CrossMsgs()) > 0 + fmt.Printf("epoch: %d - cid=%s, previous=%v, childs=%v, crossmsgs=%v\n", ch.Epoch(), chcid, prev, ch.LenChilds(), lc) + } + return nil + }, +} + +var validateCheckpoints = &cli.Command{ + Name: "validate", + Usage: "validate checkpoint for certain epoch", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the subnet to list checkpoints from", + Value: address.RootSubnet.String(), + }, + &cli.IntFlag{ + Name: "epoch", + Usage: "specify checkpoint epoch (default=latest)", + Value: -1, + }, + }, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + // If subnet not set use root. 
Otherwise, use flag value + var subnet string + if cctx.String("subnet") != address.RootSubnet.String() { + subnet = cctx.String("subnet") + } + + ch, err := api.ValidateCheckpoint(ctx, address.SubnetID(subnet), abi.ChainEpoch(cctx.Int("epoch"))) + if err != nil { + fmt.Println("Verified KO!") + return err + } + chcid, _ := ch.Cid() + prev, _ := ch.PreviousCheck() + fmt.Println("Verified OK!") + fmt.Printf("epoch: %d - cid=%s, previous=%v, childs=%v\n", ch.Epoch(), chcid, prev, ch.LenChilds()) + + return nil + }, +} diff --git a/cmd/eudico/daemon.go b/cmd/eudico/daemon.go index f059b88a3..26a1a0acc 100644 --- a/cmd/eudico/daemon.go +++ b/cmd/eudico/daemon.go @@ -18,7 +18,7 @@ import ( "github.com/filecoin-project/go-jsonrpc" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + snmgr "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/manager" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" "github.com/filecoin-project/lotus/lib/peermgr" @@ -290,12 +290,12 @@ func daemonCmd(overrides node.Option) *cli.Command { } } else { // If not instantiate a subnet api - api, ok := iapi.(*subnet.API) + api, ok := iapi.(*snmgr.API) if !ok { return xerrors.Errorf("Couldn't instantiate new subnet API. Something went wrong: %s", err) } // Instantiate the full node handler. - h, err = subnet.FullNodeHandler(pp, api, true, serverOptions...) + h, err = snmgr.FullNodeHandler(pp, api, true, serverOptions...) 
if err != nil { return fmt.Errorf("failed to instantiate rpc handler: %s", err) } diff --git a/cmd/eudico/delegated.go b/cmd/eudico/delegated.go index a5dafd7bc..794a00f9c 100644 --- a/cmd/eudico/delegated.go +++ b/cmd/eudico/delegated.go @@ -5,16 +5,26 @@ import ( "time" "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/checkpointing" + + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/urfave/cli/v2" "golang.org/x/xerrors" "github.com/filecoin-project/lotus/chain/consensus" + "github.com/filecoin-project/lotus/chain/consensus/common" "github.com/filecoin-project/lotus/chain/consensus/delegcns" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" lcli "github.com/filecoin-project/lotus/cli" @@ -22,6 +32,11 @@ import ( "github.com/filecoin-project/lotus/node" ) +func NewRootDelegatedConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, r *resolver.Resolver, + verifier ffiwrapper.Verifier, genesis chain.Genesis, netName dtypes.NetworkName) consensus.Consensus { + return delegcns.NewDelegatedConsensus(sm, nil, beacon, r, verifier, genesis, netName) +} + var delegatedCmd = &cli.Command{ Name: "delegated", Usage: "Delegated consensus testbed", @@ -30,14 +45,15 @@ var delegatedCmd = &cli.Command{ delegatedMinerCmd, daemonCmd(node.Options( - node.Override(new(consensus.Consensus), delegcns.NewDelegatedConsensus), 
+ node.Override(new(consensus.Consensus), NewRootDelegatedConsensus), node.Override(new(store.WeightFunc), delegcns.Weight), - node.Override(new(stmgr.Executor), delegcns.TipSetExecutor()), - node.Override(new(stmgr.UpgradeSchedule), delegcns.DefaultUpgradeSchedule()), + node.Override(new(stmgr.Executor), common.RootTipSetExecutor), + node.Override(new(stmgr.UpgradeSchedule), common.DefaultUpgradeSchedule()), // Start checkpoint sub node.Override(new(*checkpointing.CheckpointingSub), checkpointing.NewCheckpointSub), node.Override(StartCheckpointingSubKey, checkpointing.BuildCheckpointingSub), + )), }, } @@ -78,7 +94,9 @@ var delegatedGenesisCmd = &cli.Command{ return err } - if err := subnet.WriteGenesis(hierarchical.RootSubnet, subnet.Delegated, miner, vreg, rem, uint64(time.Now().Unix()), f); err != nil { + // TODO: Make configurable + checkPeriod := sca.DefaultCheckpointPeriod + if err := subnet.WriteGenesis(address.RootSubnet, hierarchical.Delegated, miner, vreg, rem, checkPeriod, uint64(time.Now().Unix()), f); err != nil { return xerrors.Errorf("write genesis car: %w", err) } diff --git a/cmd/eudico/main.go b/cmd/eudico/main.go index 0f0d7abe0..c6bbcfafa 100644 --- a/cmd/eudico/main.go +++ b/cmd/eudico/main.go @@ -43,7 +43,7 @@ func main() { jaeger := tracing.SetupJaegerTracing("eudico") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -51,8 +51,10 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) - jaeger = tracing.SetupJaegerTracing("eudico/" + cmd.Name) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } + jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if originBefore != nil { return originBefore(cctx) diff --git a/cmd/eudico/subnet.go b/cmd/eudico/subnet.go index 8c6190d6c..626a57e7f 100644 --- a/cmd/eudico/subnet.go +++ b/cmd/eudico/subnet.go @@ -2,13 +2,16 @@ package main import ( "bytes" + 
"encoding/hex" "fmt" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" big "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/types" @@ -25,10 +28,15 @@ var subnetCmds = &cli.Command{ Subcommands: []*cli.Command{ addCmd, joinCmd, + syncCmd, listSubnetsCmd, mineCmd, leaveCmd, killCmd, + checkpointCmds, + fundCmd, + releaseCmd, + sendCmd, }, } @@ -44,14 +52,9 @@ var listSubnetsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) - ts, err := lcli.LoadTipSet(ctx, cctx, api) - if err != nil { - return err - } - var st sca.SCAState - act, err := api.StateGetActor(ctx, sca.SubnetCoordActorAddr, ts.Key()) + act, err := api.StateGetActor(ctx, hierarchical.SubnetCoordActorAddr, types.EmptyTSK) if err != nil { return xerrors.Errorf("error getting actor state: %w", err) } @@ -71,7 +74,7 @@ var listSubnetsCmd = &cli.Command{ if sh.Status != 0 { status = "Inactive" } - fmt.Printf("%s (%s): status=%v, stake=%v\n", sh.Cid, sh.ID, status, types.FIL(sh.Stake)) + fmt.Printf("%s: status=%v, stake=%v, circulating supply=%v\n", sh.ID, status, types.FIL(sh.Stake), types.FIL(sh.CircSupply)) } return nil @@ -95,6 +98,10 @@ var addCmd = &cli.Command{ Name: "consensus", Usage: "specify consensus for the subnet (0=delegated, 1=PoW)", }, + &cli.IntFlag{ + Name: "checkperiod", + Usage: "optionally specify checkpointing period for subnet (default = 10epochs)", + }, &cli.StringFlag{ Name: "name", Usage: "specify name for the subnet", @@ -138,15 +145,15 @@ var addCmd = &cli.Command{ return lcli.ShowHelp(cctx, fmt.Errorf("no name for subnet specified")) } - parent := 
hierarchical.RootSubnet + parent := address.RootSubnet if cctx.IsSet("parent") { - parent = hierarchical.SubnetID(cctx.String("parent")) + parent = address.SubnetID(cctx.String("parent")) } // FIXME: This is a horrible workaround to avoid delegminer from // not being set. But need to demo in 30 mins, so will fix it afterwards // (we all know I'll come across this comment in 2 years and laugh at it). - delegminer := sca.SubnetCoordActorAddr + delegminer := hierarchical.SubnetCoordActorAddr if cctx.IsSet("delegminer") { d := cctx.String("delegminer") delegminer, err = address.NewFromString(d) @@ -157,12 +164,13 @@ var addCmd = &cli.Command{ return lcli.ShowHelp(cctx, fmt.Errorf("no delegated miner for delegated consensus specified")) } minerStake := abi.NewStoragePower(1e8) // TODO: Make this value configurable in a flag/argument - actorAddr, err := api.AddSubnet(ctx, addr, parent, name, uint64(consensus), minerStake, delegminer) + checkperiod := abi.ChainEpoch(cctx.Int("checkperiod")) + actorAddr, err := api.AddSubnet(ctx, addr, parent, name, uint64(consensus), minerStake, checkperiod, delegminer) if err != nil { return err } - fmt.Printf("[*] subnet actor deployed as %v and new subnet availabe with ID=%v\n\n", actorAddr, hierarchical.NewSubnetID(parent, actorAddr)) + fmt.Printf("[*] subnet actor deployed as %v and new subnet availabe with ID=%v\n\n", actorAddr, address.NewSubnetID(parent, actorAddr)) fmt.Printf("remember to join and register your subnet for it to be discoverable") return nil }, @@ -180,7 +188,7 @@ var joinCmd = &cli.Command{ &cli.StringFlag{ Name: "subnet", Usage: "specify the id of the subnet to join", - Value: hierarchical.RootSubnet.String(), + Value: address.RootSubnet.String(), }, }, Action: func(cctx *cli.Context) error { @@ -207,7 +215,7 @@ var joinCmd = &cli.Command{ // If subnet not set use root. 
Otherwise, use flag value var subnet string - if cctx.String("subnet") != hierarchical.RootSubnet.String() { + if cctx.String("subnet") != address.RootSubnet.String() { subnet = cctx.String("subnet") } @@ -216,11 +224,53 @@ var joinCmd = &cli.Command{ return lcli.ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) } - c, err := api.JoinSubnet(ctx, addr, big.Int(val), hierarchical.SubnetID(subnet)) + c, err := api.JoinSubnet(ctx, addr, big.Int(val), address.SubnetID(subnet)) + if err != nil { + return err + } + fmt.Fprintf(cctx.App.Writer, "Successfully added stake to subnet %s in message: %s\n", subnet, c) + return nil + }, +} + +var syncCmd = &cli.Command{ + Name: "sync", + Usage: "Sync with a subnet without adding stake to it", + ArgsUsage: "[]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the subnet to sync with", + Value: address.RootSubnet.String(), + }, + &cli.BoolFlag{ + Name: "stop", + Usage: "use this flag to determine if you want to start or stop mining", + }, + }, + Action: func(cctx *cli.Context) error { + + if cctx.Args().Len() != 0 { + return lcli.ShowHelp(cctx, fmt.Errorf("'sync' expects no arguments, and a set of flags")) + } + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + // If subnet not set use root. 
Otherwise, use flag value + subnet := cctx.String("subnet") + if cctx.String("subnet") == address.RootSubnet.String() { + return xerrors.Errorf("no valid subnet so sync with specified") + } + err = api.SyncSubnet(ctx, address.SubnetID(subnet), cctx.Bool("stop")) if err != nil { return err } - fmt.Fprintf(cctx.App.Writer, "Successfully added stake to subnet in message: %s\n", c) + fmt.Fprintf(cctx.App.Writer, "Successfully started/stopped syncing with subnet %s \n", subnet) return nil }, } @@ -237,11 +287,11 @@ var mineCmd = &cli.Command{ &cli.StringFlag{ Name: "subnet", Usage: "specify the id of the subnet to mine", - Value: hierarchical.RootSubnet.String(), + Value: address.RootSubnet.String(), }, &cli.BoolFlag{ Name: "stop", - Usage: "use this flag to determine if you want to start or stop mining", + Usage: "use this flag to stop mining a subnet", }, }, Action: func(cctx *cli.Context) error { @@ -273,11 +323,11 @@ var mineCmd = &cli.Command{ } // If subnet not set use root. Otherwise, use flag value var subnet string - if cctx.String("subnet") != hierarchical.RootSubnet.String() { + if cctx.String("subnet") != address.RootSubnet.String() { subnet = cctx.String("subnet") } - err = api.MineSubnet(ctx, walletID, hierarchical.SubnetID(subnet), cctx.Bool("stop")) + err = api.MineSubnet(ctx, walletID, address.SubnetID(subnet), cctx.Bool("stop")) if err != nil { return err } @@ -298,7 +348,7 @@ var leaveCmd = &cli.Command{ &cli.StringFlag{ Name: "subnet", Usage: "specify the id of the subnet to mine", - Value: hierarchical.RootSubnet.String(), + Value: address.RootSubnet.String(), }, }, Action: func(cctx *cli.Context) error { @@ -325,11 +375,11 @@ var leaveCmd = &cli.Command{ // If subnet not set use root. 
Otherwise, use flag value var subnet string - if cctx.String("subnet") != hierarchical.RootSubnet.String() { + if cctx.String("subnet") != address.RootSubnet.String() { subnet = cctx.String("subnet") } - c, err := api.LeaveSubnet(ctx, addr, hierarchical.SubnetID(subnet)) + c, err := api.LeaveSubnet(ctx, addr, address.SubnetID(subnet)) if err != nil { return err } @@ -350,7 +400,7 @@ var killCmd = &cli.Command{ &cli.StringFlag{ Name: "subnet", Usage: "specify the id of the subnet to mine", - Value: hierarchical.RootSubnet.String(), + Value: address.RootSubnet.String(), }, }, Action: func(cctx *cli.Context) error { @@ -377,11 +427,11 @@ var killCmd = &cli.Command{ // If subnet not set use root. Otherwise, use flag value var subnet string - if cctx.String("subnet") != hierarchical.RootSubnet.String() { + if cctx.String("subnet") != address.RootSubnet.String() { subnet = cctx.String("subnet") } - c, err := api.KillSubnet(ctx, addr, hierarchical.SubnetID(subnet)) + c, err := api.KillSubnet(ctx, addr, address.SubnetID(subnet)) if err != nil { return err } @@ -390,6 +440,298 @@ var killCmd = &cli.Command{ }, } +var releaseCmd = &cli.Command{ + Name: "release", + Usage: "Release funds from your ", + ArgsUsage: "[]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send funds from", + }, + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the subnet", + Value: address.RootSubnet.String(), + }, + }, + Action: func(cctx *cli.Context) error { + + if cctx.Args().Len() != 1 { + return lcli.ShowHelp(cctx, fmt.Errorf("'fund' expects the amount of FILs to inject to subnet, and a set of flags")) + } + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + // Try to get default address first + addr, _ := api.WalletDefaultAddress(ctx) + if from := cctx.String("from"); from != "" { + addr, err = address.NewFromString(from) + if err != nil { + 
return err + } + } + + // Releasing funds needs to be done in a subnet + var subnet string + if cctx.String("subnet") == address.RootSubnet.String() || + cctx.String("subnet") == "" { + return xerrors.Errorf("only subnets can release funds, please set a valid subnet") + } + + subnet = cctx.String("subnet") + val, err := types.ParseFIL(cctx.Args().Get(0)) + if err != nil { + return lcli.ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) + } + + c, err := api.ReleaseFunds(ctx, addr, address.SubnetID(subnet), big.Int(val)) + if err != nil { + return err + } + fmt.Fprintf(cctx.App.Writer, "Successfully sent release message: %s\n", c) + fmt.Fprintf(cctx.App.Writer, "Cross-message should be propagated in the next checkpoint to: %s\n", + address.SubnetID(subnet).Parent()) + return nil + }, +} + +var fundCmd = &cli.Command{ + Name: "fund", + Usage: "Inject new funds to your address in a subnet", + ArgsUsage: "[]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send funds from", + }, + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the subnet", + Value: address.RootSubnet.String(), + }, + }, + Action: func(cctx *cli.Context) error { + + if cctx.Args().Len() != 1 { + return lcli.ShowHelp(cctx, fmt.Errorf("'fund' expects the amount of FILs to inject to subnet, and a set of flags")) + } + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + ctx := lcli.ReqContext(cctx) + + // Try to get default address first + addr, _ := api.WalletDefaultAddress(ctx) + if from := cctx.String("from"); from != "" { + addr, err = address.NewFromString(from) + if err != nil { + return err + } + } + + // Injecting funds needs to be done in a subnet + var subnet string + if cctx.String("subnet") == address.RootSubnet.String() || + cctx.String("subnet") == "" { + return xerrors.Errorf("only subnets can be fund with new tokens, please set a valid subnet") + } + + subnet = 
cctx.String("subnet") + val, err := types.ParseFIL(cctx.Args().Get(0)) + if err != nil { + return lcli.ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) + } + + c, err := api.FundSubnet(ctx, addr, address.SubnetID(subnet), big.Int(val)) + if err != nil { + return err + } + fmt.Fprintf(cctx.App.Writer, "Successfully funded subnet in message: %s\n", c) + fmt.Fprintf(cctx.App.Writer, "Cross-message should be validated shortly in subnet: %s\n", subnet) + return nil + }, +} + +var sendCmd = &cli.Command{ + Name: "send", + Usage: "Send a cross-net message to a subnet", + ArgsUsage: "[targetAddress] [amount]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "subnet", + Usage: "specify the id of the destination subnet", + }, + &cli.StringFlag{ + Name: "from", + Usage: "optionally specify the account to send funds from", + }, + &cli.StringFlag{ + Name: "gas-premium", + Usage: "specify gas price to use in AttoFIL", + Value: "0", + }, + &cli.StringFlag{ + Name: "gas-feecap", + Usage: "specify gas fee cap to use in AttoFIL", + Value: "0", + }, + &cli.Int64Flag{ + Name: "gas-limit", + Usage: "specify gas limit", + Value: 0, + }, + &cli.Uint64Flag{ + Name: "nonce", + Usage: "specify the nonce to use", + Value: 0, + }, + &cli.Uint64Flag{ + Name: "method", + Usage: "specify method to invoke", + Value: uint64(builtin.MethodSend), + }, + &cli.StringFlag{ + Name: "params-json", + Usage: "specify invocation parameters in json", + }, + &cli.StringFlag{ + Name: "params-hex", + Usage: "specify invocation parameters in hex", + }, + &cli.BoolFlag{ + Name: "force", + Usage: "Deprecated: use global 'force-send'", + }, + }, + Action: func(cctx *cli.Context) error { + + if cctx.Args().Len() != 2 { + return lcli.ShowHelp(cctx, fmt.Errorf("'send' expects the destination address and an amount of FILs to send to subnet, along with a set of mandatory flags")) + } + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + srv, err := 
lcli.GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + ctx := lcli.ReqContext(cctx) + var params lcli.SendParams + params.To, err = address.NewFromString(cctx.Args().Get(0)) + if err != nil { + return lcli.ShowHelp(cctx, fmt.Errorf("failed to parse target address: %w", err)) + } + + val, err := types.ParseFIL(cctx.Args().Get(1)) + if err != nil { + return lcli.ShowHelp(cctx, fmt.Errorf("failed to parse amount: %w", err)) + } + params.Val = abi.TokenAmount(val) + + if from := cctx.String("from"); from != "" { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + params.From = addr + } + + if cctx.IsSet("gas-premium") { + gp, err := types.BigFromString(cctx.String("gas-premium")) + if err != nil { + return err + } + params.GasPremium = &gp + } + + if cctx.IsSet("gas-feecap") { + gfc, err := types.BigFromString(cctx.String("gas-feecap")) + if err != nil { + return err + } + params.GasFeeCap = &gfc + } + + if cctx.IsSet("gas-limit") { + limit := cctx.Int64("gas-limit") + params.GasLimit = &limit + } + + params.Method = abi.MethodNum(cctx.Uint64("method")) + + if cctx.IsSet("params-json") { + decparams, err := srv.DecodeTypedParamsFromJSON(ctx, params.To, params.Method, cctx.String("params-json")) + if err != nil { + return fmt.Errorf("failed to decode json params: %w", err) + } + params.Params = decparams + } + if cctx.IsSet("params-hex") { + if params.Params != nil { + return fmt.Errorf("can only specify one of 'params-json' and 'params-hex'") + } + decparams, err := hex.DecodeString(cctx.String("params-hex")) + if err != nil { + return fmt.Errorf("failed to decode hex params: %w", err) + } + params.Params = decparams + } + + if cctx.IsSet("nonce") { + n := cctx.Uint64("nonce") + params.Nonce = &n + } + + proto, err := srv.MessageForSend(ctx, params) + if err != nil { + return xerrors.Errorf("creating message prototype: %w", err) + } + + if cctx.String("subnet") == "" { + return 
xerrors.Errorf("no destination subnet specified") + } + + subnet := address.SubnetID(cctx.String("subnet")) + crossParams := &sca.CrossMsgParams{ + Destination: subnet, + Msg: proto.Message, + } + serparams, err := actors.SerializeParams(crossParams) + if err != nil { + return xerrors.Errorf("failed serializing init actor params: %s", err) + } + smsg, aerr := api.MpoolPushMessage(ctx, &types.Message{ + To: hierarchical.SubnetCoordActorAddr, + From: params.From, + Value: params.Val, + Method: sca.Methods.SendCross, + Params: serparams, + }, nil) + if aerr != nil { + return xerrors.Errorf("Error sending message: %s", aerr) + } + + fmt.Fprintf(cctx.App.Writer, "Successfully send cross-message with cid: %s\n", smsg.Cid()) + fmt.Fprintf(cctx.App.Writer, "Cross-message should be propagated shortly to the right subnet: %s\n", subnet) + return nil + }, +} + func MustSerialize(i cbg.CBORMarshaler) []byte { buf := new(bytes.Buffer) if err := i.MarshalCBOR(buf); err != nil { diff --git a/cmd/eudico/tspow.go b/cmd/eudico/tspow.go index 269a8215a..ce7e54e45 100644 --- a/cmd/eudico/tspow.go +++ b/cmd/eudico/tspow.go @@ -1,42 +1,41 @@ package main import ( - "context" "fmt" "os" "time" "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/chain/actors/adt" - "github.com/filecoin-project/lotus/chain/actors/builtin/system" + "github.com/filecoin-project/lotus/chain" + "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/consensus/common" "github.com/filecoin-project/lotus/chain/consensus/hierarchical" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/sca" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/actors/subnet" - param "github.com/filecoin-project/lotus/chain/consensus/params" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/consensus/tspow" 
"github.com/filecoin-project/lotus/chain/gen/slashfilter" - adt0 "github.com/filecoin-project/specs-actors/actors/util/adt" - "github.com/ipfs/go-cid" - cbor "github.com/ipfs/go-ipld-cbor" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/urfave/cli/v2" "golang.org/x/xerrors" - bstore "github.com/filecoin-project/lotus/blockstore" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/consensus" genesis2 "github.com/filecoin-project/lotus/chain/gen/genesis" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/chain/vm" "github.com/filecoin-project/lotus/chain/wallet" lcli "github.com/filecoin-project/lotus/cli" cliutil "github.com/filecoin-project/lotus/cli/util" - "github.com/filecoin-project/lotus/genesis" - "github.com/filecoin-project/lotus/journal" "github.com/filecoin-project/lotus/node" ) +func NewRootTSPoWConsensus(sm *stmgr.StateManager, beacon beacon.Schedule, r *resolver.Resolver, + verifier ffiwrapper.Verifier, genesis chain.Genesis, netName dtypes.NetworkName) consensus.Consensus { + return tspow.NewTSPoWConsensus(sm, nil, beacon, r, verifier, genesis, netName) +} + var tpowCmd = &cli.Command{ Name: "tspow", Usage: "TipSet PoW consensus testbed", @@ -45,14 +44,11 @@ var tpowCmd = &cli.Command{ tpowMinerCmd, daemonCmd(node.Options( - node.Override(new(consensus.Consensus), tspow.NewTSPoWConsensus), + node.Override(new(consensus.Consensus), NewRootTSPoWConsensus), node.Override(new(store.WeightFunc), tspow.Weight), node.Unset(new(*slashfilter.SlashFilter)), - // TODO: This doesn't seem to be right, we should implement the right - // executor and upgradeSchedule for this consensus, we currently - // use of the delegated consensus. 
- node.Override(new(stmgr.Executor), tspow.TipSetExecutor()), //todo - node.Override(new(stmgr.UpgradeSchedule), tspow.DefaultUpgradeSchedule()), + node.Override(new(stmgr.Executor), common.RootTipSetExecutor), + node.Override(new(stmgr.UpgradeSchedule), common.DefaultUpgradeSchedule()), )), }, } @@ -63,7 +59,7 @@ var tpowGenesisCmd = &cli.Command{ ArgsUsage: "[outfile]", Action: func(cctx *cli.Context) error { if cctx.Args().Len() != 1 { - return xerrors.Errorf("expected 2 arguments") + return xerrors.Errorf("expected 1 argument") } memks := wallet.NewMemKeyStore() @@ -88,7 +84,9 @@ var tpowGenesisCmd = &cli.Command{ return err } - if err := subnet.WriteGenesis(hierarchical.RootSubnet, subnet.PoW, address.Undef, vreg, rem, uint64(time.Now().Unix()), f); err != nil { + // TODO: Make configurable + checkPeriod := sca.DefaultCheckpointPeriod + if err := subnet.WriteGenesis(address.RootSubnet, hierarchical.PoW, address.Undef, vreg, rem, checkPeriod, uint64(time.Now().Unix()), f); err != nil { return xerrors.Errorf("write genesis car: %w", err) } @@ -121,100 +119,7 @@ var tpowMinerCmd = &cli.Command{ return xerrors.Errorf("no miner address specified to start mining") } - log.Infow("Starting mining with miner", miner) + log.Infow("Starting mining with miner", "miner", miner) return tspow.Mine(ctx, miner, api) }, } - -func MakePoWGenesisBlock(ctx context.Context, j journal.Journal, bs bstore.Blockstore, sys vm.SyscallBuilder, template genesis.Template) (*genesis2.GenesisBootstrap, error) { - if j == nil { - j = journal.NilJournal() - } - st, _, err := genesis2.MakeInitialStateTree(ctx, bs, template) - if err != nil { - return nil, xerrors.Errorf("make initial state tree failed: %w", err) - } - - stateroot, err := st.Flush(ctx) - if err != nil { - return nil, xerrors.Errorf("flush state tree failed: %w", err) - } - - // temp chainstore - //cs := store.NewChainStore(bs, bs, datastore.NewMapDatastore(), j) - - /* // Verify PreSealed Data - stateroot, err = 
VerifyPreSealedData(ctx, cs, sys, stateroot, template, keyIDs, template.NetworkVersion) - if err != nil { - return nil, xerrors.Errorf("failed to verify presealed data: %w", err) - } - - stateroot, err = SetupStorageMiners(ctx, cs, sys, stateroot, template.Miners, template.NetworkVersion) - if err != nil { - return nil, xerrors.Errorf("setup miners failed: %w", err) - }*/ - - store := adt.WrapStore(ctx, cbor.NewCborStore(bs)) - emptyroot, err := adt0.MakeEmptyArray(store).Root() - if err != nil { - return nil, xerrors.Errorf("amt build failed: %w", err) - } - - mm := &types.MsgMeta{ - BlsMessages: emptyroot, - SecpkMessages: emptyroot, - } - mmb, err := mm.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing msgmeta failed: %w", err) - } - if err := bs.Put(mmb); err != nil { - return nil, xerrors.Errorf("putting msgmeta block to blockstore: %w", err) - } - - log.Infof("Empty Genesis root: %s", emptyroot) - - wtb, err := param.GenesisWorkTarget.Bytes() - if err != nil { - return nil, err - } - - genesisticket := &types.Ticket{ - VRFProof: wtb, - } - - b := &types.BlockHeader{ - Miner: system.Address, - Ticket: genesisticket, - Parents: []cid.Cid{}, - Height: 0, - ParentWeight: types.NewInt(0), - ParentStateRoot: stateroot, - Messages: mmb.Cid(), - ParentMessageReceipts: emptyroot, - BLSAggregate: nil, - BlockSig: nil, - Timestamp: template.Timestamp, - ElectionProof: new(types.ElectionProof), - BeaconEntries: []types.BeaconEntry{ - { - Round: 0, - Data: make([]byte, 32), - }, - }, - ParentBaseFee: abi.NewTokenAmount(build.InitialBaseFee), - } - - sb, err := b.ToStorageBlock() - if err != nil { - return nil, xerrors.Errorf("serializing block header failed: %w", err) - } - - if err := bs.Put(sb); err != nil { - return nil, xerrors.Errorf("putting header to blockstore: %w", err) - } - - return &genesis2.GenesisBootstrap{ - Genesis: b, - }, nil -} diff --git a/cmd/lotus-bench/caching_verifier.go b/cmd/lotus-bench/caching_verifier.go index 
f4cc0f837..0fddf515d 100644 --- a/cmd/lotus-bench/caching_verifier.go +++ b/cmd/lotus-bench/caching_verifier.go @@ -5,10 +5,11 @@ import ( "context" "errors" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" "github.com/ipfs/go-datastore" "github.com/minio/blake2b-simd" cbg "github.com/whyrusleeping/cbor-gen" @@ -36,7 +37,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } hash := hasher.Sum(nil) key := datastore.NewKey(string(hash)) - fromDs, err := cv.ds.Get(key) + fromDs, err := cv.ds.Get(context.Background(), key) if err == nil { switch fromDs[0] { case 's': @@ -66,7 +67,7 @@ func (cv cachingVerifier) withCache(execute func() (bool, error), param cbg.CBOR } if len(save) != 0 { - errSave := cv.ds.Put(key, save) + errSave := cv.ds.Put(context.Background(), key, save) if errSave != nil { log.Errorf("error saving result: %+v", errSave) } @@ -85,9 +86,10 @@ func (cv *cachingVerifier) VerifySeal(svi proof2.SealVerifyInfo) (bool, error) { }, &svi) } -func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (cv *cachingVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { return cv.backend.VerifyWinningPoSt(ctx, info) } + func (cv *cachingVerifier) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStVerifyInfo) (bool, error) { return cv.withCache(func() (bool, error) { return cv.backend.VerifyWindowPoSt(ctx, info) @@ -97,8 +99,12 @@ func (cv *cachingVerifier) GenerateWinningPoStSectorChallenge(ctx context.Contex return cv.backend.GenerateWinningPoStSectorChallenge(ctx, proofType, a, rnd, u) } -func (cv cachingVerifier) 
VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (cv cachingVerifier) VerifyAggregateSeals(aggregate proof7.AggregateSealVerifyProofAndInfos) (bool, error) { return cv.backend.VerifyAggregateSeals(aggregate) } +func (cv cachingVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return cv.backend.VerifyReplicaUpdate(update) +} + var _ ffiwrapper.Verifier = (*cachingVerifier)(nil) diff --git a/cmd/lotus-bench/import.go b/cmd/lotus-bench/import.go index c66b90deb..e2c9979d9 100644 --- a/cmd/lotus-bench/import.go +++ b/cmd/lotus-bench/import.go @@ -17,8 +17,6 @@ import ( "time" ocprom "contrib.go.opencensus.io/exporter/prometheus" - "github.com/cockroachdb/pebble" - "github.com/cockroachdb/pebble/bloom" "github.com/ipfs/go-cid" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -46,7 +44,6 @@ import ( "github.com/ipfs/go-datastore" badger "github.com/ipfs/go-ds-badger2" measure "github.com/ipfs/go-ds-measure" - pebbleds "github.com/ipfs/go-ds-pebble" "github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -114,9 +111,6 @@ var importBenchCmd = &cli.Command{ &cli.BoolFlag{ Name: "only-import", }, - &cli.BoolFlag{ - Name: "use-pebble", - }, &cli.BoolFlag{ Name: "use-native-badger", }, @@ -178,29 +172,6 @@ var importBenchCmd = &cli.Command{ ) switch { - case cctx.Bool("use-pebble"): - log.Info("using pebble") - cache := 512 - ds, err = pebbleds.NewDatastore(tdir, &pebble.Options{ - // Pebble has a single combined cache area and the write - // buffers are taken from this too. Assign all available - // memory allowance for cache. - Cache: pebble.NewCache(int64(cache * 1024 * 1024)), - // The size of memory table(as well as the write buffer). - // Note, there may have more than two memory tables in the system. - // MemTableStopWritesThreshold can be configured to avoid the memory abuse. 
- MemTableSize: cache * 1024 * 1024 / 4, - // The default compaction concurrency(1 thread), - // Here use all available CPUs for faster compaction. - MaxConcurrentCompactions: runtime.NumCPU(), - // Per-level options. Options for at least one level must be specified. The - // options for the last level are used for all subsequent levels. - Levels: []pebble.LevelOptions{ - {TargetFileSize: 16 * 1024 * 1024, FilterPolicy: bloom.FilterPolicy(10), Compression: pebble.NoCompression}, - }, - Logger: log, - }) - case cctx.Bool("use-native-badger"): log.Info("using native badger") var opts badgerbs.Options @@ -258,7 +229,7 @@ var importBenchCmd = &cli.Command{ defer cs.Close() //nolint:errcheck // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil) + stm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, vm.Syscalls(verifier), filcns.DefaultUpgradeSchedule(), nil) if err != nil { return err } @@ -333,7 +304,7 @@ var importBenchCmd = &cli.Command{ return fmt.Errorf("no CAR file provided for import") } - head, err = cs.Import(carFile) + head, err = cs.Import(cctx.Context, carFile) if err != nil { return err } @@ -356,7 +327,7 @@ var importBenchCmd = &cli.Command{ return xerrors.Errorf("failed to parse head tipset key: %w", err) } - head, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) if err != nil { return err } @@ -365,7 +336,7 @@ var importBenchCmd = &cli.Command{ if err != nil { return err } - head, err = cs.LoadTipSet(types.NewTipSetKey(cr.Header.Roots...)) + head, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cr.Header.Roots...)) if err != nil { return err } @@ -382,7 +353,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to parse genesis tipset key: %w", err) } - genesis, 
err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + genesis, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else { log.Warnf("getting genesis by height; this will be slow; pass in the genesis tipset through --genesis-tipset") // fallback to the slow path of walking the chain. @@ -393,7 +364,7 @@ var importBenchCmd = &cli.Command{ return err } - if err = cs.SetGenesis(genesis.Blocks()[0]); err != nil { + if err = cs.SetGenesis(cctx.Context, genesis.Blocks()[0]); err != nil { return err } @@ -404,10 +375,10 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to end genesis tipset key: %w", err) } - end, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + end, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("end-height"); h != 0 { log.Infof("getting end tipset at height %d...", h) - end, err = cs.GetTipsetByHeight(context.TODO(), abi.ChainEpoch(h), head, true) + end, err = cs.GetTipsetByHeight(cctx.Context, abi.ChainEpoch(h), head, true) } if err != nil { @@ -426,7 +397,7 @@ var importBenchCmd = &cli.Command{ if cids, err = lcli.ParseTipSetString(tsk); err != nil { return xerrors.Errorf("failed to start genesis tipset key: %w", err) } - start, err = cs.LoadTipSet(types.NewTipSetKey(cids...)) + start, err = cs.LoadTipSet(cctx.Context, types.NewTipSetKey(cids...)) } else if h := cctx.Int64("start-height"); h != 0 { log.Infof("getting start tipset at height %d...", h) // lookback from the end tipset (which falls back to head if not supplied). 
@@ -439,7 +410,7 @@ var importBenchCmd = &cli.Command{ if start != nil { startEpoch = start.Height() - if err := cs.ForceHeadSilent(context.Background(), start); err != nil { + if err := cs.ForceHeadSilent(cctx.Context, start); err != nil { // if err := cs.SetHead(start); err != nil { return err } @@ -450,7 +421,7 @@ var importBenchCmd = &cli.Command{ if h := ts.Height(); h%100 == 0 { log.Infof("walking back the chain; loaded tipset at height %d...", h) } - next, err := cs.LoadTipSet(ts.Parents()) + next, err := cs.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 0b8ec6fe3..b0e71b90e 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -12,6 +12,8 @@ import ( "time" saproof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + saproof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/docker/go-units" logging "github.com/ipfs/go-log/v2" @@ -260,7 +262,8 @@ var sealBenchCmd = &cli.Command{ sectorNumber := c.Int("num-sectors") var sealTimings []SealingResult - var sealedSectors []saproof2.SectorInfo + var extendedSealedSectors []saproof7.ExtendedSectorInfo + var sealedSectors []saproof7.SectorInfo if robench == "" { var err error @@ -269,7 +272,7 @@ var sealBenchCmd = &cli.Command{ PreCommit2: 1, Commit: 1, } - sealTimings, sealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal")) + sealTimings, extendedSealedSectors, err = runSeals(sb, sbfs, sectorNumber, parCfg, mid, sectorSize, []byte(c.String("ticket-preimage")), c.String("save-commit2-input"), skipc2, c.Bool("skip-unseal")) if err != nil { return xerrors.Errorf("failed to run seals: %w", err) } @@ -296,7 +299,13 @@ var sealBenchCmd = &cli.Command{ } for _, s := range 
genm.Sectors { - sealedSectors = append(sealedSectors, saproof2.SectorInfo{ + extendedSealedSectors = append(extendedSealedSectors, saproof7.ExtendedSectorInfo{ + SealedCID: s.CommR, + SectorNumber: s.SectorID, + SealProof: s.ProofType, + SectorKey: nil, + }) + sealedSectors = append(sealedSectors, proof.SectorInfo{ SealedCID: s.CommR, SectorNumber: s.SectorID, SealProof: s.ProofType, @@ -325,20 +334,20 @@ var sealBenchCmd = &cli.Command{ return err } - fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(sealedSectors))) + fcandidates, err := ffiwrapper.ProofVerifier.GenerateWinningPoStSectorChallenge(context.TODO(), wipt, mid, challenge[:], uint64(len(extendedSealedSectors))) if err != nil { return err } - candidates := make([]saproof2.SectorInfo, len(fcandidates)) + xcandidates := make([]saproof7.ExtendedSectorInfo, len(fcandidates)) for i, fcandidate := range fcandidates { - candidates[i] = sealedSectors[fcandidate] + xcandidates[i] = extendedSealedSectors[fcandidate] } gencandidates := time.Now() log.Info("computing winning post snark (cold)") - proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:]) + proof1, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:]) if err != nil { return err } @@ -346,14 +355,23 @@ var sealBenchCmd = &cli.Command{ winningpost1 := time.Now() log.Info("computing winning post snark (hot)") - proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, candidates, challenge[:]) + proof2, err := sb.GenerateWinningPoSt(context.TODO(), mid, xcandidates, challenge[:]) if err != nil { return err } + candidates := make([]saproof7.SectorInfo, len(xcandidates)) + for i, xsi := range xcandidates { + candidates[i] = saproof7.SectorInfo{ + SealedCID: xsi.SealedCID, + SectorNumber: xsi.SectorNumber, + SealProof: xsi.SealProof, + } + } + winnningpost2 := time.Now() - pvi1 := saproof2.WinningPoStVerifyInfo{ + pvi1 := 
saproof7.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof1, ChallengedSectors: candidates, @@ -369,7 +387,7 @@ var sealBenchCmd = &cli.Command{ verifyWinningPost1 := time.Now() - pvi2 := saproof2.WinningPoStVerifyInfo{ + pvi2 := saproof7.WinningPoStVerifyInfo{ Randomness: abi.PoStRandomness(challenge[:]), Proofs: proof2, ChallengedSectors: candidates, @@ -386,7 +404,7 @@ var sealBenchCmd = &cli.Command{ verifyWinningPost2 := time.Now() log.Info("computing window post snark (cold)") - wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:]) + wproof1, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:]) if err != nil { return err } @@ -394,7 +412,7 @@ var sealBenchCmd = &cli.Command{ windowpost1 := time.Now() log.Info("computing window post snark (hot)") - wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, sealedSectors, challenge[:]) + wproof2, _, err := sb.GenerateWindowPoSt(context.TODO(), mid, extendedSealedSectors, challenge[:]) if err != nil { return err } @@ -502,10 +520,10 @@ type ParCfg struct { Commit int } -func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof2.SectorInfo, error) { +func runSeals(sb *ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par ParCfg, mid abi.ActorID, sectorSize abi.SectorSize, ticketPreimage []byte, saveC2inp string, skipc2, skipunseal bool) ([]SealingResult, []saproof7.ExtendedSectorInfo, error) { var pieces []abi.PieceInfo sealTimings := make([]SealingResult, numSectors) - sealedSectors := make([]saproof2.SectorInfo, numSectors) + sealedSectors := make([]saproof7.ExtendedSectorInfo, numSectors) preCommit2Sema := make(chan struct{}, par.PreCommit2) commitSema := make(chan struct{}, par.Commit) @@ -579,10 +597,11 @@ func runSeals(sb 
*ffiwrapper.Sealer, sbfs *basicfs.Provider, numSectors int, par precommit2 := time.Now() <-preCommit2Sema - sealedSectors[i] = saproof2.SectorInfo{ + sealedSectors[i] = saproof7.ExtendedSectorInfo{ SealProof: sid.ProofType, SectorNumber: i, SealedCID: cids.Sealed, + SectorKey: nil, } seed := lapi.SealSeed{ diff --git a/cmd/lotus-miner/actor.go b/cmd/lotus-miner/actor.go index 29c5a4bf4..6c4611327 100644 --- a/cmd/lotus-miner/actor.go +++ b/cmd/lotus-miner/actor.go @@ -6,6 +6,8 @@ import ( "os" "strings" + "github.com/filecoin-project/go-state-types/network" + rlepluslazy "github.com/filecoin-project/go-bitfield/rle" cbor "github.com/ipfs/go-ipld-cbor" @@ -255,7 +257,7 @@ var actorWithdrawCmd = &cli.Command{ amount = abi.TokenAmount(f) if amount.GreaterThan(available) { - return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", amount, available) + return xerrors.Errorf("can't withdraw more funds than available; requested: %s; available: %s", types.FIL(amount), types.FIL(available)) } } @@ -280,6 +282,8 @@ var actorWithdrawCmd = &cli.Command{ fmt.Printf("Requested rewards withdrawal in message %s\n", smsg.Cid()) // wait for it to get mined into a block + fmt.Printf("waiting for %d epochs for confirmation..\n", uint64(cctx.Int("confidence"))) + wait, err := api.StateWaitMsg(ctx, smsg.Cid(), uint64(cctx.Int("confidence"))) if err != nil { return err @@ -291,14 +295,21 @@ var actorWithdrawCmd = &cli.Command{ return err } - var withdrawn abi.TokenAmount - if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + nv, err := api.StateNetworkVersion(ctx, wait.TipSet) + if err != nil { return err } - fmt.Printf("Successfully withdrew %s FIL\n", withdrawn) - if withdrawn != amount { - fmt.Printf("Note that this is less than the requested amount of %s FIL\n", amount) + if nv >= network.Version14 { + var withdrawn abi.TokenAmount + if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err 
!= nil { + return err + } + + fmt.Printf("Successfully withdrew %s \n", types.FIL(withdrawn)) + if withdrawn.LessThan(amount) { + fmt.Printf("Note that this is less than the requested amount of %s\n", types.FIL(amount)) + } } return nil diff --git a/cmd/lotus-miner/info.go b/cmd/lotus-miner/info.go index e50c4366e..39de942aa 100644 --- a/cmd/lotus-miner/info.go +++ b/cmd/lotus-miner/info.go @@ -470,6 +470,8 @@ var stateList = []stateMeta{ {col: color.FgBlue, state: sealing.Empty}, {col: color.FgBlue, state: sealing.WaitDeals}, {col: color.FgBlue, state: sealing.AddPiece}, + {col: color.FgBlue, state: sealing.SnapDealsWaitDeals}, + {col: color.FgBlue, state: sealing.SnapDealsAddPiece}, {col: color.FgRed, state: sealing.UndefinedSectorState}, {col: color.FgYellow, state: sealing.Packing}, @@ -488,6 +490,12 @@ var stateList = []stateMeta{ {col: color.FgYellow, state: sealing.SubmitCommitAggregate}, {col: color.FgYellow, state: sealing.CommitAggregateWait}, {col: color.FgYellow, state: sealing.FinalizeSector}, + {col: color.FgYellow, state: sealing.SnapDealsPacking}, + {col: color.FgYellow, state: sealing.UpdateReplica}, + {col: color.FgYellow, state: sealing.ProveReplicaUpdate}, + {col: color.FgYellow, state: sealing.SubmitReplicaUpdate}, + {col: color.FgYellow, state: sealing.ReplicaUpdateWait}, + {col: color.FgYellow, state: sealing.FinalizeReplicaUpdate}, {col: color.FgCyan, state: sealing.Terminating}, {col: color.FgCyan, state: sealing.TerminateWait}, @@ -495,6 +503,7 @@ var stateList = []stateMeta{ {col: color.FgCyan, state: sealing.TerminateFailed}, {col: color.FgCyan, state: sealing.Removing}, {col: color.FgCyan, state: sealing.Removed}, + {col: color.FgCyan, state: sealing.AbortUpgrade}, {col: color.FgRed, state: sealing.FailedUnrecoverable}, {col: color.FgRed, state: sealing.AddPieceFailed}, @@ -512,6 +521,9 @@ var stateList = []stateMeta{ {col: color.FgRed, state: sealing.RemoveFailed}, {col: color.FgRed, state: sealing.DealsExpired}, {col: color.FgRed, 
state: sealing.RecoverDealIDs}, + {col: color.FgRed, state: sealing.SnapDealsAddPieceFailed}, + {col: color.FgRed, state: sealing.SnapDealsDealsExpired}, + {col: color.FgRed, state: sealing.ReplicaUpdateFailed}, } func init() { diff --git a/cmd/lotus-miner/init.go b/cmd/lotus-miner/init.go index a5e9710a9..ae742c663 100644 --- a/cmd/lotus-miner/init.go +++ b/cmd/lotus-miner/init.go @@ -13,6 +13,8 @@ import ( "path/filepath" "strconv" + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/ipfs/go-datastore" @@ -345,7 +347,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string return err } - if err := mds.Put(sectorKey, b); err != nil { + if err := mds.Put(ctx, sectorKey, b); err != nil { return err } @@ -385,7 +387,7 @@ func migratePreSealMeta(ctx context.Context, api v1api.FullNode, metadata string buf := make([]byte, binary.MaxVarintLen64) size := binary.PutUvarint(buf, uint64(maxSectorID)) - return mds.Put(datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) + return mds.Put(ctx, datastore.NewKey(modules.StorageCounterDSPrefix), buf[:size]) } func findMarketDealID(ctx context.Context, api v1api.FullNode, deal market2.DealProposal) (abi.DealID, error) { @@ -426,7 +428,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode return xerrors.Errorf("peer ID from private key: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -439,7 +441,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api v1api.FullNode } if cctx.Bool("genesis-miner") { - if err := mds.Put(datastore.NewKey("miner-address"), a.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), a.Bytes()); err != nil { return err } @@ -546,7 +548,7 @@ func storageMinerInit(ctx context.Context, cctx *cli.Context, api 
v1api.FullNode } log.Infof("Created new miner: %s", addr) - if err := mds.Put(datastore.NewKey("miner-address"), addr.Bytes()); err != nil { + if err := mds.Put(ctx, datastore.NewKey("miner-address"), addr.Bytes()); err != nil { return err } @@ -644,11 +646,26 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, return address.Address{}, err } + sender := owner + if fromstr := cctx.String("from"); fromstr != "" { + faddr, err := address.NewFromString(fromstr) + if err != nil { + return address.Undef, fmt.Errorf("could not parse from address: %w", err) + } + sender = faddr + } + + // make sure the sender account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + return address.Undef, xerrors.Errorf("sender must exist on chain: %w", err) + } + // make sure the worker account exists on chain _, err = api.StateLookupID(ctx, worker, types.EmptyTSK) if err != nil { signed, err := api.MpoolPushMessage(ctx, &types.Message{ - From: owner, + From: sender, To: worker, Value: types.NewInt(0), }, nil) @@ -668,35 +685,46 @@ func createStorageMiner(ctx context.Context, api v1api.FullNode, peerid peer.ID, } } - nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + // make sure the owner account exists on chain + _, err = api.StateLookupID(ctx, owner, types.EmptyTSK) if err != nil { - return address.Undef, xerrors.Errorf("getting network version: %w", err) + signed, err := api.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return address.Undef, xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := api.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence, lapi.LookbackNoLimit, true) + if err != nil { + return address.Undef, xerrors.Errorf("waiting for owner init: %w", err) + } + if mw.Receipt.ExitCode 
!= 0 { + return address.Undef, xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } } - spt, err := miner.SealProofTypeFromSectorSize(abi.SectorSize(ssize), nv) + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) if err != nil { - return address.Undef, xerrors.Errorf("getting seal proof type: %w", err) + return address.Undef, xerrors.Errorf("getting post proof type: %w", err) } - params, err := actors.SerializeParams(&power2.CreateMinerParams{ - Owner: owner, - Worker: worker, - SealProofType: spt, - Peer: abi.PeerID(peerid), + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + Peer: abi.PeerID(peerid), }) if err != nil { return address.Undef, err } - sender := owner - if fromstr := cctx.String("from"); fromstr != "" { - faddr, err := address.NewFromString(fromstr) - if err != nil { - return address.Undef, fmt.Errorf("could not parse from address: %w", err) - } - sender = faddr - } - createStorageMinerMsg := &types.Message{ To: power.Address, From: sender, diff --git a/cmd/lotus-miner/init_restore.go b/cmd/lotus-miner/init_restore.go index 0974a7c5d..1aaa7909a 100644 --- a/cmd/lotus-miner/init_restore.go +++ b/cmd/lotus-miner/init_restore.go @@ -233,7 +233,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -255,7 +255,7 @@ func restore(ctx context.Context, cctx *cli.Context, targetPath string, strConfi log.Info("Checking actor metadata") - abytes, err := mds.Get(datastore.NewKey("miner-address")) + abytes, err := mds.Get(ctx, datastore.NewKey("miner-address")) if err != nil { return 
xerrors.Errorf("getting actor address from metadata datastore: %w", err) } diff --git a/cmd/lotus-miner/main.go b/cmd/lotus-miner/main.go index 110748f48..57b5d8a3e 100644 --- a/cmd/lotus-miner/main.go +++ b/cmd/lotus-miner/main.go @@ -7,7 +7,6 @@ import ( "github.com/fatih/color" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" - "go.opencensus.io/trace" "golang.org/x/xerrors" cliutil "github.com/filecoin-project/lotus/cli/util" @@ -55,10 +54,11 @@ func main() { lcli.WithCategory("storage", sealingCmd), lcli.WithCategory("retrieval", piecesCmd), } + jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -66,7 +66,9 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if cctx.IsSet("color") { diff --git a/cmd/lotus-miner/market.go b/cmd/lotus-miner/market.go index c32f44b6a..c7089e74e 100644 --- a/cmd/lotus-miner/market.go +++ b/cmd/lotus-miner/market.go @@ -352,6 +352,7 @@ var storageDealsCmd = &cli.Command{ resetBlocklistCmd, setSealDurationCmd, dealsPendingPublish, + dealsRetryPublish, }, } @@ -637,6 +638,7 @@ var dataTransfersCmd = &cli.Command{ transfersListCmd, marketRestartTransfer, marketCancelTransfer, + transfersDiagnosticsCmd, }, } @@ -856,6 +858,38 @@ var transfersListCmd = &cli.Command{ }, } +var transfersDiagnosticsCmd = &cli.Command{ + Name: "diagnostics", + Usage: "Get detailed diagnostics on active transfers with a specific peer", + Flags: []cli.Flag{}, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + api, closer, err := lcli.GetMarketsAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + targetPeer, err := 
peer.Decode(cctx.Args().First()) + if err != nil { + return err + } + diagnostics, err := api.MarketDataTransferDiagnostics(ctx, targetPeer) + if err != nil { + return err + } + out, err := json.MarshalIndent(diagnostics, "", "\t") + if err != nil { + return err + } + fmt.Println(string(out)) + return nil + }, +} + var dealsPendingPublish = &cli.Command{ Name: "pending-publish", Usage: "list deals waiting in publish queue", @@ -910,6 +944,36 @@ var dealsPendingPublish = &cli.Command{ }, } +var dealsRetryPublish = &cli.Command{ + Name: "retry-publish", + Usage: "retry publishing a deal", + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return cli.ShowCommandHelp(cctx, cctx.Command.Name) + } + api, closer, err := lcli.GetMarketsAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + propcid := cctx.Args().First() + fmt.Printf("retrying deal with proposal-cid: %s\n", propcid) + + cid, err := cid.Decode(propcid) + if err != nil { + return err + } + if err := api.MarketRetryPublishDeal(ctx, cid); err != nil { + return xerrors.Errorf("retrying publishing deal: %w", err) + } + fmt.Println("retried to publish deal") + return nil + }, +} + func listDealsWithJSON(cctx *cli.Context) error { node, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { diff --git a/cmd/lotus-miner/pieces.go b/cmd/lotus-miner/pieces.go index 75605c1ed..778f8e6cf 100644 --- a/cmd/lotus-miner/pieces.go +++ b/cmd/lotus-miner/pieces.go @@ -6,6 +6,7 @@ import ( "text/tabwriter" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/lib/tablewriter" "github.com/ipfs/go-cid" "github.com/urfave/cli/v2" ) @@ -48,6 +49,12 @@ var piecesListPiecesCmd = &cli.Command{ var piecesListCidInfosCmd = &cli.Command{ Name: "list-cids", Usage: "list registered payload CIDs", + Flags: []cli.Flag{ + &cli.BoolFlag{ + Name: "verbose", + Aliases: []string{"v"}, + }, + }, Action: func(cctx *cli.Context) error { 
nodeApi, closer, err := lcli.GetMarketsAPI(cctx) if err != nil { @@ -61,9 +68,54 @@ var piecesListCidInfosCmd = &cli.Command{ return err } + w := tablewriter.New(tablewriter.Col("CID"), + tablewriter.Col("Piece"), + tablewriter.Col("BlockOffset"), + tablewriter.Col("BlockLen"), + tablewriter.Col("Deal"), + tablewriter.Col("Sector"), + tablewriter.Col("DealOffset"), + tablewriter.Col("DealLen"), + ) + for _, c := range cids { - fmt.Println(c) + if !cctx.Bool("verbose") { + fmt.Println(c) + continue + } + + ci, err := nodeApi.PiecesGetCIDInfo(ctx, c) + if err != nil { + fmt.Printf("Error getting CID info: %s\n", err) + continue + } + + for _, location := range ci.PieceBlockLocations { + pi, err := nodeApi.PiecesGetPieceInfo(ctx, location.PieceCID) + if err != nil { + fmt.Printf("Error getting piece info: %s\n", err) + continue + } + + for _, deal := range pi.Deals { + w.Write(map[string]interface{}{ + "CID": c, + "Piece": location.PieceCID, + "BlockOffset": location.RelOffset, + "BlockLen": location.BlockSize, + "Deal": deal.DealID, + "Sector": deal.SectorID, + "DealOffset": deal.Offset, + "DealLen": deal.Length, + }) + } + } + } + + if cctx.Bool("verbose") { + return w.Flush(os.Stdout) } + return nil }, } diff --git a/cmd/lotus-miner/proving.go b/cmd/lotus-miner/proving.go index 5dfe5d4ce..ee15785fe 100644 --- a/cmd/lotus-miner/proving.go +++ b/cmd/lotus-miner/proving.go @@ -17,6 +17,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" lcli "github.com/filecoin-project/lotus/cli" + "github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/specs-storage/storage" ) @@ -360,6 +361,10 @@ var provingCheckProvableCmd = &cli.Command{ Name: "slow", Usage: "run slower checks", }, + &cli.StringFlag{ + Name: "storage-id", + Usage: "filter sectors by storage path (path id)", + }, }, Action: func(cctx *cli.Context) error { if cctx.Args().Len() != 1 { @@ -408,6 +413,21 @@ var 
provingCheckProvableCmd = &cli.Command{ tw := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) _, _ = fmt.Fprintln(tw, "deadline\tpartition\tsector\tstatus") + var filter map[abi.SectorID]struct{} + + if cctx.IsSet("storage-id") { + sl, err := sapi.StorageList(ctx) + if err != nil { + return err + } + decls := sl[stores.ID(cctx.String("storage-id"))] + + filter = map[abi.SectorID]struct{}{} + for _, decl := range decls { + filter[decl.SectorID] = struct{}{} + } + } + for parIdx, par := range partitions { sectors := make(map[abi.SectorNumber]struct{}) @@ -418,13 +438,21 @@ var provingCheckProvableCmd = &cli.Command{ var tocheck []storage.SectorRef for _, info := range sectorInfos { + si := abi.SectorID{ + Miner: abi.ActorID(mid), + Number: info.SectorNumber, + } + + if filter != nil { + if _, found := filter[si]; !found { + continue + } + } + sectors[info.SectorNumber] = struct{}{} tocheck = append(tocheck, storage.SectorRef{ ProofType: info.SealProof, - ID: abi.SectorID{ - Miner: abi.ActorID(mid), - Number: info.SectorNumber, - }, + ID: si, }) } diff --git a/cmd/lotus-miner/retrieval-deals.go b/cmd/lotus-miner/retrieval-deals.go index 1ce1f6593..bd5d30a4e 100644 --- a/cmd/lotus-miner/retrieval-deals.go +++ b/cmd/lotus-miner/retrieval-deals.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "sort" "text/tabwriter" "github.com/docker/go-units" @@ -137,6 +138,10 @@ var retrievalDealsListCmd = &cli.Command{ return err } + sort.Slice(deals, func(i, j int) bool { + return deals[i].ID < deals[j].ID + }) + w := tabwriter.NewWriter(os.Stdout, 2, 4, 2, ' ', 0) _, _ = fmt.Fprintf(w, "Receiver\tDealID\tPayload\tState\tPricePerByte\tBytesSent\tMessage\n") diff --git a/cmd/lotus-miner/sealing.go b/cmd/lotus-miner/sealing.go index a7e0a8de8..16b02f7bb 100644 --- a/cmd/lotus-miner/sealing.go +++ b/cmd/lotus-miner/sealing.go @@ -4,6 +4,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "math" "os" "sort" "strings" @@ -32,6 +33,17 @@ var sealingCmd = &cli.Command{ }, } +var 
barCols = float64(64) + +func barString(total, y, g float64) string { + yBars := int(math.Round(y / total * barCols)) + gBars := int(math.Round(g / total * barCols)) + eBars := int(barCols) - yBars - gBars + return color.YellowString(strings.Repeat("|", yBars)) + + color.GreenString(strings.Repeat("|", gBars)) + + strings.Repeat(" ", eBars) +} + var sealingWorkersCmd = &cli.Command{ Name: "workers", Usage: "list workers", @@ -77,7 +89,7 @@ var sealingWorkersCmd = &cli.Command{ for _, stat := range st { gpuUse := "not " gpuCol := color.FgBlue - if stat.GpuUsed { + if stat.GpuUsed > 0 { gpuCol = color.FgGreen gpuUse = "" } @@ -89,56 +101,43 @@ var sealingWorkersCmd = &cli.Command{ fmt.Printf("Worker %s, host %s%s\n", stat.id, color.MagentaString(stat.Info.Hostname), disabled) - var barCols = uint64(64) - cpuBars := int(stat.CpuUse * barCols / stat.Info.Resources.CPUs) - cpuBar := strings.Repeat("|", cpuBars) - if int(barCols)-cpuBars >= 0 { - cpuBar += strings.Repeat(" ", int(barCols)-cpuBars) - } - fmt.Printf("\tCPU: [%s] %d/%d core(s) in use\n", - color.GreenString(cpuBar), stat.CpuUse, stat.Info.Resources.CPUs) - - ramBarsRes := int(stat.Info.Resources.MemReserved * barCols / stat.Info.Resources.MemPhysical) - ramBarsUsed := int(stat.MemUsedMin * barCols / stat.Info.Resources.MemPhysical) - ramRepeatSpace := int(barCols) - (ramBarsUsed + ramBarsRes) - - colorFunc := color.YellowString - if ramRepeatSpace < 0 { - ramRepeatSpace = 0 - colorFunc = color.RedString + barString(float64(stat.Info.Resources.CPUs), 0, float64(stat.CpuUse)), stat.CpuUse, stat.Info.Resources.CPUs) + + ramTotal := stat.Info.Resources.MemPhysical + ramTasks := stat.MemUsedMin + ramUsed := stat.Info.Resources.MemUsed + var ramReserved uint64 = 0 + if ramUsed > ramTasks { + ramReserved = ramUsed - ramTasks } - - ramBar := colorFunc(strings.Repeat("|", ramBarsRes)) + - color.GreenString(strings.Repeat("|", ramBarsUsed)) + - strings.Repeat(" ", ramRepeatSpace) - - vmem := 
stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap - - vmemBarsRes := int(stat.Info.Resources.MemReserved * barCols / vmem) - vmemBarsUsed := int(stat.MemUsedMax * barCols / vmem) - vmemRepeatSpace := int(barCols) - (vmemBarsUsed + vmemBarsRes) - - colorFunc = color.YellowString - if vmemRepeatSpace < 0 { - vmemRepeatSpace = 0 - colorFunc = color.RedString - } - - vmemBar := colorFunc(strings.Repeat("|", vmemBarsRes)) + - color.GreenString(strings.Repeat("|", vmemBarsUsed)) + - strings.Repeat(" ", vmemRepeatSpace) + ramBar := barString(float64(ramTotal), float64(ramReserved), float64(ramTasks)) fmt.Printf("\tRAM: [%s] %d%% %s/%s\n", ramBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMin)*100/stat.Info.Resources.MemPhysical, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMin)), + (ramTasks+ramReserved)*100/stat.Info.Resources.MemPhysical, + types.SizeStr(types.NewInt(ramTasks+ramUsed)), types.SizeStr(types.NewInt(stat.Info.Resources.MemPhysical))) - fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, - (stat.Info.Resources.MemReserved+stat.MemUsedMax)*100/vmem, - types.SizeStr(types.NewInt(stat.Info.Resources.MemReserved+stat.MemUsedMax)), - types.SizeStr(types.NewInt(vmem))) + vmemTotal := stat.Info.Resources.MemPhysical + stat.Info.Resources.MemSwap + vmemTasks := stat.MemUsedMax + vmemUsed := stat.Info.Resources.MemUsed + stat.Info.Resources.MemSwapUsed + var vmemReserved uint64 = 0 + if vmemUsed > vmemTasks { + vmemReserved = vmemUsed - vmemTasks + } + vmemBar := barString(float64(vmemTotal), float64(vmemReserved), float64(vmemTasks)) + fmt.Printf("\tVMEM: [%s] %d%% %s/%s\n", vmemBar, + (vmemTasks+vmemReserved)*100/vmemTotal, + types.SizeStr(types.NewInt(vmemTasks+vmemReserved)), + types.SizeStr(types.NewInt(vmemTotal))) + + if len(stat.Info.Resources.GPUs) > 0 { + gpuBar := barString(float64(len(stat.Info.Resources.GPUs)), 0, stat.GpuUsed) + fmt.Printf("\tGPU: [%s] %.f%% %.2f/%d gpu(s) in use\n", color.GreenString(gpuBar), 
+ stat.GpuUsed*100/float64(len(stat.Info.Resources.GPUs)), + stat.GpuUsed, len(stat.Info.Resources.GPUs)) + } for _, gpu := range stat.Info.Resources.GPUs { fmt.Printf("\tGPU: %s\n", color.New(gpuCol).Sprintf("%s, %sused", gpu, gpuUse)) } @@ -224,8 +223,10 @@ var sealingJobsCmd = &cli.Command{ for _, l := range lines { state := "running" switch { - case l.RunWait > 0: + case l.RunWait > 1: state = fmt.Sprintf("assigned(%d)", l.RunWait-1) + case l.RunWait == storiface.RWPrepared: + state = "prepared" case l.RunWait == storiface.RWRetDone: if !cctx.Bool("show-ret-done") { continue diff --git a/cmd/lotus-miner/sectors.go b/cmd/lotus-miner/sectors.go index 43a71fd9e..cc5668334 100644 --- a/cmd/lotus-miner/sectors.go +++ b/cmd/lotus-miner/sectors.go @@ -11,6 +11,9 @@ import ( "strings" "time" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/builtin" + "github.com/docker/go-units" "github.com/fatih/color" cbor "github.com/ipfs/go-ipld-cbor" @@ -20,6 +23,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/network" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" "github.com/filecoin-project/lotus/api" @@ -50,11 +54,13 @@ var sectorsCmd = &cli.Command{ sectorsExtendCmd, sectorsTerminateCmd, sectorsRemoveCmd, + sectorsSnapUpCmd, sectorsMarkForUpgradeCmd, sectorsStartSealCmd, sectorsSealDelayCmd, sectorsCapacityCollateralCmd, sectorsBatching, + sectorsRefreshPieceMatchingCmd, }, } @@ -1476,6 +1482,44 @@ var sectorsRemoveCmd = &cli.Command{ }, } +var sectorsSnapUpCmd = &cli.Command{ + Name: "snap-up", + Usage: "Mark a committed capacity sector to be filled with deals", + ArgsUsage: "", + Action: func(cctx *cli.Context) error { + if cctx.Args().Len() != 1 { + return lcli.ShowHelp(cctx, xerrors.Errorf("must pass sector number")) + } + + nodeApi, closer, err := 
lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() + ctx := lcli.ReqContext(cctx) + + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to get network version: %w", err) + } + if nv < network.Version15 { + return xerrors.Errorf("snap deals upgrades enabled in network v15") + } + + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) + if err != nil { + return xerrors.Errorf("could not parse sector number: %w", err) + } + + return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), true) + }, +} + var sectorsMarkForUpgradeCmd = &cli.Command{ Name: "mark-for-upgrade", Usage: "Mark a committed capacity sector for replacement by a sector with deals", @@ -1490,14 +1534,40 @@ var sectorsMarkForUpgradeCmd = &cli.Command{ return err } defer closer() + + api, nCloser, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer nCloser() ctx := lcli.ReqContext(cctx) + nv, err := api.StateNetworkVersion(ctx, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("failed to get network version: %w", err) + } + if nv >= network.Version15 { + return xerrors.Errorf("classic cc upgrades disabled v15 and beyond, use `snap-up`") + } + + // disable mark for upgrade two days before the ntwk v15 upgrade + // TODO: remove the following block in v1.15.1 + head, err := api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("failed to get chain head: %w", err) + } + twoDays := abi.ChainEpoch(2 * builtin.EpochsInDay) + if head.Height() > (build.UpgradeOhSnapHeight - twoDays) { + return xerrors.Errorf("OhSnap is coming soon, " + + "please use `snap-up` to upgrade your cc sectors after the network v15 upgrade!") + } + id, err := strconv.ParseUint(cctx.Args().Get(0), 10, 64) if err != nil { return xerrors.Errorf("could not parse sector number: %w", err) } - return 
nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id)) + return nodeApi.SectorMarkForUpgrade(ctx, abi.SectorNumber(id), false) }, } @@ -1648,6 +1718,11 @@ var sectorsUpdateCmd = &cli.Command{ return xerrors.Errorf("could not parse sector number: %w", err) } + _, err = nodeApi.SectorsStatus(ctx, abi.SectorNumber(id), false) + if err != nil { + return xerrors.Errorf("sector %d not found, could not change state", id) + } + newState := cctx.Args().Get(1) if _, ok := sealing.ExistSectorStateList[sealing.SectorState(newState)]; !ok { fmt.Printf(" \"%s\" is not a valid state. Possible states for sectors are: \n", newState) @@ -1995,6 +2070,25 @@ var sectorsBatchingPendingPreCommit = &cli.Command{ }, } +var sectorsRefreshPieceMatchingCmd = &cli.Command{ + Name: "match-pending-pieces", + Usage: "force a refreshed match of pending pieces to open sectors without manually waiting for more deals", + Action: func(cctx *cli.Context) error { + nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + if err := nodeApi.SectorMatchPendingPiecesToOpenSectors(ctx); err != nil { + return err + } + + return nil + }, +} + func yesno(b bool) string { if b { return color.GreenString("YES") diff --git a/cmd/lotus-miner/storage.go b/cmd/lotus-miner/storage.go index e7508eb29..6f7a627f6 100644 --- a/cmd/lotus-miner/storage.go +++ b/cmd/lotus-miner/storage.go @@ -48,6 +48,7 @@ stored while moving through the sealing pipeline (references as 'seal').`, storageListCmd, storageFindCmd, storageCleanupCmd, + storageLocks, }, } @@ -95,6 +96,14 @@ over time Name: "max-storage", Usage: "(for init) limit storage space for sectors (expensive for very large paths!)", }, + &cli.StringSliceFlag{ + Name: "groups", + Usage: "path group names", + }, + &cli.StringSliceFlag{ + Name: "allow-to", + Usage: "path groups allowed to pull data from this path (allow all if not specified)", + }, }, Action: func(cctx *cli.Context) error { 
nodeApi, closer, err := lcli.GetStorageMinerAPI(cctx) @@ -142,6 +151,8 @@ over time CanSeal: cctx.Bool("seal"), CanStore: cctx.Bool("store"), MaxStorage: uint64(maxStor), + Groups: cctx.StringSlice("groups"), + AllowTo: cctx.StringSlice("allow-to"), } if !(cfg.CanStore || cfg.CanSeal) { @@ -322,10 +333,17 @@ var storageListCmd = &cli.Command{ if si.CanStore { fmt.Print(color.CyanString("Store")) } - fmt.Println("") } else { fmt.Print(color.HiYellowString("Use: ReadOnly")) } + fmt.Println() + + if len(si.Groups) > 0 { + fmt.Printf("\tGroups: %s\n", strings.Join(si.Groups, ", ")) + } + if len(si.AllowTo) > 0 { + fmt.Printf("\tAllowTo: %s\n", strings.Join(si.AllowTo, ", ")) + } if localPath, ok := local[s.ID]; ok { fmt.Printf("\tLocal: %s\n", color.GreenString(localPath)) @@ -741,3 +759,43 @@ func cleanupRemovedSectorData(ctx context.Context, api api.StorageMiner, napi v0 return nil } + +var storageLocks = &cli.Command{ + Name: "locks", + Usage: "show active sector locks", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetStorageMinerAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + + locks, err := api.StorageGetLocks(ctx) + if err != nil { + return err + } + + for _, lock := range locks.Locks { + st, err := api.SectorsStatus(ctx, lock.Sector.Number, false) + if err != nil { + return xerrors.Errorf("getting sector status(%d): %w", lock.Sector.Number, err) + } + + lockstr := fmt.Sprintf("%d\t%s\t", lock.Sector.Number, color.New(stateOrder[sealing.SectorState(st.State)].col).Sprint(st.State)) + + for i := 0; i < storiface.FileTypes; i++ { + if lock.Write[i] > 0 { + lockstr += fmt.Sprintf("%s(%s) ", storiface.SectorFileType(1< 0 { + lockstr += fmt.Sprintf("%s(%s:%d) ", storiface.SectorFileType(1<= network.Version14 { + var withdrawn abi.TokenAmount + if err := withdrawn.UnmarshalCBOR(bytes.NewReader(wait.Receipt.Return)); err != nil { + return err + } + + fmt.Printf("Successfully withdrew %s \n", 
types.FIL(withdrawn)) + if withdrawn.LessThan(amount) { + fmt.Printf("Note that this is less than the requested amount of %s \n", types.FIL(amount)) + } } return nil diff --git a/cmd/lotus-shed/balancer.go b/cmd/lotus-shed/balancer.go new file mode 100644 index 000000000..edc484ab6 --- /dev/null +++ b/cmd/lotus-shed/balancer.go @@ -0,0 +1,222 @@ +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var balancerCmd = &cli.Command{ + Name: "balancer", + Usage: "Utility for balancing tokens between multiple wallets", + Description: `Tokens are balanced based on the specification provided in arguments + +Each argument specifies an address, role, and role parameters separated by ';' + +Supported roles: + - request;[addr];[low];[high] - request tokens when balance drops to [low], topping up to [high] + - provide;[addr];[min] - provide tokens to other addresses as long as the balance is above [min] +`, + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + type request struct { + addr address.Address + low, high abi.TokenAmount + } + type provide struct { + addr address.Address + min abi.TokenAmount + } + + var requests []request + var provides []provide + + for i, s := range cctx.Args().Slice() { + ss := strings.Split(s, ";") + switch ss[0] { + case "request": + if len(ss) != 4 { + return xerrors.Errorf("request role needs 4 parameters (arg %d)", i) + } + + addr, err 
:= address.NewFromString(ss[1]) + if err != nil { + return xerrors.Errorf("parsing address in arg %d: %w", i, err) + } + + low, err := types.ParseFIL(ss[2]) + if err != nil { + return xerrors.Errorf("parsing low in arg %d: %w", i, err) + } + + high, err := types.ParseFIL(ss[3]) + if err != nil { + return xerrors.Errorf("parsing high in arg %d: %w", i, err) + } + + if abi.TokenAmount(low).GreaterThanEqual(abi.TokenAmount(high)) { + return xerrors.Errorf("low must be less than high in arg %d", i) + } + + requests = append(requests, request{ + addr: addr, + low: abi.TokenAmount(low), + high: abi.TokenAmount(high), + }) + case "provide": + if len(ss) != 3 { + return xerrors.Errorf("provide role needs 3 parameters (arg %d)", i) + } + + addr, err := address.NewFromString(ss[1]) + if err != nil { + return xerrors.Errorf("parsing address in arg %d: %w", i, err) + } + + min, err := types.ParseFIL(ss[2]) + if err != nil { + return xerrors.Errorf("parsing min in arg %d: %w", i, err) + } + + provides = append(provides, provide{ + addr: addr, + min: abi.TokenAmount(min), + }) + default: + return xerrors.Errorf("unknown role '%s' in arg %d", ss[0], i) + } + } + + if len(provides) == 0 { + return xerrors.Errorf("no provides specified") + } + if len(requests) == 0 { + return xerrors.Errorf("no requests specified") + } + + const confidence = 16 + + var notifs <-chan []*lapi.HeadChange + for { + if notifs == nil { + notifs, err = api.ChainNotify(ctx) + if err != nil { + return xerrors.Errorf("chain notify error: %w", err) + } + } + + var ts *types.TipSet + loop: + for { + time.Sleep(150 * time.Millisecond) + select { + case n := <-notifs: + for _, change := range n { + if change.Type != store.HCApply { + continue + } + + ts = change.Val + } + case <-ctx.Done(): + return nil + default: + break loop + } + } + + type send struct { + to address.Address + amt abi.TokenAmount + filled bool + } + var toSend []*send + + for _, req := range requests { + bal, err := api.StateGetActor(ctx, 
req.addr, ts.Key()) + if err != nil { + return err + } + + if bal.Balance.LessThan(req.low) { + toSend = append(toSend, &send{ + to: req.addr, + amt: big.Sub(req.high, bal.Balance), + }) + } + } + + for _, s := range toSend { + fmt.Printf("REQUEST %s for %s\n", types.FIL(s.amt), s.to) + } + + var msgs []cid.Cid + + for _, prov := range provides { + bal, err := api.StateGetActor(ctx, prov.addr, ts.Key()) + if err != nil { + return err + } + + avail := big.Sub(bal.Balance, prov.min) + for _, s := range toSend { + if s.filled { + continue + } + if avail.LessThan(s.amt) { + continue + } + + m, err := api.MpoolPushMessage(ctx, &types.Message{ + From: prov.addr, + To: s.to, + Value: s.amt, + }, nil) + if err != nil { + fmt.Printf("SEND ERROR %s\n", err.Error()) + } + fmt.Printf("SEND %s; %s from %s TO %s\n", m.Cid(), types.FIL(s.amt), s.to, prov.addr) + + msgs = append(msgs, m.Cid()) + s.filled = true + avail = big.Sub(avail, s.amt) + } + } + + if len(msgs) > 0 { + fmt.Printf("WAITING FOR %d MESSAGES\n", len(msgs)) + } + + for _, msg := range msgs { + ml, err := api.StateWaitMsg(ctx, msg, confidence, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + if ml.Receipt.ExitCode != exitcode.Ok { + fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msg, ml.Receipt.ExitCode) + } + } + } + }, +} diff --git a/cmd/lotus-shed/balances.go b/cmd/lotus-shed/balances.go index 1a22be3c3..7a07a431e 100644 --- a/cmd/lotus-shed/balances.go +++ b/cmd/lotus-shed/balances.go @@ -517,7 +517,7 @@ var chainBalanceStateCmd = &cli.Command{ cst := cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) + sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) if err != nil { return err } @@ -741,7 +741,7 @@ var chainPledgeCmd = &cli.Command{ cst := 
cbor.NewCborStore(bs) store := adt.WrapStore(ctx, cst) - sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) + sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) if err != nil { return err } diff --git a/cmd/lotus-shed/datastore.go b/cmd/lotus-shed/datastore.go index c3a9e572c..ff740a772 100644 --- a/cmd/lotus-shed/datastore.go +++ b/cmd/lotus-shed/datastore.go @@ -83,7 +83,7 @@ var datastoreListCmd = &cli.Command{ genc := cctx.String("get-enc") - q, err := ds.Query(dsq.Query{ + q, err := ds.Query(context.Background(), dsq.Query{ Prefix: datastore.NewKey(cctx.Args().Get(1)).String(), KeysOnly: genc == "", }) @@ -147,7 +147,7 @@ var datastoreGetCmd = &cli.Command{ return err } - val, err := ds.Get(datastore.NewKey(cctx.Args().Get(1))) + val, err := ds.Get(context.Background(), datastore.NewKey(cctx.Args().Get(1))) if err != nil { return xerrors.Errorf("get: %w", err) } diff --git a/cmd/lotus-shed/export.go b/cmd/lotus-shed/export.go index e711ba2bb..3851e4922 100644 --- a/cmd/lotus-shed/export.go +++ b/cmd/lotus-shed/export.go @@ -93,7 +93,7 @@ var exportChainCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, nil, nil) defer cs.Close() //nolint:errcheck - if err := cs.Load(); err != nil { + if err := cs.Load(context.Background()); err != nil { return err } @@ -110,7 +110,7 @@ var exportChainCmd = &cli.Command{ tsk := types.NewTipSetKey(cids...) 
- selts, err := cs.LoadTipSet(tsk) + selts, err := cs.LoadTipSet(context.Background(), tsk) if err != nil { return xerrors.Errorf("loading tipset: %w", err) } diff --git a/cmd/lotus-shed/genesis-verify.go b/cmd/lotus-shed/genesis-verify.go index 4a692d4c9..cc042064d 100644 --- a/cmd/lotus-shed/genesis-verify.go +++ b/cmd/lotus-shed/genesis-verify.go @@ -64,7 +64,7 @@ var genesisVerifyCmd = &cli.Command{ return xerrors.Errorf("opening the car file: %w", err) } - ts, err := cs.Import(f) + ts, err := cs.Import(cctx.Context, f) if err != nil { return err } diff --git a/cmd/lotus-shed/import-car.go b/cmd/lotus-shed/import-car.go index 4e465029f..973e7b31b 100644 --- a/cmd/lotus-shed/import-car.go +++ b/cmd/lotus-shed/import-car.go @@ -82,7 +82,7 @@ var importCarCmd = &cli.Command{ return err case nil: fmt.Printf("\r%s", blk.Cid()) - if err := bs.Put(blk); err != nil { + if err := bs.Put(context.Background(), blk); err != nil { if err := f.Close(); err != nil { return err } @@ -146,7 +146,7 @@ var importObjectCmd = &cli.Command{ return err } - if err := bs.Put(blk); err != nil { + if err := bs.Put(context.Background(), blk); err != nil { return err } diff --git a/cmd/lotus-shed/main.go b/cmd/lotus-shed/main.go index 21971a628..9bcea7224 100644 --- a/cmd/lotus-shed/main.go +++ b/cmd/lotus-shed/main.go @@ -59,10 +59,15 @@ func main() { signaturesCmd, actorCmd, minerTypesCmd, + minerPeeridCmd, minerMultisigsCmd, splitstoreCmd, fr32Cmd, chainCmd, + balancerCmd, + sendCsvCmd, + terminationsCmd, + migrationsCmd, } app := &cli.App{ diff --git a/cmd/lotus-shed/market.go b/cmd/lotus-shed/market.go index 8221e53eb..aaef4690e 100644 --- a/cmd/lotus-shed/market.go +++ b/cmd/lotus-shed/market.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "os" "path" @@ -198,7 +199,7 @@ var marketExportDatastoreCmd = &cli.Command{ } // Write the backup to the file - if err := bds.Backup(out); err != nil { + if err := bds.Backup(context.Background(), out); err != nil { if cerr := 
out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } @@ -215,7 +216,7 @@ var marketExportDatastoreCmd = &cli.Command{ } func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batching) error { - q, err := ds.Query(dsq.Query{ + q, err := ds.Query(context.Background(), dsq.Query{ Prefix: prefix, }) if err != nil { @@ -225,7 +226,7 @@ func exportPrefix(prefix string, ds datastore.Batching, backupDs datastore.Batch for res := range q.Next() { fmt.Println("Exporting key " + res.Key) - err := backupDs.Put(datastore.NewKey(res.Key), res.Value) + err := backupDs.Put(context.Background(), datastore.NewKey(res.Key), res.Value) if err != nil { return xerrors.Errorf("putting %s to backup datastore: %w", res.Key, err) } diff --git a/cmd/lotus-shed/migrations.go b/cmd/lotus-shed/migrations.go new file mode 100644 index 000000000..3d61fbd48 --- /dev/null +++ b/cmd/lotus-shed/migrations.go @@ -0,0 +1,127 @@ +package main + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/filecoin-project/lotus/chain/stmgr" + "github.com/filecoin-project/lotus/chain/vm" + "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" + "github.com/filecoin-project/specs-actors/v7/actors/migration/nv15" + + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/node/repo" + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" +) + +var migrationsCmd = &cli.Command{ + Name: "migrate-nv15", + Description: "Run the specified migration", + ArgsUsage: "[block to look back from]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if cctx.NArg() != 1 { + return fmt.Errorf("must pass block cid") + } + + blkCid, err := 
cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + sm, err := stmgr.NewStateManager(cs, filcns.NewTipSetExecutor(), nil, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) + if err != nil { + return err + } + + cache := nv15.NewMemMigrationCache() + + blk, err := cs.GetBlock(ctx, blkCid) + if err != nil { + return err + } + + migrationTs, err := cs.LoadTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return err + } + + ts1, err := cs.GetTipsetByHeight(ctx, blk.Height-240, migrationTs, false) + if err != nil { + return err + } + + startTime := time.Now() + + err = filcns.PreUpgradeActorsV7(ctx, sm, cache, ts1.ParentState(), ts1.Height()-1, ts1) + if err != nil { + return err + } + + fmt.Println("completed round 1, took ", time.Since(startTime)) + startTime = time.Now() + + newCid1, err := filcns.UpgradeActorsV7(ctx, sm, cache, nil, blk.ParentStateRoot, blk.Height-1, migrationTs) + if err != nil { + return err + } + fmt.Println("completed round actual (with cache), took ", time.Since(startTime)) + + fmt.Println("new cid", newCid1) + + newCid2, err := filcns.UpgradeActorsV7(ctx, sm, nv15.NewMemMigrationCache(), nil, blk.ParentStateRoot, blk.Height-1, migrationTs) + 
if err != nil { + return err + } + fmt.Println("completed round actual (without cache), took ", time.Since(startTime)) + + fmt.Println("new cid", newCid2) + return nil + }, +} diff --git a/cmd/lotus-shed/miner-peerid.go b/cmd/lotus-shed/miner-peerid.go new file mode 100644 index 000000000..3ccfb429b --- /dev/null +++ b/cmd/lotus-shed/miner-peerid.go @@ -0,0 +1,117 @@ +package main + +import ( + "context" + "fmt" + "io" + + "github.com/libp2p/go-libp2p-core/peer" + + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" + + "github.com/filecoin-project/lotus/chain/consensus/filcns" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/node/repo" + "github.com/filecoin-project/specs-actors/v4/actors/util/adt" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" +) + +var minerPeeridCmd = &cli.Command{ + Name: "miner-peerid", + Usage: "Scrape state to find a miner based on peerid", Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass peer id and state root") + } + + pid, err := peer.Decode(cctx.Args().Get(0)) + if err != nil { + return fmt.Errorf("failed to parse input as a peerId: %w", err) + } + + sroot, err := cid.Decode(cctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("failed to parse state root: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if 
err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + tree, err := state.LoadStateTree(cst, sroot) + if err != nil { + return err + } + + err = tree.ForEach(func(addr address.Address, act *types.Actor) error { + if act.Code == builtin5.StorageMinerActorCodeID { + ms, err := miner.Load(store, act) + if err != nil { + return err + } + + mi, err := ms.Info() + if err != nil { + return err + } + + if mi.PeerId != nil && *mi.PeerId == pid { + fmt.Println(addr) + } + } + return nil + }) + if err != nil { + return xerrors.Errorf("failed to loop over actors: %w", err) + } + + return nil + }, +} diff --git a/cmd/lotus-shed/miner.go b/cmd/lotus-shed/miner.go index ec5a445f9..479e081e9 100644 --- a/cmd/lotus-shed/miner.go +++ b/cmd/lotus-shed/miner.go @@ -2,11 +2,29 @@ package main import ( "bufio" + "bytes" + "fmt" "io" "os" "path/filepath" "strings" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + power6 "github.com/filecoin-project/specs-actors/v6/actors/builtin/power" + + "github.com/docker/go-units" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" + "github.com/mitchellh/go-homedir" 
"github.com/urfave/cli/v2" "golang.org/x/xerrors" @@ -17,6 +35,231 @@ var minerCmd = &cli.Command{ Usage: "miner-related utilities", Subcommands: []*cli.Command{ minerUnpackInfoCmd, + minerCreateCmd, + minerFaultsCmd, + }, +} + +var minerFaultsCmd = &cli.Command{ + Name: "faults", + Usage: "Display a list of faulty sectors for a SP", + ArgsUsage: "[minerAddress]", + Flags: []cli.Flag{ + &cli.Uint64Flag{ + Name: "expiring-in", + Usage: "only list sectors that are expiring in the next epochs", + Value: 0, + }, + }, + Action: func(cctx *cli.Context) error { + if !cctx.Args().Present() { + return fmt.Errorf("must pass miner address") + } + + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + + ctx := lcli.ReqContext(cctx) + + m, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + faultBf, err := api.StateMinerFaults(ctx, m, types.EmptyTSK) + if err != nil { + return err + } + + faults, err := faultBf.All(miner2.SectorsMax) + if err != nil { + return err + } + + if len(faults) == 0 { + fmt.Println("no faults") + return nil + } + + expEpoch := abi.ChainEpoch(cctx.Uint64("expiring-in")) + + if expEpoch == 0 { + fmt.Print("faulty sectors: ") + for _, v := range faults { + fmt.Printf("%d ", v) + } + + return nil + } + + h, err := api.ChainHead(ctx) + if err != nil { + return err + } + + fmt.Printf("faulty sectors expiring in the next %d epochs: ", expEpoch) + for _, v := range faults { + ss, err := api.StateSectorExpiration(ctx, m, abi.SectorNumber(v), types.EmptyTSK) + if err != nil { + return err + } + + if ss.Early < h.Height()+expEpoch { + fmt.Printf("%d ", v) + } + } + + return nil + }, +} + +var minerCreateCmd = &cli.Command{ + Name: "create", + Usage: "sends a create miner msg", + ArgsUsage: "[sender] [owner] [worker] [sector size]", + Action: func(cctx *cli.Context) error { + wapi, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + + defer closer() + 
ctx := lcli.ReqContext(cctx) + + if cctx.Args().Len() != 4 { + return xerrors.Errorf("expected 4 args (sender owner worker sectorSize)") + } + + sender, err := address.NewFromString(cctx.Args().First()) + if err != nil { + return err + } + + owner, err := address.NewFromString(cctx.Args().Get(1)) + if err != nil { + return err + } + + worker, err := address.NewFromString(cctx.Args().Get(2)) + if err != nil { + return err + } + + ssize, err := units.RAMInBytes(cctx.Args().Get(3)) + if err != nil { + return fmt.Errorf("failed to parse sector size: %w", err) + } + + // make sure the sender account exists on chain + _, err = wapi.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + return xerrors.Errorf("sender must exist on chain: %w", err) + } + + // make sure the worker account exists on chain + _, err = wapi.StateLookupID(ctx, worker, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: worker, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push worker init: %w", err) + } + + log.Infof("Initializing worker account %s, message: %s", worker, signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for worker init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing worker account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // make sure the owner account exists on chain + _, err = wapi.StateLookupID(ctx, owner, types.EmptyTSK) + if err != nil { + signed, err := wapi.MpoolPushMessage(ctx, &types.Message{ + From: sender, + To: owner, + Value: types.NewInt(0), + }, nil) + if err != nil { + return xerrors.Errorf("push owner init: %w", err) + } + + log.Infof("Initializing owner account %s, message: %s", worker, signed.Cid()) + log.Infof("Wating for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, 
signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for owner init: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("initializing owner account failed: exit code %d", mw.Receipt.ExitCode) + } + } + + // Note: the correct thing to do would be to call SealProofTypeFromSectorSize if actors version is v3 or later, but this still works + spt, err := miner.WindowPoStProofTypeFromSectorSize(abi.SectorSize(ssize)) + if err != nil { + return xerrors.Errorf("getting post proof type: %w", err) + } + + params, err := actors.SerializeParams(&power6.CreateMinerParams{ + Owner: owner, + Worker: worker, + WindowPoStProofType: spt, + }) + + if err != nil { + return err + } + + createStorageMinerMsg := &types.Message{ + To: power.Address, + From: sender, + Value: big.Zero(), + + Method: power.Methods.CreateMiner, + Params: params, + } + + signed, err := wapi.MpoolPushMessage(ctx, createStorageMinerMsg, nil) + if err != nil { + return xerrors.Errorf("pushing createMiner message: %w", err) + } + + log.Infof("Pushed CreateMiner message: %s", signed.Cid()) + log.Infof("Waiting for confirmation") + + mw, err := wapi.StateWaitMsg(ctx, signed.Cid(), build.MessageConfidence) + if err != nil { + return xerrors.Errorf("waiting for createMiner message: %w", err) + } + + if mw.Receipt.ExitCode != 0 { + return xerrors.Errorf("create miner failed: exit code %d", mw.Receipt.ExitCode) + } + + var retval power6.CreateMinerReturn + if err := retval.UnmarshalCBOR(bytes.NewReader(mw.Receipt.Return)); err != nil { + return err + } + + log.Infof("New miners address is: %s (%s)", retval.IDAddress, retval.RobustAddress) + + return nil }, } diff --git a/cmd/lotus-shed/msg.go b/cmd/lotus-shed/msg.go index b640fb9c9..7853624a6 100644 --- a/cmd/lotus-shed/msg.go +++ b/cmd/lotus-shed/msg.go @@ -148,6 +148,15 @@ func printMessage(cctx *cli.Context, msg *types.Message) error { fmt.Println("Params:", p) + if msg, err := messageFromBytes(cctx, 
msg.Params); err == nil { + fmt.Println("---") + color.Red("Params message:") + + if err := printMessage(cctx, msg.VMMessage()); err != nil { + return err + } + } + return nil } diff --git a/cmd/lotus-shed/pruning.go b/cmd/lotus-shed/pruning.go index 186a3191a..164ff197a 100644 --- a/cmd/lotus-shed/pruning.go +++ b/cmd/lotus-shed/pruning.go @@ -171,7 +171,7 @@ var stateTreePruneCmd = &cli.Command{ cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) defer cs.Close() //nolint:errcheck - if err := cs.Load(); err != nil { + if err := cs.Load(context.Background()); err != nil { return fmt.Errorf("loading chainstore: %w", err) } diff --git a/cmd/lotus-shed/sectors.go b/cmd/lotus-shed/sectors.go index 726d992c4..4894a6eea 100644 --- a/cmd/lotus-shed/sectors.go +++ b/cmd/lotus-shed/sectors.go @@ -2,11 +2,14 @@ package main import ( "bytes" + "context" "encoding/base64" + "encoding/binary" "fmt" "image" "image/color" "image/png" + "io" "os" "sort" "strconv" @@ -23,6 +26,7 @@ import ( miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/actors" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" "github.com/filecoin-project/lotus/chain/types" @@ -38,6 +42,7 @@ var sectorsCmd = &cli.Command{ terminateSectorCmd, terminateSectorPenaltyEstimationCmd, visAllocatedSectorsCmd, + dumpRLESectorCmd, }, } @@ -275,6 +280,113 @@ var terminateSectorPenaltyEstimationCmd = &cli.Command{ }, } +func activeMiners(ctx context.Context, api v0api.FullNode) ([]address.Address, error) { + miners, err := api.StateListMiners(ctx, types.EmptyTSK) + if err != nil { + return nil, err + } + powCache := make(map[address.Address]types.BigInt) + var lk sync.Mutex + parmap.Par(32, miners, func(a address.Address) { + pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) + + lk.Lock() + if err == nil { + powCache[a] = pow.MinerPower.QualityAdjPower + } else { + powCache[a] = 
types.NewInt(0) + } + lk.Unlock() + }) + sort.Slice(miners, func(i, j int) bool { + return powCache[miners[i]].GreaterThan(powCache[miners[j]]) + }) + n := sort.Search(len(miners), func(i int) bool { + pow := powCache[miners[i]] + return pow.IsZero() + }) + return append(miners[0:0:0], miners[:n]...), nil +} + +var dumpRLESectorCmd = &cli.Command{ + Name: "dump-rles", + Usage: "Dump AllocatedSectors RLEs from miners passed as arguments as run lengths in uint64 LE format.\nIf no arguments are passed, dumps all active miners in the state tree.", + Action: func(cctx *cli.Context) error { + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + ctx := lcli.ReqContext(cctx) + var miners []address.Address + if cctx.NArg() == 0 { + miners, err = activeMiners(ctx, api) + if err != nil { + return xerrors.Errorf("getting active miners: %w", err) + } + } else { + for _, mS := range cctx.Args().Slice() { + mA, err := address.NewFromString(mS) + if err != nil { + return xerrors.Errorf("parsing address '%s': %w", mS, err) + } + miners = append(miners, mA) + } + } + wbuf := make([]byte, 8) + buf := &bytes.Buffer{} + + for i := 0; i < len(miners); i++ { + buf.Reset() + err := func() error { + state, err := api.StateReadState(ctx, miners[i], types.EmptyTSK) + if err != nil { + return xerrors.Errorf("getting state: %+v", err) + } + allocSString := state.State.(map[string]interface{})["AllocatedSectors"].(map[string]interface{})["/"].(string) + + allocCid, err := cid.Decode(allocSString) + if err != nil { + return xerrors.Errorf("decoding cid: %+v", err) + } + rle, err := api.ChainReadObj(ctx, allocCid) + if err != nil { + return xerrors.Errorf("reading AllocatedSectors: %+v", err) + } + + var bf bitfield.BitField + err = bf.UnmarshalCBOR(bytes.NewReader(rle)) + if err != nil { + return xerrors.Errorf("decoding bitfield: %w", err) + } + ri, err := bf.RunIterator() + if err != nil { + return xerrors.Errorf("creating iterator: %w", err) + } 
+ + for ri.HasNext() { + run, err := ri.NextRun() + if err != nil { + return xerrors.Errorf("getting run: %w", err) + } + binary.LittleEndian.PutUint64(wbuf, run.Len) + buf.Write(wbuf) + } + _, err = io.Copy(os.Stdout, buf) + if err != nil { + return xerrors.Errorf("copy: %w", err) + } + + return nil + }() + if err != nil { + log.Errorf("miner %d: %s: %+v", i, miners[i], err) + } + } + return nil + }, +} + var visAllocatedSectorsCmd = &cli.Command{ Name: "vis-allocated", Usage: "Produces a html with visualisation of allocated sectors", @@ -287,32 +399,10 @@ var visAllocatedSectorsCmd = &cli.Command{ ctx := lcli.ReqContext(cctx) var miners []address.Address if cctx.NArg() == 0 { - miners, err = api.StateListMiners(ctx, types.EmptyTSK) + miners, err = activeMiners(ctx, api) if err != nil { - return err + return xerrors.Errorf("getting active miners: %w", err) } - powCache := make(map[address.Address]types.BigInt) - var lk sync.Mutex - parmap.Par(32, miners, func(a address.Address) { - pow, err := api.StateMinerPower(ctx, a, types.EmptyTSK) - - lk.Lock() - if err == nil { - powCache[a] = pow.MinerPower.QualityAdjPower - } else { - powCache[a] = types.NewInt(0) - } - lk.Unlock() - }) - sort.Slice(miners, func(i, j int) bool { - return powCache[miners[i]].GreaterThan(powCache[miners[j]]) - }) - n := sort.Search(len(miners), func(i int) bool { - pow := powCache[miners[i]] - log.Infof("pow @%d = %s", i, pow) - return pow.IsZero() - }) - miners = miners[:n] } else { for _, mS := range cctx.Args().Slice() { mA, err := address.NewFromString(mS) diff --git a/cmd/lotus-shed/send-csv.go b/cmd/lotus-shed/send-csv.go new file mode 100644 index 000000000..ce1c8b68a --- /dev/null +++ b/cmd/lotus-shed/send-csv.go @@ -0,0 +1,152 @@ +package main + +import ( + "encoding/csv" + "encoding/hex" + "fmt" + "os" + "strconv" + "strings" + + "github.com/ipfs/go-cid" + "github.com/urfave/cli/v2" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + 
"github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/exitcode" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + lcli "github.com/filecoin-project/lotus/cli" +) + +var sendCsvCmd = &cli.Command{ + Name: "send-csv", + Usage: "Utility for sending a batch of balance transfers", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "from", + Usage: "specify the account to send funds from", + Required: true, + }, + }, + ArgsUsage: "[csvfile]", + Action: func(cctx *cli.Context) error { + if cctx.NArg() != 1 { + return xerrors.New("must supply path to csv file") + } + + api, closer, err := lcli.GetFullNodeAPIV1(cctx) + if err != nil { + return err + } + + defer closer() + ctx := lcli.ReqContext(cctx) + + srv, err := lcli.GetFullNodeServices(cctx) + if err != nil { + return err + } + defer srv.Close() //nolint:errcheck + + sender, err := address.NewFromString(cctx.String("from")) + if err != nil { + return err + } + + fileReader, err := os.Open(cctx.Args().First()) + if err != nil { + return xerrors.Errorf("read csv: %w", err) + } + + defer fileReader.Close() //nolint:errcheck + r := csv.NewReader(fileReader) + records, err := r.ReadAll() + if err != nil { + return xerrors.Errorf("read csv: %w", err) + } + + if strings.TrimSpace(records[0][0]) != "Recipient" || + strings.TrimSpace(records[0][1]) != "FIL" || + strings.TrimSpace(records[0][2]) != "Method" || + strings.TrimSpace(records[0][3]) != "Params" { + return xerrors.Errorf("expected header row to be \"Recipient, FIL, Method, Params\"") + } + + var msgs []*types.Message + for i, e := range records[1:] { + addr, err := address.NewFromString(e[0]) + if err != nil { + return xerrors.Errorf("failed to parse address in row %d: %w", i, err) + } + + value, err := types.ParseFIL(strings.TrimSpace(e[1])) + if err != nil { + return xerrors.Errorf("failed to parse value balance: %w", err) + } + + method, err := 
strconv.Atoi(strings.TrimSpace(e[2])) + if err != nil { + return xerrors.Errorf("failed to parse method number: %w", err) + } + + var params []byte + if strings.TrimSpace(e[3]) != "nil" { + params, err = hex.DecodeString(strings.TrimSpace(e[3])) + if err != nil { + return xerrors.Errorf("failed to parse hexparams: %w", err) + } + } + + msgs = append(msgs, &types.Message{ + To: addr, + From: sender, + Value: abi.TokenAmount(value), + Method: abi.MethodNum(method), + Params: params, + }) + } + + if len(msgs) == 0 { + return nil + } + + var msgCids []cid.Cid + for i, msg := range msgs { + smsg, err := api.MpoolPushMessage(ctx, msg, nil) + if err != nil { + fmt.Printf("%d, ERROR %s\n", i, err) + continue + } + + fmt.Printf("%d, %s\n", i, smsg.Cid()) + + if i > 0 && i%100 == 0 { + fmt.Printf("catching up until latest message lands") + _, err := api.StateWaitMsg(ctx, smsg.Cid(), 1, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + } + + msgCids = append(msgCids, smsg.Cid()) + } + + fmt.Println("waiting on messages...") + + for _, msgCid := range msgCids { + ml, err := api.StateWaitMsg(ctx, msgCid, 5, lapi.LookbackNoLimit, true) + if err != nil { + return err + } + if ml.Receipt.ExitCode != exitcode.Ok { + fmt.Printf("MSG %s NON-ZERO EXITCODE: %s\n", msgCid, ml.Receipt.ExitCode) + } + } + + fmt.Println("all sent messages succeeded") + return nil + }, +} diff --git a/cmd/lotus-shed/splitstore.go b/cmd/lotus-shed/splitstore.go index 4f668888e..58563955f 100644 --- a/cmd/lotus-shed/splitstore.go +++ b/cmd/lotus-shed/splitstore.go @@ -331,7 +331,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { } var keys []datastore.Key - res, err := ds.Query(query.Query{Prefix: "/splitstore"}) + res, err := ds.Query(context.Background(), query.Query{Prefix: "/splitstore"}) if err != nil { return xerrors.Errorf("error querying datastore for splitstore keys: %w", err) } @@ -346,7 +346,7 @@ func deleteSplitstoreKeys(lr repo.LockedRepo) error { for _, k := range keys { 
fmt.Printf("deleting %s from datastore...\n", k) - err = ds.Delete(k) + err = ds.Delete(context.Background(), k) if err != nil { return xerrors.Errorf("error deleting key %s from datastore: %w", k, err) } diff --git a/cmd/lotus-shed/terminations.go b/cmd/lotus-shed/terminations.go new file mode 100644 index 000000000..0691f35da --- /dev/null +++ b/cmd/lotus-shed/terminations.go @@ -0,0 +1,181 @@ +package main + +import ( + "bytes" + "context" + "fmt" + "io" + "strconv" + + "github.com/filecoin-project/lotus/chain/actors/builtin" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/types" + + "github.com/filecoin-project/lotus/chain/actors/builtin/market" + + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/consensus/filcns" + "github.com/filecoin-project/lotus/chain/state" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/node/repo" + miner2 "github.com/filecoin-project/specs-actors/actors/builtin/miner" + "github.com/ipfs/go-cid" + cbor "github.com/ipfs/go-ipld-cbor" + "github.com/urfave/cli/v2" +) + +var terminationsCmd = &cli.Command{ + Name: "terminations", + Description: "Lists terminated deals from the past 2 days", + ArgsUsage: "[block to look back from] [lookback period (epochs)]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "repo", + Value: "~/.lotus", + }, + }, + Action: func(cctx *cli.Context) error { + ctx := context.TODO() + + if cctx.NArg() != 2 { + return fmt.Errorf("must pass block cid && lookback period") + } + + blkCid, err := cid.Decode(cctx.Args().First()) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + fsrepo, err := repo.NewFS(cctx.String("repo")) + if err != nil { + return err + } + + lkrepo, err := fsrepo.Lock(repo.FullNode) + if err != nil { + return err + } + + defer lkrepo.Close() //nolint:errcheck + + bs, err := 
lkrepo.Blockstore(ctx, repo.UniversalBlockstore) + if err != nil { + return fmt.Errorf("failed to open blockstore: %w", err) + } + + defer func() { + if c, ok := bs.(io.Closer); ok { + if err := c.Close(); err != nil { + log.Warnf("failed to close blockstore: %s", err) + } + } + }() + + mds, err := lkrepo.Datastore(context.Background(), "/metadata") + if err != nil { + return err + } + + cs := store.NewChainStore(bs, bs, mds, filcns.Weight, nil) + defer cs.Close() //nolint:errcheck + + cst := cbor.NewCborStore(bs) + store := adt.WrapStore(ctx, cst) + + blk, err := cs.GetBlock(ctx, blkCid) + if err != nil { + return err + } + + lbp, err := strconv.Atoi(cctx.Args().Get(1)) + if err != nil { + return fmt.Errorf("failed to parse input: %w", err) + } + + cutoff := blk.Height - abi.ChainEpoch(lbp) + + for blk.Height > cutoff { + pts, err := cs.LoadTipSet(ctx, types.NewTipSetKey(blk.Parents...)) + if err != nil { + return err + } + + blk = pts.Blocks()[0] + + msgs, err := cs.MessagesForTipset(ctx, pts) + if err != nil { + return err + } + + for _, v := range msgs { + msg := v.VMMessage() + if msg.Method != miner.Methods.TerminateSectors { + continue + } + + tree, err := state.LoadStateTree(cst, blk.ParentStateRoot) + if err != nil { + return err + } + + minerAct, err := tree.GetActor(msg.To) + if err != nil { + return err + } + + if !builtin.IsStorageMinerActor(minerAct.Code) { + continue + } + + minerSt, err := miner.Load(store, minerAct) + if err != nil { + return err + } + + marketAct, err := tree.GetActor(market.Address) + if err != nil { + return err + } + + marketSt, err := market.Load(store, marketAct) + if err != nil { + return err + } + + proposals, err := marketSt.Proposals() + if err != nil { + return err + } + + var termParams miner2.TerminateSectorsParams + err = termParams.UnmarshalCBOR(bytes.NewBuffer(msg.Params)) + if err != nil { + return err + } + + for _, t := range termParams.Terminations { + sectors, err := minerSt.LoadSectors(&t.Sectors) + if err != 
nil { + return err + } + + for _, sector := range sectors { + for _, deal := range sector.DealIDs { + prop, find, err := proposals.Get(deal) + if err != nil { + return err + } + if find { + fmt.Printf("%s, %d, %d, %s, %s, %s\n", msg.To, sector.SectorNumber, deal, prop.Client, prop.PieceCID, prop.Label) + } + } + } + } + } + } + + return nil + }, +} diff --git a/cmd/lotus-sim/create.go b/cmd/lotus-sim/create.go index 4867a5da5..23ea454a3 100644 --- a/cmd/lotus-sim/create.go +++ b/cmd/lotus-sim/create.go @@ -26,7 +26,7 @@ var createSimCommand = &cli.Command{ var ts *types.TipSet switch cctx.NArg() { case 0: - if err := node.Chainstore.Load(); err != nil { + if err := node.Chainstore.Load(cctx.Context); err != nil { return err } ts = node.Chainstore.GetHeaviestTipSet() @@ -36,7 +36,7 @@ var createSimCommand = &cli.Command{ return err } tsk := types.NewTipSetKey(cids...) - ts, err = node.Chainstore.LoadTipSet(tsk) + ts, err = node.Chainstore.LoadTipSet(cctx.Context, tsk) if err != nil { return err } diff --git a/cmd/lotus-sim/info_capacity.go b/cmd/lotus-sim/info_capacity.go index 4372ee34a..a92d2cde4 100644 --- a/cmd/lotus-sim/info_capacity.go +++ b/cmd/lotus-sim/info_capacity.go @@ -39,7 +39,7 @@ var infoCapacityGrowthSimCommand = &cli.Command{ lastHeight := ts.Height() for ts.Height() > firstEpoch && cctx.Err() == nil { - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/info_state.go b/cmd/lotus-sim/info_state.go index 5c9541513..125dae81d 100644 --- a/cmd/lotus-sim/info_state.go +++ b/cmd/lotus-sim/info_state.go @@ -60,7 +60,7 @@ var infoStateGrowthSimCommand = &cli.Command{ var links []cid.Cid var totalSize uint64 - if err := store.View(c, func(data []byte) error { + if err := store.View(cctx.Context, c, func(data []byte) error { totalSize += uint64(len(data)) return cbg.ScanForLinks(bytes.NewReader(data), func(c cid.Cid) { if 
c.Prefix().Codec != cid.DagCBOR { @@ -131,7 +131,7 @@ var infoStateGrowthSimCommand = &cli.Command{ fmt.Fprintf(cctx.App.Writer, "%d: %s\n", ts.Height(), types.SizeStr(types.NewInt(parentStateSize))) } - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(cctx.Context, ts.Parents()) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/block.go b/cmd/lotus-sim/simulation/block.go index 93e6a3191..106bc53f5 100644 --- a/cmd/lotus-sim/simulation/block.go +++ b/cmd/lotus-sim/simulation/block.go @@ -73,7 +73,7 @@ func (sim *Simulation) makeTipSet(ctx context.Context, messages []*types.Message Timestamp: uts, ElectionProof: &types.ElectionProof{WinCount: 1}, }} - err = sim.Node.Chainstore.PersistBlockHeaders(blks...) + err = sim.Node.Chainstore.PersistBlockHeaders(ctx, blks...) if err != nil { return nil, xerrors.Errorf("failed to persist block headers: %w", err) } diff --git a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go index a6353e4f4..fb822eb6e 100644 --- a/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go +++ b/cmd/lotus-sim/simulation/blockbuilder/blockbuilder.go @@ -79,7 +79,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S // 1. We don't charge a fee. // 2. The runtime has "fake" proof logic. // 3. We don't actually save any of the results. 
- r := lrand.NewStateRand(sm.ChainStore(), parentTs.Cids(), sm.Beacon()) + r := lrand.NewStateRand(sm.ChainStore(), parentTs.Cids(), sm.Beacon(), sm.GetNetworkVersion) vmopt := &vm.VMOpts{ StateBase: parentState, Epoch: parentTs.Height() + 1, @@ -88,7 +88,7 @@ func NewBlockBuilder(ctx context.Context, logger *zap.SugaredLogger, sm *stmgr.S Actors: filcns.NewActorRegistry(), Syscalls: sm.VMSys(), CircSupplyCalc: sm.GetVMCirculatingSupply, - NtwkVersion: sm.GetNtwkVersion, + NetworkVersion: sm.GetNetworkVersion(ctx, parentTs.Height()+1), BaseFee: abi.NewTokenAmount(0), LookbackState: stmgr.LookbackStateGetterForTipset(sm, parentTs), } @@ -265,7 +265,7 @@ func (bb *BlockBuilder) Height() abi.ChainEpoch { // NetworkVersion returns the network version for the target block. func (bb *BlockBuilder) NetworkVersion() network.Version { - return bb.sm.GetNtwkVersion(bb.ctx, bb.Height()) + return bb.sm.GetNetworkVersion(bb.ctx, bb.Height()) } // StateManager returns the stmgr.StateManager. diff --git a/cmd/lotus-sim/simulation/messages.go b/cmd/lotus-sim/simulation/messages.go index 5bed27436..d6dd98d43 100644 --- a/cmd/lotus-sim/simulation/messages.go +++ b/cmd/lotus-sim/simulation/messages.go @@ -31,7 +31,7 @@ func (sim *Simulation) storeMessages(ctx context.Context, messages []*types.Mess // fail a pre-commit... 
var msgCids []cid.Cid for _, msg := range messages { - c, err := sim.Node.Chainstore.PutMessage(msg) + c, err := sim.Node.Chainstore.PutMessage(ctx, msg) if err != nil { return cid.Undef, err } diff --git a/cmd/lotus-sim/simulation/mock/mock.go b/cmd/lotus-sim/simulation/mock/mock.go index 38648f758..b1d36ba48 100644 --- a/cmd/lotus-sim/simulation/mock/mock.go +++ b/cmd/lotus-sim/simulation/mock/mock.go @@ -6,6 +6,8 @@ import ( "encoding/binary" "fmt" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/ipfs/go-cid" @@ -70,7 +72,13 @@ func (mockVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyPro ) return false, nil } -func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { + +// TODO: do the thing +func (mockVerifier) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { + return false, nil +} + +func (mockVerifier) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { panic("should not be called") } func (mockVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { diff --git a/cmd/lotus-sim/simulation/node.go b/cmd/lotus-sim/simulation/node.go index c18f27a33..4668b5175 100644 --- a/cmd/lotus-sim/simulation/node.go +++ b/cmd/lotus-sim/simulation/node.go @@ -106,7 +106,7 @@ func (nd *Node) LoadSim(ctx context.Context, name string) (*Simulation, error) { if err != nil { return nil, xerrors.Errorf("failed to create upgrade schedule for simulation %s: %w", name, err) } - sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), us, nil) + sim.StateManager, err = stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), nil, vm.Syscalls(mock.Verifier), us, nil) if err != nil { return nil, xerrors.Errorf("failed to 
create state manager for simulation %s: %w", name, err) } @@ -125,7 +125,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) if err != nil { return nil, err } - sm, err := stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil) + sm, err := stmgr.NewStateManager(nd.Chainstore, filcns.NewTipSetExecutor(), nil, vm.Syscalls(mock.Verifier), filcns.DefaultUpgradeSchedule(), nil) if err != nil { return nil, xerrors.Errorf("creating state manager: %w", err) } @@ -135,7 +135,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) StateManager: sm, stages: stages, } - if has, err := nd.MetadataDS.Has(sim.key("head")); err != nil { + if has, err := nd.MetadataDS.Has(ctx, sim.key("head")); err != nil { return nil, err } else if has { return nil, xerrors.Errorf("simulation named %s already exists", name) @@ -155,7 +155,7 @@ func (nd *Node) CreateSim(ctx context.Context, name string, head *types.TipSet) // ListSims lists all simulations. 
func (nd *Node) ListSims(ctx context.Context) ([]string, error) { prefix := simulationPrefix.ChildString("head").String() - items, err := nd.MetadataDS.Query(query.Query{ + items, err := nd.MetadataDS.Query(ctx, query.Query{ Prefix: prefix, KeysOnly: true, Orders: []query.Order{query.OrderByKey{}}, @@ -192,7 +192,7 @@ func (nd *Node) DeleteSim(ctx context.Context, name string) error { var err error for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(name) - err = multierr.Append(err, nd.MetadataDS.Delete(key)) + err = multierr.Append(err, nd.MetadataDS.Delete(ctx, key)) } return err } @@ -209,7 +209,7 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { values := make(map[string][]byte) for _, field := range simFields { key := simulationPrefix.ChildString(field).ChildString(oldName) - value, err := nd.MetadataDS.Get(key) + value, err := nd.MetadataDS.Get(ctx, key) if err == datastore.ErrNotFound { continue } else if err != nil { @@ -226,9 +226,9 @@ func (nd *Node) CopySim(ctx context.Context, oldName, newName string) error { key := simulationPrefix.ChildString(field).ChildString(newName) var err error if value, ok := values[field]; ok { - err = nd.MetadataDS.Put(key, value) + err = nd.MetadataDS.Put(ctx, key, value) } else { - err = nd.MetadataDS.Delete(key) + err = nd.MetadataDS.Delete(ctx, key) } if err != nil { return err diff --git a/cmd/lotus-sim/simulation/simulation.go b/cmd/lotus-sim/simulation/simulation.go index 02792e332..3595fac50 100644 --- a/cmd/lotus-sim/simulation/simulation.go +++ b/cmd/lotus-sim/simulation/simulation.go @@ -90,7 +90,7 @@ type Simulation struct { // loadConfig loads a simulation's config from the datastore. This must be called on startup and may // be called to restore the config from-disk. 
func (sim *Simulation) loadConfig() error { - configBytes, err := sim.Node.MetadataDS.Get(sim.key("config")) + configBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key("config")) if err == nil { err = json.Unmarshal(configBytes, &sim.config) } @@ -111,7 +111,7 @@ func (sim *Simulation) saveConfig() error { if err != nil { return err } - return sim.Node.MetadataDS.Put(sim.key("config"), buf) + return sim.Node.MetadataDS.Put(context.TODO(), sim.key("config"), buf) } var simulationPrefix = datastore.NewKey("/simulation") @@ -124,7 +124,7 @@ func (sim *Simulation) key(subkey string) datastore.Key { // loadNamedTipSet the tipset with the given name (for this simulation) func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { - tskBytes, err := sim.Node.MetadataDS.Get(sim.key(name)) + tskBytes, err := sim.Node.MetadataDS.Get(context.TODO(), sim.key(name)) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s/%s: %w", sim.name, name, err) } @@ -132,7 +132,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { if err != nil { return nil, xerrors.Errorf("failed to parse tipste %v (%s/%s): %w", tskBytes, sim.name, name, err) } - ts, err := sim.Node.Chainstore.LoadTipSet(tsk) + ts, err := sim.Node.Chainstore.LoadTipSet(context.TODO(), tsk) if err != nil { return nil, xerrors.Errorf("failed to load tipset %s (%s/%s): %w", tsk, sim.name, name, err) } @@ -141,7 +141,7 @@ func (sim *Simulation) loadNamedTipSet(name string) (*types.TipSet, error) { // storeNamedTipSet stores the tipset at name (relative to the simulation). 
func (sim *Simulation) storeNamedTipSet(name string, ts *types.TipSet) error { - if err := sim.Node.MetadataDS.Put(sim.key(name), ts.Key().Bytes()); err != nil { + if err := sim.Node.MetadataDS.Put(context.TODO(), sim.key(name), ts.Key().Bytes()); err != nil { return xerrors.Errorf("failed to store tipset (%s/%s): %w", sim.name, name, err) } return nil @@ -159,7 +159,7 @@ func (sim *Simulation) GetStart() *types.TipSet { // GetNetworkVersion returns the current network version for the simulation. func (sim *Simulation) GetNetworkVersion() network.Version { - return sim.StateManager.GetNtwkVersion(context.TODO(), sim.head.Height()) + return sim.StateManager.GetNetworkVersion(context.TODO(), sim.head.Height()) } // SetHead updates the current head of the simulation and stores it in the metadata store. This is @@ -201,7 +201,7 @@ func (sim *Simulation) SetUpgradeHeight(nv network.Version, epoch abi.ChainEpoch if err != nil { return err } - sm, err := stmgr.NewStateManager(sim.Node.Chainstore, filcns.NewTipSetExecutor(), vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil) + sm, err := stmgr.NewStateManager(sim.Node.Chainstore, filcns.NewTipSetExecutor(), nil, vm.Syscalls(mock.Verifier), newUpgradeSchedule, nil) if err != nil { return err } @@ -308,7 +308,7 @@ func (sim *Simulation) Walk( stCid = ts.MinTicketBlock().ParentStateRoot recCid = ts.MinTicketBlock().ParentMessageReceipts - ts, err = sim.Node.Chainstore.LoadTipSet(ts.Parents()) + ts, err = sim.Node.Chainstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return xerrors.Errorf("loading parent: %w", err) } @@ -342,7 +342,7 @@ func (sim *Simulation) Walk( break } - msgs, err := sim.Node.Chainstore.MessagesForTipset(job.ts) + msgs, err := sim.Node.Chainstore.MessagesForTipset(ctx, job.ts) if err != nil { return err } diff --git a/cmd/lotus-sim/simulation/step.go b/cmd/lotus-sim/simulation/step.go index 902f2ad6c..f9d58529e 100644 --- a/cmd/lotus-sim/simulation/step.go +++ b/cmd/lotus-sim/simulation/step.go @@ 
-41,8 +41,8 @@ func (sim *Simulation) popNextMessages(ctx context.Context) ([]*types.Message, e // This isn't what the network does, but it makes things easier. Otherwise, we'd need to run // migrations before this epoch and I'd rather not deal with that. nextHeight := parentTs.Height() + 1 - prevVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight-1) - nextVer := sim.StateManager.GetNtwkVersion(ctx, nextHeight) + prevVer := sim.StateManager.GetNetworkVersion(ctx, nextHeight-1) + nextVer := sim.StateManager.GetNetworkVersion(ctx, nextHeight) if nextVer != prevVer { log.Warnw("packing no messages for version upgrade block", "old", prevVer, diff --git a/cmd/lotus-stats/docker-compose.yml b/cmd/lotus-stats/docker-compose.yml index b08a2157e..4453f49ec 100644 --- a/cmd/lotus-stats/docker-compose.yml +++ b/cmd/lotus-stats/docker-compose.yml @@ -2,7 +2,7 @@ version: '3' services: influxdb: - image: influxdb:latest + image: influxdb:1.8 container_name: influxdb ports: - "18086:8086" diff --git a/cmd/lotus-stats/main.go b/cmd/lotus-stats/main.go index b4c13ea8c..706a453eb 100644 --- a/cmd/lotus-stats/main.go +++ b/cmd/lotus-stats/main.go @@ -2,18 +2,36 @@ package main import ( "context" + "net/http" + _ "net/http/pprof" "os" + "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/build" lcli "github.com/filecoin-project/lotus/cli" - "github.com/filecoin-project/lotus/tools/stats" + "github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/ipldstore" + "github.com/filecoin-project/lotus/tools/stats/metrics" + "github.com/filecoin-project/lotus/tools/stats/points" + "github.com/filecoin-project/lotus/tools/stats/sync" logging "github.com/ipfs/go-log/v2" "github.com/urfave/cli/v2" + + "contrib.go.opencensus.io/exporter/prometheus" + stats "go.opencensus.io/stats" + "go.opencensus.io/stats/view" ) var log = logging.Logger("stats") +func init() { + if err := 
view.Register(metrics.DefaultViews...); err != nil { + log.Fatal(err) + } +} + func main() { local := []*cli.Command{ runCmd, @@ -37,7 +55,7 @@ func main() { }, }, Before: func(cctx *cli.Context) error { - return logging.SetLogLevel("stats", cctx.String("log-level")) + return logging.SetLogLevelRegex("stats/*", cctx.String("log-level")) }, Commands: local, } @@ -104,6 +122,12 @@ var runCmd = &cli.Command{ Usage: "do not wait for chain sync to complete", Value: false, }, + &cli.IntFlag{ + Name: "ipld-store-cache-size", + Usage: "size of lru cache for ChainReadObj", + EnvVars: []string{"LOTUS_STATS_IPLD_STORE_CACHE_SIZE"}, + Value: 2 << 15, + }, }, Action: func(cctx *cli.Context) error { ctx := context.Background() @@ -118,43 +142,125 @@ var runCmd = &cli.Command{ influxPasswordFlag := cctx.String("influx-password") influxDatabaseFlag := cctx.String("influx-database") + ipldStoreCacheSizeFlag := cctx.Int("ipld-store-cache-size") + log.Infow("opening influx client", "hostname", influxHostnameFlag, "username", influxUsernameFlag, "database", influxDatabaseFlag) - influx, err := stats.InfluxClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) + influxClient, err := influx.NewClient(influxHostnameFlag, influxUsernameFlag, influxPasswordFlag) if err != nil { - log.Fatal(err) + return err } + exporter, err := prometheus.NewExporter(prometheus.Options{ + Namespace: "lotus_stats", + }) + if err != nil { + return err + } + + go func() { + http.Handle("/metrics", exporter) + if err := http.ListenAndServe(":6688", nil); err != nil { + log.Errorw("failed to start http server", "err", err) + } + }() + if resetFlag { - if err := stats.ResetDatabase(influx, influxDatabaseFlag); err != nil { - log.Fatal(err) + if err := influx.ResetDatabase(influxClient, influxDatabaseFlag); err != nil { + return err } } - height := int64(heightFlag) + api, closer, err := lcli.GetFullNodeAPI(cctx) + if err != nil { + return err + } + defer closer() + + if !noSyncFlag { + if err := 
sync.SyncWait(ctx, api); err != nil { + return err + } + } + + gtp, err := api.ChainGetGenesis(ctx) + if err != nil { + return err + } + genesisTime := time.Unix(int64(gtp.MinTimestamp()), 0) + + // When height is set to `0` we will resume from the best height we can. + // The goal is to ensure we have data in the last 60 tipsets + height := int64(heightFlag) if !resetFlag && height == 0 { - h, err := stats.GetLastRecordedHeight(influx, influxDatabaseFlag) + lastHeight, err := influx.GetLastRecordedHeight(influxClient, influxDatabaseFlag) + if err != nil { + return err + } + + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + startOfWindowHeight := expectedHeight - 60 + + if lastHeight > startOfWindowHeight { + height = lastHeight + } else { + height = startOfWindowHeight + } + + ts, err := api.ChainHead(ctx) if err != nil { - log.Info(err) + return err } - height = h + headHeight := int64(ts.Height()) + if headHeight < height { + height = headHeight + } } - api, closer, err := lcli.GetFullNodeAPI(cctx) + go func() { + t := time.NewTicker(time.Second) + + for { + select { + case <-t.C: + sinceGenesis := build.Clock.Now().Sub(genesisTime) + expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) + + stats.Record(ctx, metrics.TipsetCollectionHeightExpected.M(expectedHeight)) + } + } + }() + + store, err := ipldstore.NewApiIpldStore(ctx, api, ipldStoreCacheSizeFlag) if err != nil { return err } - defer closer() - if !noSyncFlag { - if err := stats.WaitForSyncComplete(ctx, api); err != nil { - log.Fatal(err) - } + collector, err := points.NewChainPointCollector(ctx, store, api) + if err != nil { + return err } - stats.Collect(ctx, api, influx, influxDatabaseFlag, height, headLagFlag) + tipsets, err := sync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headLagFlag) + if err != nil { + return err + } + + wq := influx.NewWriteQueue(ctx, influxClient) + 
defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + log.Warnw("failed to collect points", "err", err) + } else { + nb.SetDatabase(influxDatabaseFlag) + wq.AddBatch(nb) + } + } return nil }, diff --git a/cmd/lotus/backup.go b/cmd/lotus/backup.go index d41e0c098..4bdd21322 100644 --- a/cmd/lotus/backup.go +++ b/cmd/lotus/backup.go @@ -1,7 +1,6 @@ package main import ( - "context" "os" dstore "github.com/ipfs/go-datastore" @@ -88,7 +87,7 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Restoring metadata backup") - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(cctx.Context, "/metadata") if err != nil { return err } @@ -111,10 +110,10 @@ func restore(cctx *cli.Context, r repo.Repo) error { log.Info("Resetting chainstore metadata") chainHead := dstore.NewKey("head") - if err := mds.Delete(chainHead); err != nil { + if err := mds.Delete(cctx.Context, chainHead); err != nil { return xerrors.Errorf("clearing chain head: %w", err) } - if err := store.FlushValidationCache(mds); err != nil { + if err := store.FlushValidationCache(cctx.Context, mds); err != nil { return xerrors.Errorf("clearing chain validation cache: %w", err) } diff --git a/cmd/lotus/daemon.go b/cmd/lotus/daemon.go index 35f518129..73948b264 100644 --- a/cmd/lotus/daemon.go +++ b/cmd/lotus/daemon.go @@ -474,7 +474,7 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return xerrors.Errorf("failed to open blockstore: %w", err) } - mds, err := lr.Datastore(context.TODO(), "/metadata") + mds, err := lr.Datastore(ctx, "/metadata") if err != nil { return err } @@ -499,14 +499,14 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) bar.Units = pb.U_BYTES bar.Start() - ts, err := cst.Import(br) + ts, err := cst.Import(ctx, br) bar.Finish() if err != nil { return xerrors.Errorf("importing chain failed: %w", err) } - if err := 
cst.FlushValidationCache(); err != nil { + if err := cst.FlushValidationCache(ctx); err != nil { return xerrors.Errorf("flushing validation cache failed: %w", err) } @@ -515,13 +515,13 @@ func ImportChain(ctx context.Context, r repo.Repo, fname string, snapshot bool) return err } - err = cst.SetGenesis(gb.Blocks()[0]) + err = cst.SetGenesis(ctx, gb.Blocks()[0]) if err != nil { return err } // TODO: We need to supply the actual beacon after v14 - stm, err := stmgr.NewStateManager(cst, filcns.NewTipSetExecutor(), vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) + stm, err := stmgr.NewStateManager(cst, filcns.NewTipSetExecutor(), nil, vm.Syscalls(ffiwrapper.ProofVerifier), filcns.DefaultUpgradeSchedule(), nil) if err != nil { return err } diff --git a/cmd/lotus/main.go b/cmd/lotus/main.go index 7aa2e704e..c19b9fce4 100644 --- a/cmd/lotus/main.go +++ b/cmd/lotus/main.go @@ -39,7 +39,7 @@ func main() { jaeger := tracing.SetupJaegerTracing("lotus") defer func() { if jaeger != nil { - jaeger.Flush() + _ = jaeger.ForceFlush(context.Background()) } }() @@ -47,7 +47,9 @@ func main() { cmd := cmd originBefore := cmd.Before cmd.Before = func(cctx *cli.Context) error { - trace.UnregisterExporter(jaeger) + if jaeger != nil { + _ = jaeger.Shutdown(cctx.Context) + } jaeger = tracing.SetupJaegerTracing("lotus/" + cmd.Name) if originBefore != nil { diff --git a/cmd/tvx/codenames.go b/cmd/tvx/codenames.go index f8da07e8d..81143c85c 100644 --- a/cmd/tvx/codenames.go +++ b/cmd/tvx/codenames.go @@ -24,6 +24,15 @@ var ProtocolCodenames = []struct { {build.UpgradeTapeHeight + 1, "tape"}, {build.UpgradeLiftoffHeight + 1, "liftoff"}, {build.UpgradeKumquatHeight + 1, "postliftoff"}, + {build.UpgradeCalicoHeight + 1, "calico"}, + {build.UpgradePersianHeight + 1, "persian"}, + {build.UpgradeOrangeHeight + 1, "orange"}, + {build.UpgradeTrustHeight + 1, "trust"}, + {build.UpgradeNorwegianHeight + 1, "norwegian"}, + {build.UpgradeTurboHeight + 1, "turbo"}, + 
{build.UpgradeHyperdriveHeight + 1, "hyperdrive"}, + {build.UpgradeChocolateHeight + 1, "chocolate"}, + {build.UpgradeOhSnapHeight + 1, "ohsnap"}, } // GetProtocolCodename gets the protocol codename associated with a height. diff --git a/cmd/tvx/exec.go b/cmd/tvx/exec.go index 15bb543a5..750684c36 100644 --- a/cmd/tvx/exec.go +++ b/cmd/tvx/exec.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "io/fs" "log" "os" "path/filepath" @@ -136,25 +137,31 @@ func processTipsetOpts() error { } func execVectorDir(path string, outdir string) error { - files, err := filepath.Glob(filepath.Join(path, "*")) - if err != nil { - return fmt.Errorf("failed to glob input directory %s: %w", path, err) - } - for _, f := range files { - outfile := strings.TrimSuffix(filepath.Base(f), filepath.Ext(f)) + ".out" + return filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return fmt.Errorf("failed while visiting path %s: %w", path, err) + } + if d.IsDir() || !strings.HasSuffix(path, "json") { + return nil + } + // Create an output file to capture the output from the run of the vector. + outfile := strings.TrimSuffix(filepath.Base(path), filepath.Ext(path)) + ".out" outpath := filepath.Join(outdir, outfile) outw, err := os.Create(outpath) if err != nil { return fmt.Errorf("failed to create file %s: %w", outpath, err) } - log.Printf("processing vector %s; sending output to %s", f, outpath) + log.Printf("processing vector %s; sending output to %s", path, outpath) + + // Actually run the vector. log.SetOutput(io.MultiWriter(os.Stderr, outw)) // tee the output. 
- _, _ = execVectorFile(new(conformance.LogReporter), f) + _, _ = execVectorFile(new(conformance.LogReporter), path) log.SetOutput(os.Stderr) _ = outw.Close() - } - return nil + + return nil + }) } func execVectorsStdin() error { diff --git a/cmd/tvx/extract_message.go b/cmd/tvx/extract_message.go index 71035867f..68376654a 100644 --- a/cmd/tvx/extract_message.go +++ b/cmd/tvx/extract_message.go @@ -8,12 +8,11 @@ import ( "io" "log" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/fatih/color" "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/actors/builtin" init_ "github.com/filecoin-project/lotus/chain/actors/builtin/init" "github.com/filecoin-project/lotus/chain/actors/builtin/reward" @@ -43,6 +42,15 @@ func doExtractMessage(opts extractOpts) error { return fmt.Errorf("failed to resolve message and tipsets from chain: %w", err) } + // Assumes that the desired message isn't at the boundary of network versions. + // Otherwise this will be inaccurate. But it's such a tiny edge case that + // it's not worth spending the time to support boundary messages unless + // actually needed. + nv, err := FullAPI.StateNetworkVersion(ctx, incTs.Key()) + if err != nil { + return fmt.Errorf("failed to resolve network version from inclusion height: %w", err) + } + // get the circulating supply before the message was executed. 
circSupplyDetail, err := FullAPI.StateVMCirculatingSupplyInternal(ctx, incTs.Key()) if err != nil { @@ -53,6 +61,7 @@ func doExtractMessage(opts extractOpts) error { log.Printf("message was executed in tipset: %s", execTs.Key()) log.Printf("message was included in tipset: %s", incTs.Key()) + log.Printf("network version at inclusion: %d", nv) log.Printf("circulating supply at inclusion tipset: %d", circSupply) log.Printf("finding precursor messages using mode: %s", opts.precursor) @@ -110,7 +119,8 @@ func doExtractMessage(opts extractOpts) error { CircSupply: circSupplyDetail.FilCirculating, BaseFee: basefee, // recorded randomness will be discarded. - Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + Rand: conformance.NewRecordingRand(new(conformance.LogReporter), FullAPI), + NetworkVersion: nv, }) if err != nil { return fmt.Errorf("failed to execute precursor message: %w", err) @@ -140,12 +150,13 @@ func doExtractMessage(opts extractOpts) error { preroot = root applyret, postroot, err = driver.ExecuteMessage(pst.Blockstore, conformance.ExecuteMessageParams{ - Preroot: preroot, - Epoch: execTs.Height(), - Message: msg, - CircSupply: circSupplyDetail.FilCirculating, - BaseFee: basefee, - Rand: recordingRand, + Preroot: preroot, + Epoch: execTs.Height(), + Message: msg, + CircSupply: circSupplyDetail.FilCirculating, + BaseFee: basefee, + Rand: recordingRand, + NetworkVersion: nv, }) if err != nil { return fmt.Errorf("failed to execute message: %w", err) @@ -263,11 +274,6 @@ func doExtractMessage(opts extractOpts) error { return err } - nv, err := FullAPI.StateNetworkVersion(ctx, execTs.Key()) - if err != nil { - return err - } - codename := GetProtocolCodename(execTs.Height()) // Write out the test vector. 
diff --git a/cmd/tvx/simulate.go b/cmd/tvx/simulate.go index da9a034e9..5428e16ee 100644 --- a/cmd/tvx/simulate.go +++ b/cmd/tvx/simulate.go @@ -129,6 +129,7 @@ func runSimulateCmd(_ *cli.Context) error { CircSupply: circSupply.FilCirculating, BaseFee: baseFee, Rand: rand, + // TODO NetworkVersion }) if err != nil { return fmt.Errorf("failed to apply message: %w", err) diff --git a/cmd/tvx/stores.go b/cmd/tvx/stores.go index 040005641..ba54a7f9e 100644 --- a/cmd/tvx/stores.go +++ b/cmd/tvx/stores.go @@ -113,14 +113,14 @@ func (pb *proxyingBlockstore) FinishTracing() map[cid.Cid]struct{} { return ret } -func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { +func (pb *proxyingBlockstore) Get(ctx context.Context, cid cid.Cid) (blocks.Block, error) { pb.lk.Lock() if pb.tracing { pb.traced[cid] = struct{}{} } pb.lk.Unlock() - if block, err := pb.Blockstore.Get(cid); err == nil { + if block, err := pb.Blockstore.Get(ctx, cid); err == nil { return block, err } @@ -134,7 +134,7 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return nil, err } - err = pb.Blockstore.Put(block) + err = pb.Blockstore.Put(ctx, block) if err != nil { return nil, err } @@ -142,16 +142,16 @@ func (pb *proxyingBlockstore) Get(cid cid.Cid) (blocks.Block, error) { return block, nil } -func (pb *proxyingBlockstore) Put(block blocks.Block) error { +func (pb *proxyingBlockstore) Put(ctx context.Context, block blocks.Block) error { pb.lk.Lock() if pb.tracing { pb.traced[block.Cid()] = struct{}{} } pb.lk.Unlock() - return pb.Blockstore.Put(block) + return pb.Blockstore.Put(ctx, block) } -func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { +func (pb *proxyingBlockstore) PutMany(ctx context.Context, blocks []blocks.Block) error { pb.lk.Lock() if pb.tracing { for _, b := range blocks { @@ -159,5 +159,5 @@ func (pb *proxyingBlockstore) PutMany(blocks []blocks.Block) error { } } pb.lk.Unlock() - return pb.Blockstore.PutMany(blocks) + return 
pb.Blockstore.PutMany(ctx, blocks) } diff --git a/conformance/driver.go b/conformance/driver.go index 8669089da..0fc1800dd 100644 --- a/conformance/driver.go +++ b/conformance/driver.go @@ -5,6 +5,8 @@ import ( gobig "math/big" "os" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/blockstore" "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/state" @@ -104,7 +106,7 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params cs = store.NewChainStore(bs, bs, ds, filcns.Weight, nil) tse = filcns.NewTipSetExecutor() - sm, err = stmgr.NewStateManager(cs, tse, syscalls, filcns.DefaultUpgradeSchedule(), nil) + sm, err = stmgr.NewStateManager(cs, tse, nil, syscalls, filcns.DefaultUpgradeSchedule(), nil) ) if err != nil { return nil, err @@ -153,6 +155,14 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params results: []*vm.ApplyRet{}, } + sm.SetVMConstructor(func(ctx context.Context, vmopt *vm.VMOpts) (*vm.VM, error) { + vmopt.CircSupplyCalc = func(context.Context, abi.ChainEpoch, *state.StateTree) (abi.TokenAmount, error) { + return big.Zero(), nil + } + + return vm.NewVM(ctx, vmopt) + }) + postcid, receiptsroot, err := tse.ApplyBlocks(context.Background(), sm, params.ParentEpoch, @@ -179,11 +189,12 @@ func (d *Driver) ExecuteTipset(bs blockstore.Blockstore, ds ds.Batching, params } type ExecuteMessageParams struct { - Preroot cid.Cid - Epoch abi.ChainEpoch - Message *types.Message - CircSupply abi.TokenAmount - BaseFee abi.TokenAmount + Preroot cid.Cid + Epoch abi.ChainEpoch + Message *types.Message + CircSupply abi.TokenAmount + BaseFee abi.TokenAmount + NetworkVersion network.Version // Rand is an optional vm.Rand implementation to use. If nil, the driver // will use a vm.Rand that returns a fixed value for all calls. 
@@ -202,13 +213,6 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP params.Rand = NewFixedRand() } - // dummy state manager; only to reference the GetNetworkVersion method, - // which does not depend on state. - sm, err := stmgr.NewStateManager(nil, filcns.NewTipSetExecutor(), nil, filcns.DefaultUpgradeSchedule(), nil) - if err != nil { - return nil, cid.Cid{}, err - } - vmOpts := &vm.VMOpts{ StateBase: params.Preroot, Epoch: params.Epoch, @@ -217,9 +221,9 @@ func (d *Driver) ExecuteMessage(bs blockstore.Blockstore, params ExecuteMessageP CircSupplyCalc: func(_ context.Context, _ abi.ChainEpoch, _ *state.StateTree) (abi.TokenAmount, error) { return params.CircSupply, nil }, - Rand: params.Rand, - BaseFee: params.BaseFee, - NtwkVersion: sm.GetNtwkVersion, + Rand: params.Rand, + BaseFee: params.BaseFee, + NetworkVersion: params.NetworkVersion, } lvm, err := vm.NewVM(context.TODO(), vmOpts) diff --git a/conformance/rand_fixed.go b/conformance/rand_fixed.go index c34980efe..d356b53d0 100644 --- a/conformance/rand_fixed.go +++ b/conformance/rand_fixed.go @@ -19,22 +19,10 @@ func NewFixedRand() vm.Rand { return &fixedRand{} } -func (r *fixedRand) GetChainRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetChainRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } -func (r *fixedRand) GetChainRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV3(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. 
-} - -func (r *fixedRand) GetBeaconRandomnessV1(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { - return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. -} - -func (r *fixedRand) GetBeaconRandomnessV2(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { +func (r *fixedRand) GetBeaconRandomness(_ context.Context, _ crypto.DomainSeparationTag, _ abi.ChainEpoch, _ []byte) ([]byte, error) { return []byte("i_am_random_____i_am_random_____"), nil // 32 bytes. } diff --git a/conformance/rand_record.go b/conformance/rand_record.go index 97bd93eb4..8422ad31d 100644 --- a/conformance/rand_record.go +++ b/conformance/rand_record.go @@ -45,17 +45,9 @@ func (r *RecordingRand) loadHead() { r.head = head.Key() } -func (r *RecordingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) - // FullNode's ChainGetRandomnessFromTickets handles whether we should be looking forward or back + // FullNode's v0 ChainGetRandomnessFromTickets handles whether we should be looking forward or back ret, err := r.api.ChainGetRandomnessFromTickets(ctx, r.head, pers, round, entropy) if err != nil { return ret, err @@ -79,19 +71,7 @@ func (r *RecordingRand) getChainRandomness(ctx context.Context, pers crypto.Doma return 
ret, err } -func (r *RecordingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy) -} - -func (r *RecordingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { +func (r *RecordingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { r.once.Do(r.loadHead) ret, err := r.api.StateGetRandomnessFromBeacon(ctx, pers, round, entropy, r.head) if err != nil { diff --git a/conformance/rand_replay.go b/conformance/rand_replay.go index 5d850f7eb..6c2282752 100644 --- a/conformance/rand_replay.go +++ b/conformance/rand_replay.go @@ -43,15 +43,7 @@ func (r *ReplayingRand) match(requested schema.RandomnessRule) ([]byte, bool) { return nil, false } -func (r *ReplayingRand) GetChainRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetChainRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getChainRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, 
error) { +func (r *ReplayingRand) GetChainRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessChain, DomainSeparationTag: int64(pers), @@ -66,26 +58,10 @@ func (r *ReplayingRand) getChainRandomness(ctx context.Context, pers crypto.Doma r.reporter.Logf("returning fallback chain randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) - if lookback { - return r.fallback.GetChainRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetChainRandomnessV2(ctx, pers, round, entropy) -} - -func (r *ReplayingRand) GetBeaconRandomnessV3(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, false) -} - -func (r *ReplayingRand) GetBeaconRandomnessV1(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) + return r.fallback.GetChainRandomness(ctx, pers, round, entropy) } -func (r *ReplayingRand) GetBeaconRandomnessV2(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { - return r.getBeaconRandomness(ctx, pers, round, entropy, true) -} - -func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte, lookback bool) ([]byte, error) { +func (r *ReplayingRand) GetBeaconRandomness(ctx context.Context, pers crypto.DomainSeparationTag, round abi.ChainEpoch, entropy []byte) ([]byte, error) { rule := schema.RandomnessRule{ Kind: schema.RandomnessBeacon, DomainSeparationTag: int64(pers), @@ -100,9 +76,5 @@ func (r *ReplayingRand) getBeaconRandomness(ctx context.Context, pers crypto.Dom r.reporter.Logf("returning fallback beacon randomness: dst=%d, epoch=%d, entropy=%x", pers, round, entropy) 
- if lookback { - return r.fallback.GetBeaconRandomnessV1(ctx, pers, round, entropy) - } - - return r.fallback.GetBeaconRandomnessV3(ctx, pers, round, entropy) + return r.fallback.GetBeaconRandomness(ctx, pers, round, entropy) } diff --git a/conformance/runner.go b/conformance/runner.go index 1044bb329..fd44ecff9 100644 --- a/conformance/runner.go +++ b/conformance/runner.go @@ -7,6 +7,7 @@ import ( "encoding/base64" "fmt" "io/ioutil" + "math" "os" "os/exec" "strconv" @@ -14,6 +15,7 @@ import ( "github.com/fatih/color" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/go-state-types/network" "github.com/hashicorp/go-multierror" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-blockservice" @@ -27,6 +29,7 @@ import ( "github.com/filecoin-project/test-vectors/schema" "github.com/filecoin-project/lotus/blockstore" + "github.com/filecoin-project/lotus/chain/consensus/filcns" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" ) @@ -50,11 +53,58 @@ var TipsetVectorOpts struct { OnTipsetApplied []func(bs blockstore.Blockstore, params *ExecuteTipsetParams, res *ExecuteTipsetResult) } +type GasPricingRestoreFn func() + +// adjustGasPricing adjusts the global gas price mapping to make sure that the +// gas pricelist for vector's network version is used at the vector's epoch. +// Because it manipulates a global, it returns a function that reverts the +// change. The caller MUST invoke this function or the test vector runner will +// become invalid. +func adjustGasPricing(vectorEpoch abi.ChainEpoch, vectorNv network.Version) GasPricingRestoreFn { + // Stash the current pricing mapping. + // Ok to take a reference instead of a copy, because we override the map + // with a new one below. + var old = vm.Prices + + // Resolve the epoch at which the vector network version kicks in. 
+ var epoch abi.ChainEpoch = math.MaxInt64 + if vectorNv == network.Version0 { + // genesis is not an upgrade. + epoch = 0 + } else { + for _, u := range filcns.DefaultUpgradeSchedule() { + if u.Network == vectorNv { + epoch = u.Height + break + } + } + } + + if epoch == math.MaxInt64 { + panic(fmt.Sprintf("could not resolve network version %d to height", vectorNv)) + } + + // Find the right pricelist for this network version. + pricelist := vm.PricelistByEpoch(epoch) + + // Override the pricing mapping by setting the relevant pricelist for the + // network version at the epoch where the vector runs. + vm.Prices = map[abi.ChainEpoch]vm.Pricelist{ + vectorEpoch: pricelist, + } + + // Return a function to restore the original mapping. + return func() { + vm.Prices = old + } +} + // ExecuteMessageVector executes a message-class test vector. func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema.Variant) (diffs []string, err error) { var ( ctx = context.Background() - baseEpoch = variant.Epoch + baseEpoch = abi.ChainEpoch(variant.Epoch) + nv = network.Version(variant.NetworkVersion) root = vector.Pre.StateTree.RootCID ) @@ -67,6 +117,10 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // Create a new Driver. driver := NewDriver(ctx, vector.Selector, DriverOpts{DisableVMFlush: true}) + // Monkey patch the gas pricing. + revertFn := adjustGasPricing(baseEpoch, nv) + defer revertFn() + // Apply every message. for i, m := range vector.ApplyMessages { msg, err := types.DecodeMessage(m.Bytes) @@ -76,18 +130,19 @@ func ExecuteMessageVector(r Reporter, vector *schema.TestVector, variant *schema // add the epoch offset if one is set. if m.EpochOffset != nil { - baseEpoch += *m.EpochOffset + baseEpoch += abi.ChainEpoch(*m.EpochOffset) } // Execute the message. 
var ret *vm.ApplyRet ret, root, err = driver.ExecuteMessage(bs, ExecuteMessageParams{ - Preroot: root, - Epoch: abi.ChainEpoch(baseEpoch), - Message: msg, - BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), - CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), - Rand: NewReplayingRand(r, vector.Randomness), + Preroot: root, + Epoch: baseEpoch, + Message: msg, + BaseFee: BaseFeeOrDefault(vector.Pre.BaseFee), + CircSupply: CircSupplyOrDefault(vector.Pre.CircSupply), + Rand: NewReplayingRand(r, vector.Randomness), + NetworkVersion: nv, }) if err != nil { r.Fatalf("fatal failure when executing message: %s", err) @@ -184,8 +239,10 @@ func ExecuteTipsetVector(r Reporter, vector *schema.TestVector, variant *schema. func AssertMsgResult(r Reporter, expected *schema.Receipt, actual *vm.ApplyRet, label string) { r.Helper() + applyret := actual if expected, actual := exitcode.ExitCode(expected.ExitCode), actual.ExitCode; expected != actual { r.Errorf("exit code of msg %s did not match; expected: %s, got: %s", label, expected, actual) + r.Errorf("\t\\==> actor error: %s", applyret.ActorErr) } if expected, actual := expected.GasUsed, actual.GasUsed; expected != actual { r.Errorf("gas used of msg %s did not match; expected: %d, got: %d", label, expected, actual) @@ -282,7 +339,7 @@ func writeStateToTempCAR(bs blockstore.Blockstore, roots ...cid.Cid) (string, er continue } // ignore things we don't have, the state tree is incomplete. - if has, err := bs.Has(link.Cid); err != nil { + if has, err := bs.Has(context.TODO(), link.Cid); err != nil { return nil, err } else if has { out = append(out, link) @@ -317,7 +374,7 @@ func LoadBlockstore(vectorCAR schema.Base64EncodedBytes) (blockstore.Blockstore, defer r.Close() // nolint // Load the CAR embedded in the test vector into the Blockstore. 
- _, err = car.LoadCar(bs, r) + _, err = car.LoadCar(context.TODO(), bs, r) if err != nil { return nil, fmt.Errorf("failed to load state tree car from test vector: %s", err) } diff --git a/data/alice/config.toml b/data/alice/config.toml deleted file mode 100644 index 32ff945a4..000000000 --- a/data/alice/config.toml +++ /dev/null @@ -1,194 +0,0 @@ -[API] - # Binding address for the Lotus API - # - # type: string - # env var: LOTUS_API_LISTENADDRESS - ListenAddress = "/ip4/0.0.0.0/tcp/1234/http" - - # type: string - # env var: LOTUS_API_REMOTELISTENADDRESS - #RemoteListenAddress = "" - - # type: Duration - # env var: LOTUS_API_TIMEOUT - #Timeout = "30s" - - -[Backup] - # Note that in case of metadata corruption it might be much harder to recover - # your node if metadata log is disabled - # - # type: bool - # env var: LOTUS_BACKUP_DISABLEMETADATALOG - #DisableMetadataLog = false - - -[Libp2p] - # Binding address for the libp2p host - 0 means random port. - # Format: multiaddress; see https://multiformats.io/multiaddr/ - # - # type: []string - # env var: LOTUS_LIBP2P_LISTENADDRESSES - ListenAddresses = ["/ip4/0.0.0.0/tcp/3000"] - - # Addresses to explicitally announce to other peers. If not specified, - # all interface addresses are announced - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES - #AnnounceAddresses = [] - - # Addresses to not announce - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES - #NoAnnounceAddresses = [] - - # When not disabled (default), lotus asks NAT devices (e.g., routers), to - # open up an external port and forward it to the port lotus is running on. 
- # When this works (i.e., when your router supports NAT port forwarding), - # it makes the local lotus node accessible from the public internet - # - # type: bool - # env var: LOTUS_LIBP2P_DISABLENATPORTMAP - #DisableNatPortMap = false - - # ConnMgrLow is the number of connections that the basic connection manager - # will trim down to. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRLOW - #ConnMgrLow = 150 - - # ConnMgrHigh is the number of connections that, when exceeded, will trigger - # a connection GC operation. Note: protected/recently formed connections don't - # count towards this limit. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRHIGH - #ConnMgrHigh = 180 - - # ConnMgrGrace is a time duration that new connections are immune from being - # closed by the connection manager. - # - # type: Duration - # env var: LOTUS_LIBP2P_CONNMGRGRACE - #ConnMgrGrace = "20s" - - -[Pubsub] - # Run the node in bootstrap-node mode - # - # type: bool - # env var: LOTUS_PUBSUB_BOOTSTRAPPER - #Bootstrapper = false - - # type: string - # env var: LOTUS_PUBSUB_REMOTETRACER - #RemoteTracer = "" - - -[Client] - # type: bool - # env var: LOTUS_CLIENT_USEIPFS - #UseIpfs = false - - # type: bool - # env var: LOTUS_CLIENT_IPFSONLINEMODE - #IpfsOnlineMode = false - - # type: string - # env var: LOTUS_CLIENT_IPFSMADDR - #IpfsMAddr = "" - - # type: bool - # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL - #IpfsUseForRetrieval = false - - # The maximum number of simultaneous data transfers between the client - # and storage providers for storage deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers between the client - # and storage providers for retrieval deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - - -[Wallet] - # type: string - # env var: LOTUS_WALLET_REMOTEBACKEND - 
#RemoteBackend = "" - - # type: bool - # env var: LOTUS_WALLET_ENABLELEDGER - #EnableLedger = false - - # type: bool - # env var: LOTUS_WALLET_DISABLELOCAL - #DisableLocal = false - - -[Fees] - # type: types.FIL - # env var: LOTUS_FEES_DEFAULTMAXFEE - #DefaultMaxFee = "0.07 FIL" - - -[Chainstore] - # type: bool - # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE - #EnableSplitstore = false - - [Chainstore.Splitstore] - # ColdStoreType specifies the type of the coldstore. - # It can be "universal" (default) or "discard" for discarding cold blocks. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE - #ColdStoreType = "universal" - - # HotStoreType specifies the type of the hotstore. - # Only currently supported value is "badger". - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE - #HotStoreType = "badger" - - # MarkSetType specifies the type of the markset. - # It can be "map" (default) for in memory marking or "badger" for on-disk marking. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE - #MarkSetType = "map" - - # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond - # the compaction boundary; default is 0. - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION - #HotStoreMessageRetention = 0 - - # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. - # A value of 0 disables, while a value 1 will do full GC in every compaction. - # Default is 20 (about once a week). 
- # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY - #HotStoreFullGCFrequency = 20 - -[Checkpoint] - Fee = 0.01 - BitcoinHost="http://btc-rpc:8332" - PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - MinioHost = "minio:9000" - MinioAccessKeyID = "lola" - MinioSecretAccessKey = "123secure" - MinioBucketName = "eudico" \ No newline at end of file diff --git a/data/alice/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU b/data/alice/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU deleted file mode 100644 index ee63c46ab..000000000 --- a/data/alice/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU +++ /dev/null @@ -1 +0,0 @@ -{"Type":"jwt-hmac-secret","PrivateKey":"uqujAEsAAM0A+4GwmywzA3uiBLJgRkpS4geuXZQzXko="} \ No newline at end of file diff --git a/data/alice/keystore/MRSWMYLVNR2A b/data/alice/keystore/MRSWMYLVNR2A deleted file mode 100644 index 3e74a17f6..000000000 --- a/data/alice/keystore/MRSWMYLVNR2A +++ /dev/null @@ -1 +0,0 @@ -{"Type":"secp256k1","PrivateKey":"8VcW07ADswS4BV2cxi5rnIadVsyTDDhY1NfDH19T8Uo="} \ No newline at end of file diff --git a/data/alice/keystore/NRUWE4BSOAWWQ33TOQ b/data/alice/keystore/NRUWE4BSOAWWQ33TOQ deleted file mode 100644 index 116f7a2be..000000000 --- a/data/alice/keystore/NRUWE4BSOAWWQ33TOQ +++ /dev/null @@ -1 +0,0 @@ -{"Type":"libp2p-host","PrivateKey":"CAESQEztxOsWTHFFmqkcTzyVl0ZaRKNPtWVcpv6YU0YPMAOaqOCjp1qnEbCnn1zifx1Qy8JwD0RBlm4WwFJWZCZaTZc="} \ No newline at end of file diff --git a/data/alice/keystore/O5QWY3DFOQWXIMLEGJ4HE6TDONWHQN3YNRRGE6LMMM2WGM3EGVWHMYLOMRYXONDJO5WDMZLQPBRGC b/data/alice/keystore/O5QWY3DFOQWXIMLEGJ4HE6TDONWHQN3YNRRGE6LMMM2WGM3EGVWHMYLOMRYXONDJO5WDMZLQPBRGC deleted file mode 100644 index 3e74a17f6..000000000 --- a/data/alice/keystore/O5QWY3DFOQWXIMLEGJ4HE6TDONWHQN3YNRRGE6LMMM2WGM3EGVWHMYLOMRYXONDJO5WDMZLQPBRGC +++ /dev/null @@ -1 +0,0 @@ -{"Type":"secp256k1","PrivateKey":"8VcW07ADswS4BV2cxi5rnIadVsyTDDhY1NfDH19T8Uo="} \ No newline at end of file diff --git a/data/alice/share.toml 
b/data/alice/share.toml deleted file mode 100755 index 6395c5e0c..000000000 --- a/data/alice/share.toml +++ /dev/null @@ -1,15 +0,0 @@ -Treshold=2 -PrivateShare="0fac81c461206928dcddcebe8c77343ddb3adba0f74f47dcf2d8d0461796df40" -PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - -[VerificationShares] - -[VerificationShares.12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4] -Share="02de7b1922283afa65adbacf74e543c0841fd84d948911d668d10835df07f5c203" - -[VerificationShares.12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t] -Share="03dda30f606203bfa25da11375eaac64f915401b8bfb8067b0241d467a1cdab681" - -[VerificationShares.12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6] -Share="034b172e82079d742c84c9ed0a6e4fc2d9172edae9507d81624446754f9eed709c" - diff --git a/data/alice/token b/data/alice/token deleted file mode 100644 index efa5ba845..000000000 --- a/data/alice/token +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.5vRs10xGPlN132VXqL396WAPiqYTVPKPasqu1TMLqzU \ No newline at end of file diff --git a/data/bob/config.toml b/data/bob/config.toml deleted file mode 100644 index a662bbe81..000000000 --- a/data/bob/config.toml +++ /dev/null @@ -1,194 +0,0 @@ -[API] - # Binding address for the Lotus API - # - # type: string - # env var: LOTUS_API_LISTENADDRESS - ListenAddress = "/ip4/0.0.0.0/tcp/1235/http" - - # type: string - # env var: LOTUS_API_REMOTELISTENADDRESS - #RemoteListenAddress = "" - - # type: Duration - # env var: LOTUS_API_TIMEOUT - #Timeout = "30s" - - -[Backup] - # Note that in case of metadata corruption it might be much harder to recover - # your node if metadata log is disabled - # - # type: bool - # env var: LOTUS_BACKUP_DISABLEMETADATALOG - #DisableMetadataLog = false - - -[Libp2p] - # Binding address for the libp2p host - 0 means random port. 
- # Format: multiaddress; see https://multiformats.io/multiaddr/ - # - # type: []string - # env var: LOTUS_LIBP2P_LISTENADDRESSES - ListenAddresses = ["/ip4/0.0.0.0/tcp/3001"] - - # Addresses to explicitally announce to other peers. If not specified, - # all interface addresses are announced - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES - #AnnounceAddresses = [] - - # Addresses to not announce - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES - #NoAnnounceAddresses = [] - - # When not disabled (default), lotus asks NAT devices (e.g., routers), to - # open up an external port and forward it to the port lotus is running on. - # When this works (i.e., when your router supports NAT port forwarding), - # it makes the local lotus node accessible from the public internet - # - # type: bool - # env var: LOTUS_LIBP2P_DISABLENATPORTMAP - #DisableNatPortMap = false - - # ConnMgrLow is the number of connections that the basic connection manager - # will trim down to. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRLOW - #ConnMgrLow = 150 - - # ConnMgrHigh is the number of connections that, when exceeded, will trigger - # a connection GC operation. Note: protected/recently formed connections don't - # count towards this limit. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRHIGH - #ConnMgrHigh = 180 - - # ConnMgrGrace is a time duration that new connections are immune from being - # closed by the connection manager. 
- # - # type: Duration - # env var: LOTUS_LIBP2P_CONNMGRGRACE - #ConnMgrGrace = "20s" - - -[Pubsub] - # Run the node in bootstrap-node mode - # - # type: bool - # env var: LOTUS_PUBSUB_BOOTSTRAPPER - #Bootstrapper = false - - # type: string - # env var: LOTUS_PUBSUB_REMOTETRACER - #RemoteTracer = "" - - -[Client] - # type: bool - # env var: LOTUS_CLIENT_USEIPFS - #UseIpfs = false - - # type: bool - # env var: LOTUS_CLIENT_IPFSONLINEMODE - #IpfsOnlineMode = false - - # type: string - # env var: LOTUS_CLIENT_IPFSMADDR - #IpfsMAddr = "" - - # type: bool - # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL - #IpfsUseForRetrieval = false - - # The maximum number of simultaneous data transfers between the client - # and storage providers for storage deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers between the client - # and storage providers for retrieval deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - - -[Wallet] - # type: string - # env var: LOTUS_WALLET_REMOTEBACKEND - #RemoteBackend = "" - - # type: bool - # env var: LOTUS_WALLET_ENABLELEDGER - #EnableLedger = false - - # type: bool - # env var: LOTUS_WALLET_DISABLELOCAL - #DisableLocal = false - - -[Fees] - # type: types.FIL - # env var: LOTUS_FEES_DEFAULTMAXFEE - #DefaultMaxFee = "0.07 FIL" - - -[Chainstore] - # type: bool - # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE - #EnableSplitstore = false - - [Chainstore.Splitstore] - # ColdStoreType specifies the type of the coldstore. - # It can be "universal" (default) or "discard" for discarding cold blocks. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE - #ColdStoreType = "universal" - - # HotStoreType specifies the type of the hotstore. - # Only currently supported value is "badger". 
- # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE - #HotStoreType = "badger" - - # MarkSetType specifies the type of the markset. - # It can be "map" (default) for in memory marking or "badger" for on-disk marking. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE - #MarkSetType = "map" - - # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond - # the compaction boundary; default is 0. - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION - #HotStoreMessageRetention = 0 - - # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. - # A value of 0 disables, while a value 1 will do full GC in every compaction. - # Default is 20 (about once a week). - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY - #HotStoreFullGCFrequency = 20 - -[Checkpoint] - Fee = 0.01 - BitcoinHost="http://btc-rpc:8332" - PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - MinioHost = "minio:9000" - MinioAccessKeyID = "lola" - MinioSecretAccessKey = "123secure" - MinioBucketName = "eudico" \ No newline at end of file diff --git a/data/bob/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU b/data/bob/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU deleted file mode 100644 index f7a10cceb..000000000 --- a/data/bob/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU +++ /dev/null @@ -1 +0,0 @@ -{"Type":"jwt-hmac-secret","PrivateKey":"XySQypIONp3i5/hTlV01537sm6fdjxaYKsYVFMSAuwM="} \ No newline at end of file diff --git a/data/bob/keystore/NRUWE4BSOAWWQ33TOQ b/data/bob/keystore/NRUWE4BSOAWWQ33TOQ deleted file mode 100644 index 311666243..000000000 --- a/data/bob/keystore/NRUWE4BSOAWWQ33TOQ +++ /dev/null @@ -1 +0,0 @@ -{"Type":"libp2p-host","PrivateKey":"CAESQEuL5Q2fYZCNH+zSJ3FziYhP7w3PKkmWPUSapKCM9It2u+8En9WWVaKdOOkwJ1GPTsB6FtUU76isveFMiJAxVAU="} \ No newline at end of file diff --git a/data/bob/share.toml 
b/data/bob/share.toml deleted file mode 100755 index 8f69161f1..000000000 --- a/data/bob/share.toml +++ /dev/null @@ -1,15 +0,0 @@ -Treshold=2 -PrivateShare="93810b585c063419dd04555b37b7ba822b6309fcac8a6ff3ac5ef793139f4b1b" -PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - -[VerificationShares] - -[VerificationShares.12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6] -Share="034b172e82079d742c84c9ed0a6e4fc2d9172edae9507d81624446754f9eed709c" - -[VerificationShares.12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4] -Share="02de7b1922283afa65adbacf74e543c0841fd84d948911d668d10835df07f5c203" - -[VerificationShares.12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t] -Share="03dda30f606203bfa25da11375eaac64f915401b8bfb8067b0241d467a1cdab681" - diff --git a/data/bob/token b/data/bob/token deleted file mode 100644 index dfc037d67..000000000 --- a/data/bob/token +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.5t2bfN0HkFcf8BMLbfuVOFA_8ojdbCQ0mSjwsPnVbf4 \ No newline at end of file diff --git a/data/charlie/config.toml b/data/charlie/config.toml deleted file mode 100644 index 9db03b0dc..000000000 --- a/data/charlie/config.toml +++ /dev/null @@ -1,194 +0,0 @@ -[API] - # Binding address for the Lotus API - # - # type: string - # env var: LOTUS_API_LISTENADDRESS - ListenAddress = "/ip4/0.0.0.0/tcp/1236/http" - - # type: string - # env var: LOTUS_API_REMOTELISTENADDRESS - #RemoteListenAddress = "" - - # type: Duration - # env var: LOTUS_API_TIMEOUT - #Timeout = "30s" - - -[Backup] - # Note that in case of metadata corruption it might be much harder to recover - # your node if metadata log is disabled - # - # type: bool - # env var: LOTUS_BACKUP_DISABLEMETADATALOG - #DisableMetadataLog = false - - -[Libp2p] - # Binding address for the libp2p host - 0 means random port. 
- # Format: multiaddress; see https://multiformats.io/multiaddr/ - # - # type: []string - # env var: LOTUS_LIBP2P_LISTENADDRESSES - ListenAddresses = ["/ip4/0.0.0.0/tcp/3002"] - - # Addresses to explicitally announce to other peers. If not specified, - # all interface addresses are announced - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES - #AnnounceAddresses = [] - - # Addresses to not announce - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES - #NoAnnounceAddresses = [] - - # When not disabled (default), lotus asks NAT devices (e.g., routers), to - # open up an external port and forward it to the port lotus is running on. - # When this works (i.e., when your router supports NAT port forwarding), - # it makes the local lotus node accessible from the public internet - # - # type: bool - # env var: LOTUS_LIBP2P_DISABLENATPORTMAP - #DisableNatPortMap = false - - # ConnMgrLow is the number of connections that the basic connection manager - # will trim down to. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRLOW - #ConnMgrLow = 150 - - # ConnMgrHigh is the number of connections that, when exceeded, will trigger - # a connection GC operation. Note: protected/recently formed connections don't - # count towards this limit. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRHIGH - #ConnMgrHigh = 180 - - # ConnMgrGrace is a time duration that new connections are immune from being - # closed by the connection manager. 
- # - # type: Duration - # env var: LOTUS_LIBP2P_CONNMGRGRACE - #ConnMgrGrace = "20s" - - -[Pubsub] - # Run the node in bootstrap-node mode - # - # type: bool - # env var: LOTUS_PUBSUB_BOOTSTRAPPER - #Bootstrapper = false - - # type: string - # env var: LOTUS_PUBSUB_REMOTETRACER - #RemoteTracer = "" - - -[Client] - # type: bool - # env var: LOTUS_CLIENT_USEIPFS - #UseIpfs = false - - # type: bool - # env var: LOTUS_CLIENT_IPFSONLINEMODE - #IpfsOnlineMode = false - - # type: string - # env var: LOTUS_CLIENT_IPFSMADDR - #IpfsMAddr = "" - - # type: bool - # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL - #IpfsUseForRetrieval = false - - # The maximum number of simultaneous data transfers between the client - # and storage providers for storage deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers between the client - # and storage providers for retrieval deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - - -[Wallet] - # type: string - # env var: LOTUS_WALLET_REMOTEBACKEND - #RemoteBackend = "" - - # type: bool - # env var: LOTUS_WALLET_ENABLELEDGER - #EnableLedger = false - - # type: bool - # env var: LOTUS_WALLET_DISABLELOCAL - #DisableLocal = false - - -[Fees] - # type: types.FIL - # env var: LOTUS_FEES_DEFAULTMAXFEE - #DefaultMaxFee = "0.07 FIL" - - -[Chainstore] - # type: bool - # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE - #EnableSplitstore = false - - [Chainstore.Splitstore] - # ColdStoreType specifies the type of the coldstore. - # It can be "universal" (default) or "discard" for discarding cold blocks. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE - #ColdStoreType = "universal" - - # HotStoreType specifies the type of the hotstore. - # Only currently supported value is "badger". 
- # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE - #HotStoreType = "badger" - - # MarkSetType specifies the type of the markset. - # It can be "map" (default) for in memory marking or "badger" for on-disk marking. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE - #MarkSetType = "map" - - # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond - # the compaction boundary; default is 0. - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION - #HotStoreMessageRetention = 0 - - # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. - # A value of 0 disables, while a value 1 will do full GC in every compaction. - # Default is 20 (about once a week). - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY - #HotStoreFullGCFrequency = 20 - -[Checkpoint] - Fee = 0.01 - BitcoinHost="http://btc-rpc:8332" - PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - MinioHost = "minio:9000" - MinioAccessKeyID = "lola" - MinioSecretAccessKey = "123secure" - MinioBucketName = "eudico" \ No newline at end of file diff --git a/data/charlie/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU b/data/charlie/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU deleted file mode 100644 index fc0eb9641..000000000 --- a/data/charlie/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU +++ /dev/null @@ -1 +0,0 @@ -{"Type":"jwt-hmac-secret","PrivateKey":"3dGa2EutToPWz5hc5AMO7yQVQB9ElMuWDeTtq8yuHXg="} \ No newline at end of file diff --git a/data/charlie/keystore/NRUWE4BSOAWWQ33TOQ b/data/charlie/keystore/NRUWE4BSOAWWQ33TOQ deleted file mode 100644 index 1c53fd795..000000000 --- a/data/charlie/keystore/NRUWE4BSOAWWQ33TOQ +++ /dev/null @@ -1 +0,0 @@ -{"Type":"libp2p-host","PrivateKey":"CAESQPbXOriy+aEtHT9qZsfvsX6YFMGbw+sUk7Cyx1EOJm5GTSl6jqgrrFgtgPOBplufrDiRVVog4TunwoXI0YgQOSU="} \ No newline at end of file diff --git 
a/data/charlie/share.toml b/data/charlie/share.toml deleted file mode 100755 index da06fb57d..000000000 --- a/data/charlie/share.toml +++ /dev/null @@ -1,15 +0,0 @@ -Treshold=2 -PrivateShare="68297d9dab2c0ba0f508eb30626eaca737951a70d7132dc7c7efaae3dd96db38" -PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - -[VerificationShares] - -[VerificationShares.12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4] -Share="02de7b1922283afa65adbacf74e543c0841fd84d948911d668d10835df07f5c203" - -[VerificationShares.12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t] -Share="03dda30f606203bfa25da11375eaac64f915401b8bfb8067b0241d467a1cdab681" - -[VerificationShares.12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6] -Share="034b172e82079d742c84c9ed0a6e4fc2d9172edae9507d81624446754f9eed709c" - diff --git a/data/charlie/token b/data/charlie/token deleted file mode 100644 index 6c449da95..000000000 --- a/data/charlie/token +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.lIFY8AwZkqwCZ5Av1aGpHur2gf34kmxhD-U8YnKmLVA \ No newline at end of file diff --git a/data/dom/config.toml b/data/dom/config.toml deleted file mode 100644 index 5b78021ab..000000000 --- a/data/dom/config.toml +++ /dev/null @@ -1,196 +0,0 @@ -[API] - # Binding address for the Lotus API - # - # type: string - # env var: LOTUS_API_LISTENADDRESS - ListenAddress = "/ip4/127.0.0.1/tcp/1237/http" - - # type: string - # env var: LOTUS_API_REMOTELISTENADDRESS - #RemoteListenAddress = "" - - # type: Duration - # env var: LOTUS_API_TIMEOUT - #Timeout = "30s" - - -[Backup] - # Note that in case of metadata corruption it might be much harder to recover - # your node if metadata log is disabled - # - # type: bool - # env var: LOTUS_BACKUP_DISABLEMETADATALOG - #DisableMetadataLog = false - - -[Libp2p] - # Binding address for the libp2p host - 0 means random port. 
- # Format: multiaddress; see https://multiformats.io/multiaddr/ - # - # type: []string - # env var: LOTUS_LIBP2P_LISTENADDRESSES - ListenAddresses = ["/ip4/0.0.0.0/tcp/0"] - - # Addresses to explicitally announce to other peers. If not specified, - # all interface addresses are announced - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_ANNOUNCEADDRESSES - #AnnounceAddresses = [] - - # Addresses to not announce - # Format: multiaddress - # - # type: []string - # env var: LOTUS_LIBP2P_NOANNOUNCEADDRESSES - #NoAnnounceAddresses = [] - - # When not disabled (default), lotus asks NAT devices (e.g., routers), to - # open up an external port and forward it to the port lotus is running on. - # When this works (i.e., when your router supports NAT port forwarding), - # it makes the local lotus node accessible from the public internet - # - # type: bool - # env var: LOTUS_LIBP2P_DISABLENATPORTMAP - #DisableNatPortMap = false - - # ConnMgrLow is the number of connections that the basic connection manager - # will trim down to. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRLOW - #ConnMgrLow = 150 - - # ConnMgrHigh is the number of connections that, when exceeded, will trigger - # a connection GC operation. Note: protected/recently formed connections don't - # count towards this limit. - # - # type: uint - # env var: LOTUS_LIBP2P_CONNMGRHIGH - #ConnMgrHigh = 180 - - # ConnMgrGrace is a time duration that new connections are immune from being - # closed by the connection manager. 
- # - # type: Duration - # env var: LOTUS_LIBP2P_CONNMGRGRACE - #ConnMgrGrace = "20s" - - -[Pubsub] - # Run the node in bootstrap-node mode - # - # type: bool - # env var: LOTUS_PUBSUB_BOOTSTRAPPER - #Bootstrapper = false - - # type: string - # env var: LOTUS_PUBSUB_REMOTETRACER - #RemoteTracer = "" - - -[Client] - # type: bool - # env var: LOTUS_CLIENT_USEIPFS - #UseIpfs = false - - # type: bool - # env var: LOTUS_CLIENT_IPFSONLINEMODE - #IpfsOnlineMode = false - - # type: string - # env var: LOTUS_CLIENT_IPFSMADDR - #IpfsMAddr = "" - - # type: bool - # env var: LOTUS_CLIENT_IPFSUSEFORRETRIEVAL - #IpfsUseForRetrieval = false - - # The maximum number of simultaneous data transfers between the client - # and storage providers for storage deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORSTORAGE - #SimultaneousTransfersForStorage = 20 - - # The maximum number of simultaneous data transfers between the client - # and storage providers for retrieval deals - # - # type: uint64 - # env var: LOTUS_CLIENT_SIMULTANEOUSTRANSFERSFORRETRIEVAL - #SimultaneousTransfersForRetrieval = 20 - - -[Wallet] - # type: string - # env var: LOTUS_WALLET_REMOTEBACKEND - #RemoteBackend = "" - - # type: bool - # env var: LOTUS_WALLET_ENABLELEDGER - #EnableLedger = false - - # type: bool - # env var: LOTUS_WALLET_DISABLELOCAL - #DisableLocal = false - - -[Fees] - # type: types.FIL - # env var: LOTUS_FEES_DEFAULTMAXFEE - #DefaultMaxFee = "0.07 FIL" - - -[Chainstore] - # type: bool - # env var: LOTUS_CHAINSTORE_ENABLESPLITSTORE - #EnableSplitstore = false - - [Chainstore.Splitstore] - # ColdStoreType specifies the type of the coldstore. - # It can be "universal" (default) or "discard" for discarding cold blocks. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_COLDSTORETYPE - #ColdStoreType = "universal" - - # HotStoreType specifies the type of the hotstore. - # Only currently supported value is "badger". 
- # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTORETYPE - #HotStoreType = "badger" - - # MarkSetType specifies the type of the markset. - # It can be "map" (default) for in memory marking or "badger" for on-disk marking. - # - # type: string - # env var: LOTUS_CHAINSTORE_SPLITSTORE_MARKSETTYPE - #MarkSetType = "map" - - # HotStoreMessageRetention specifies the retention policy for messages, in finalities beyond - # the compaction boundary; default is 0. - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREMESSAGERETENTION - #HotStoreMessageRetention = 0 - - # HotStoreFullGCFrequency specifies how often to perform a full (moving) GC on the hotstore. - # A value of 0 disables, while a value 1 will do full GC in every compaction. - # Default is 20 (about once a week). - # - # type: uint64 - # env var: LOTUS_CHAINSTORE_SPLITSTORE_HOTSTOREFULLGCFREQUENCY - #HotStoreFullGCFrequency = 20 - - -[Checkpoint] - Fee = 0.01 - BitcoinHost="http://btc-rpc:8332" - PublicKey="66a554b8af6719851a936b0afc29dc59c3678f0e3f0b7bb081c57c5b77ad48ff" - MinioHost = "minio:9000" - MinioAccessKeyID = "lola" - MinioSecretAccessKey = "123secure" - MinioBucketName = "eudico" - diff --git a/data/dom/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU b/data/dom/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU deleted file mode 100644 index a48b91486..000000000 --- a/data/dom/keystore/MF2XI2BNNJ3XILLQOJUXMYLUMU +++ /dev/null @@ -1 +0,0 @@ -{"Type":"jwt-hmac-secret","PrivateKey":"2dLb0u762Yl4grYXYJNwLyf+Iy1uuRwZ1t36151wOb8="} \ No newline at end of file diff --git a/data/dom/keystore/NRUWE4BSOAWWQ33TOQ b/data/dom/keystore/NRUWE4BSOAWWQ33TOQ deleted file mode 100644 index 84b4b07e5..000000000 --- a/data/dom/keystore/NRUWE4BSOAWWQ33TOQ +++ /dev/null @@ -1 +0,0 @@ -{"Type":"libp2p-host","PrivateKey":"CAESQG9g5EcVv0mltdtjatOpGUnBpb/0pYysTSjcsZYbbNE0/L75FuQoOE/areMjh83oo5aaFWALMIA3+ZrczqHnQ3Y="} \ No newline at end of file diff --git a/data/dom/token b/data/dom/token deleted file mode 100644 index 
a5186b816..000000000 --- a/data/dom/token +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJBbGxvdyI6WyJyZWFkIiwid3JpdGUiLCJzaWduIiwiYWRtaW4iXX0.97sLtihGv3qKbl3X9iCbc8FHk_lrF2JseXC2ad2nIME \ No newline at end of file diff --git a/data/script.py b/data/script.py deleted file mode 100644 index af94dacd6..000000000 --- a/data/script.py +++ /dev/null @@ -1,21 +0,0 @@ -import sys - -print(sys.argv[1]) - -f = open(sys.argv[1],"r") -lines = f.readlines() - -participants = [] -count = 0 -command = 'eudico send --from t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba --method 2 --params-json "{\\"Miners\\":[' -for line in lines: - count += 1 - participant = line.split('/')[-1].rstrip() - participants.append(participant) - command = command + '\\"' + participant + '\\"' - if count < len(lines): - command = command + ',' - -command = command + ']}" t065 0' - -print(command) diff --git a/docker-compose.yaml b/docker-compose.yaml index b962d5cc2..d68eed8db 100644 --- a/docker-compose.yaml +++ b/docker-compose.yaml @@ -103,6 +103,7 @@ services: # - FULLNODE_API_INFO=/dns/lotus/tcp/1234/http # - LOTUS_JAEGER_AGENT_HOST=jaeger # - LOTUS_JAEGER_AGENT_PORT=6831 + # - DOCKER_LOTUS_MINER_INIT=true # deploy: # restart_policy: # condition: on-failure diff --git a/documentation/en/api-v0-methods-miner.md b/documentation/en/api-v0-methods-miner.md index dd7a1f88e..63891c3f2 100644 --- a/documentation/en/api-v0-methods-miner.md +++ b/documentation/en/api-v0-methods-miner.md @@ -49,6 +49,7 @@ * [LogSetLevel](#LogSetLevel) * [Market](#Market) * [MarketCancelDataTransfer](#MarketCancelDataTransfer) + * [MarketDataTransferDiagnostics](#MarketDataTransferDiagnostics) * [MarketDataTransferUpdates](#MarketDataTransferUpdates) * [MarketGetAsk](#MarketGetAsk) * [MarketGetDealUpdates](#MarketGetDealUpdates) @@ -61,6 +62,7 @@ * [MarketPendingDeals](#MarketPendingDeals) * [MarketPublishPendingDeals](#MarketPublishPendingDeals) * [MarketRestartDataTransfer](#MarketRestartDataTransfer) + * 
[MarketRetryPublishDeal](#MarketRetryPublishDeal) * [MarketSetAsk](#MarketSetAsk) * [MarketSetRetrievalAsk](#MarketSetRetrievalAsk) * [Mining](#Mining) @@ -79,9 +81,12 @@ * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetLimit](#NetLimit) * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) + * [NetSetLimit](#NetSetLimit) + * [NetStat](#NetStat) * [Pieces](#Pieces) * [PiecesGetCIDInfo](#PiecesGetCIDInfo) * [PiecesGetPieceInfo](#PiecesGetPieceInfo) @@ -93,9 +98,13 @@ * [ReturnAddPiece](#ReturnAddPiece) * [ReturnFetch](#ReturnFetch) * [ReturnFinalizeSector](#ReturnFinalizeSector) + * [ReturnGenerateSectorKeyFromData](#ReturnGenerateSectorKeyFromData) * [ReturnMoveStorage](#ReturnMoveStorage) + * [ReturnProveReplicaUpdate1](#ReturnProveReplicaUpdate1) + * [ReturnProveReplicaUpdate2](#ReturnProveReplicaUpdate2) * [ReturnReadPiece](#ReturnReadPiece) * [ReturnReleaseUnsealed](#ReturnReleaseUnsealed) + * [ReturnReplicaUpdate](#ReturnReplicaUpdate) * [ReturnSealCommit1](#ReturnSealCommit1) * [ReturnSealCommit2](#ReturnSealCommit2) * [ReturnSealPreCommit1](#ReturnSealPreCommit1) @@ -113,6 +122,7 @@ * [SectorGetExpectedSealDuration](#SectorGetExpectedSealDuration) * [SectorGetSealDelay](#SectorGetSealDelay) * [SectorMarkForUpgrade](#SectorMarkForUpgrade) + * [SectorMatchPendingPiecesToOpenSectors](#SectorMatchPendingPiecesToOpenSectors) * [SectorPreCommitFlush](#SectorPreCommitFlush) * [SectorPreCommitPending](#SectorPreCommitPending) * [SectorRemove](#SectorRemove) @@ -137,6 +147,7 @@ * [StorageDeclareSector](#StorageDeclareSector) * [StorageDropSector](#StorageDropSector) * [StorageFindSector](#StorageFindSector) + * [StorageGetLocks](#StorageGetLocks) * [StorageInfo](#StorageInfo) * [StorageList](#StorageList) * [StorageLocal](#StorageLocal) @@ -208,7 +219,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } 
``` @@ -235,10 +246,18 @@ Inputs: `null` Response: ```json { - "PreCommitControl": null, - "CommitControl": null, - "TerminateControl": null, - "DealPublishControl": null, + "PreCommitControl": [ + "f01234" + ], + "CommitControl": [ + "f01234" + ], + "TerminateControl": [ + "f01234" + ], + "DealPublishControl": [ + "f01234" + ], "DisableOwnerFallback": true, "DisableWorkerFallback": true } @@ -269,7 +288,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -287,7 +308,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Check @@ -301,7 +327,15 @@ Inputs: ```json [ 8, - null, + [ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + } + ], true ] ``` @@ -324,12 +358,31 @@ Perms: read Inputs: ```json [ - null, - null + [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], + "Bw==", + 10101, + 15 ] ``` -Response: `null` +Response: +```json +[ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } +] +``` ## Create @@ -363,7 +416,16 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", + "Success": false, + "Error": "\u003cerror\u003e" + } +] +``` ### DagstoreInitializeAll DagstoreInitializeAll initializes all uninitialized shards in bulk, @@ -439,7 +501,16 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": "baga6ea4seaqecmtz7iak33dsfshi627abz4i4665dfuzr3qfs4bmad6dx3iigdq", + "State": "ShardStateAvailable", + "Error": "\u003cerror\u003e" + } +] +``` ### DagstoreRecoverShard DagstoreRecoverShard attempts to recover a failed shard. 
@@ -541,7 +612,33 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +] +``` ### DealsPieceCidBlocklist @@ -550,7 +647,14 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### DealsSetConsiderOfflineRetrievalDeals @@ -644,7 +748,11 @@ Perms: admin Inputs: ```json [ - null + [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] ] ``` @@ -672,7 +780,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -681,7 +810,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -718,6 +852,113 @@ Inputs: Response: `{}` +### MarketDataTransferDiagnostics +MarketDataTransferDiagnostics generates debugging information about current data transfers over graphsync + + +Perms: write + +Inputs: +```json +[ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" +] +``` + +Response: +```json +{ + "ReceivingTransfers": [ + { + "RequestID": 4, + "RequestState": "string value", + "IsCurrentChannelRequest": true, + "ChannelID": { + 
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "ChannelState": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Diagnostics": [ + "string value" + ] + } + ], + "SendingTransfers": [ + { + "RequestID": 4, + "RequestState": "string value", + "IsCurrentChannelRequest": true, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "ChannelState": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Diagnostics": [ + "string value" + ] + } + ] +} +``` + ### MarketDataTransferUpdates @@ -740,7 +981,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": 
"string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -878,7 +1132,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### MarketListDeals @@ -887,7 +1174,33 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "State": { + "SectorStartEpoch": 10101, + "LastUpdatedEpoch": 10101, + "SlashEpoch": 10101 + } + } +] +``` ### MarketListIncompleteDeals @@ -896,7 +1209,65 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": 
"0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "AddFundsCid": null, + "PublishCid": null, + "Miner": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Client": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "State": 42, + "PiecePath": ".lotusminer/fstmp123", + "MetadataPath": ".lotusminer/fstmp123", + "SlashEpoch": 10101, + "FastRetrieval": true, + "Message": "string value", + "FundsReserved": "0", + "Ref": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "AvailableForRetrieval": true, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "TransferChannelId": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "SectorNumber": 9, + "InboundCAR": "string value" + } +] +``` ### MarketListRetrievalDeals @@ -905,7 +1276,51 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "Selector": { + "Raw": "Ynl0ZSBhcnJheQ==" + }, + "PieceCID": null, + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "UnsealPrice": "0", + "StoreID": 42, + "ChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "PieceInfo": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Deals": [ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032 + } + ] + }, + "Status": 0, + "Receiver": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "TotalSent": 42, + 
"FundsReceived": "0", + "Message": "string value", + "CurrentInterval": 42, + "LegacyProtocol": true + } +] +``` ### MarketPendingDeals @@ -917,7 +1332,29 @@ Inputs: `null` Response: ```json { - "Deals": null, + "Deals": [ + { + "Proposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "ClientSignature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ], "PublishPeriodStart": "0001-01-01T00:00:00Z", "PublishPeriod": 60000000000 } @@ -949,6 +1386,22 @@ Inputs: Response: `{}` +### MarketRetryPublishDeal + + +Perms: admin + +Inputs: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: `{}` + ### MarketSetAsk @@ -1019,7 +1472,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -1116,9 +1571,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -1135,9 +1596,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -1150,9 +1617,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ 
-1169,7 +1642,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -1220,7 +1695,35 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetLimit + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 } ``` @@ -1241,8 +1744,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": "0001-01-01T00:00:00Z", "Value": 123, @@ -1256,24 +1763,143 @@ Response: } ``` -### NetPeers - - -Perms: read - -Inputs: `null` - -Response: `null` - -### NetPubsubScores - - -Perms: read - -Inputs: `null` - -Response: `null` - +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` + +### NetSetLimit + + +Perms: admin + +Inputs: +```json +[ + "string value", + { + 
"Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 + } +] +``` + +Response: `{}` + +### NetStat + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "System": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Transient": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Services": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Protocols": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Peers": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + } +} +``` + ## Pieces @@ -1297,7 +1923,15 @@ Response: "CID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "PieceBlockLocations": null + "PieceBlockLocations": [ + { + "RelOffset": 42, + "BlockSize": 42, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] } ``` @@ -1321,7 +1955,14 @@ Response: "PieceCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Deals": null + "Deals": [ + { + "DealID": 5432, + "SectorID": 9, + "Offset": 1032, + "Length": 1032 + } + ] } ``` @@ -1332,7 +1973,14 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### PiecesListPieces @@ -1341,7 +1989,14 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ## Pledge @@ -1423,6 +2078,30 @@ Response: `{}` ### ReturnFinalizeSector +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnGenerateSectorKeyFromData + + Perms: admin Inputs: @@ -1468,6 +2147,58 @@ Inputs: Response: `{}` +### ReturnProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + [ + "Ynl0ZSBhcnJheQ==" + ], + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + +### ReturnProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + "Bw==", + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnReadPiece @@ -1517,6 +2248,38 @@ Inputs: Response: `{}` +### ReturnReplicaUpdate + + +Perms: admin + +Inputs: +```json +[ + { + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" + }, + { + "NewSealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "NewUnsealed": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + { + "Code": 0, + "Message": "string value" + } +] +``` + +Response: `{}` + ### ReturnSealCommit1 @@ -1532,7 +2295,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1557,7 +2320,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1582,7 +2345,7 @@ Inputs: }, "ID": "07070707-0707-0707-0707-070707070707" }, - null, + "Bw==", { "Code": 0, "Message": "string value" @@ -1768,7 +2531,22 @@ 
Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Sectors": [ + 123, + 124 + ], + "FailedSectors": { + "123": "can't acquire read lock" + }, + "Msg": null, + "Error": "string value" + } +] +``` ### SectorCommitPending SectorCommitPending returns a list of pending Commit sectors to be sent in the next aggregate message @@ -1778,7 +2556,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ### SectorGetExpectedSealDuration SectorGetExpectedSealDuration gets the expected time for a sector to seal @@ -1809,12 +2595,22 @@ Perms: admin Inputs: ```json [ - 9 + 9, + true ] ``` Response: `{}` +### SectorMatchPendingPiecesToOpenSectors + + +Perms: admin + +Inputs: `null` + +Response: `{}` + ### SectorPreCommitFlush SectorPreCommitFlush immediately sends a PreCommit message with sectors batched for PreCommit. Returns null if message wasn't sent @@ -1824,7 +2620,19 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Sectors": [ + 123, + 124 + ], + "Msg": null, + "Error": "string value" + } +] +``` ### SectorPreCommitPending SectorPreCommitPending returns a list of pending PreCommit sectors to be sent in the next batch message @@ -1834,7 +2642,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ### SectorRemove SectorRemove removes the sector from storage. 
It doesn't terminate it on-chain, which can @@ -1934,7 +2750,15 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Miner": 1000, + "Number": 9 + } +] +``` ## Sectors @@ -1964,7 +2788,9 @@ Perms: read Inputs: ```json [ - null + [ + "Proving" + ] ] ``` @@ -2018,14 +2844,49 @@ Response: "CommD": null, "CommR": null, "Proof": "Ynl0ZSBhcnJheQ==", - "Deals": null, - "Pieces": null, + "Deals": [ + 5432 + ], + "Pieces": [ + { + "Piece": { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + }, + "DealInfo": { + "PublishCid": null, + "DealID": 5432, + "DealProposal": { + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceSize": 1032, + "VerifiedDeal": true, + "Client": "f01234", + "Provider": "f01234", + "Label": "string value", + "StartEpoch": 10101, + "EndEpoch": 10101, + "StoragePricePerEpoch": "0", + "ProviderCollateral": "0", + "ClientCollateral": "0" + }, + "DealSchedule": { + "StartEpoch": 10101, + "EndEpoch": 10101 + }, + "KeepUnsealed": true + } + } + ], "Ticket": { - "Value": null, + "Value": "Bw==", "Epoch": 10101 }, "Seed": { - "Value": null, + "Value": "Bw==", "Epoch": 10101 }, "PreCommitMsg": null, @@ -2033,7 +2894,14 @@ Response: "Retries": 42, "ToUpgrade": true, "LastErr": "string value", - "Log": null, + "Log": [ + { + "Kind": "string value", + "Timestamp": 42, + "Trace": "string value", + "Message": "string value" + } + ], "SealProof": 8, "Activation": 10101, "Expiration": 10101, @@ -2077,7 +2945,7 @@ Inputs: }, 1040384, 1024, - null, + "Bw==", null ] ``` @@ -2127,11 +2995,19 @@ Inputs: [ { "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": null, + "URLs": [ + "string value" + ], "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] }, { "Capacity": 9, @@ -2160,7 +3036,27 @@ Inputs: ] ``` -Response: `null` +Response: 
+```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": [ + "string value" + ], + "Weight": 42, + "MaxStorage": 42, + "CanSeal": true, + "CanStore": true, + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] + } +] +``` ### StorageDeclareSector @@ -2219,7 +3115,56 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "URLs": [ + "string value" + ], + "Weight": 42, + "CanSeal": true, + "CanStore": true, + "Primary": true + } +] +``` + +### StorageGetLocks + + +Perms: admin + +Inputs: `null` + +Response: +```json +{ + "Locks": [ + { + "Sector": { + "Miner": 1000, + "Number": 123 + }, + "Write": [ + 0, + 0, + 1, + 0, + 0 + ], + "Read": [ + 2, + 3, + 0, + 0, + 0 + ] + } + ] +} +``` ### StorageInfo @@ -2237,11 +3182,19 @@ Response: ```json { "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", - "URLs": null, + "URLs": [ + "string value" + ], "Weight": 42, "MaxStorage": 42, "CanSeal": true, - "CanStore": true + "CanStore": true, + "Groups": [ + "string value" + ], + "AllowTo": [ + "string value" + ] } ``` @@ -2432,18 +3385,923 @@ Response: "IgnoreResources": false, "Resources": { "MemPhysical": 274877906944, + "MemUsed": 2147483648, "MemSwap": 128849018880, - "MemReserved": 2147483648, + "MemSwapUsed": 2147483648, "CPUs": 64, "GPUs": [ "aGPU 1337" - ] + ], + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, 
+ "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + 
"MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + 
"BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/provereplicaupdate/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + 
"GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/provereplicaupdate/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 
2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/regensectorkey": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + 
"MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/replicaupdate": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 
1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } }, "Enabled": true, "MemUsedMin": 0, "MemUsedMax": 0, - "GpuUsed": false, + "GpuUsed": 0, "CpuUse": 0 } } diff --git 
a/documentation/en/api-v0-methods-worker.md b/documentation/en/api-v0-methods-worker.md index c620113f4..959265a4d 100644 --- a/documentation/en/api-v0-methods-worker.md +++ b/documentation/en/api-v0-methods-worker.md @@ -11,12 +11,19 @@ * [AddPiece](#AddPiece) * [Finalize](#Finalize) * [FinalizeSector](#FinalizeSector) +* [Generate](#Generate) + * [GenerateSectorKeyFromData](#GenerateSectorKeyFromData) * [Move](#Move) * [MoveStorage](#MoveStorage) * [Process](#Process) * [ProcessSession](#ProcessSession) +* [Prove](#Prove) + * [ProveReplicaUpdate1](#ProveReplicaUpdate1) + * [ProveReplicaUpdate2](#ProveReplicaUpdate2) * [Release](#Release) * [ReleaseUnsealed](#ReleaseUnsealed) +* [Replica](#Replica) + * [ReplicaUpdate](#ReplicaUpdate) * [Seal](#Seal) * [SealCommit1](#SealCommit1) * [SealCommit2](#SealCommit2) @@ -92,10 +99,917 @@ Response: "IgnoreResources": true, "Resources": { "MemPhysical": 42, + "MemUsed": 42, "MemSwap": 42, - "MemReserved": 42, + "MemSwapUsed": 42, "CPUs": 42, - "GPUs": null + "GPUs": [ + "string value" + ], + "Resources": { + "seal/v0/addpiece": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + 
"MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + 
"MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/commit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 
204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/fetch": { + "0": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "1": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "2": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "3": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "4": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "5": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "6": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "7": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "8": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + }, + "9": { + "MinMemory": 1048576, + "MaxMemory": 1048576, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 0 + } + }, + "seal/v0/precommit/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + }, + "seal/v0/precommit/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + 
"MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 0, + "MaxParallelism": -1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 16106127360, + "MaxMemory": 16106127360, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 32212254720, + "MaxMemory": 32212254720, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/provereplicaupdate/1": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 
2048, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 0, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/provereplicaupdate/2": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "3": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "4": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 
1073741824, + "MaxMemory": 1610612736, + "GPUUtilization": 1, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10737418240 + }, + "8": { + "MinMemory": 32212254720, + "MaxMemory": 161061273600, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 34359738368 + }, + "9": { + "MinMemory": 64424509440, + "MaxMemory": 204010946560, + "GPUUtilization": 1, + "MaxParallelism": -1, + "MaxParallelismGPU": 6, + "BaseMinMemory": 68719476736 + } + }, + "seal/v0/regensectorkey": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 
1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/replicaupdate": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "3": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "4": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 1073741824, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "8": { + "MinMemory": 4294967296, + "MaxMemory": 4294967296, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + }, + "9": { + "MinMemory": 8589934592, + "MaxMemory": 8589934592, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1073741824 + } + }, + "seal/v0/unseal": { + "0": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + 
"MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "1": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "2": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "3": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "4": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "5": { + "MinMemory": 2048, + "MaxMemory": 2048, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 2048 + }, + "6": { + "MinMemory": 8388608, + "MaxMemory": 8388608, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 8388608 + }, + "7": { + "MinMemory": 805306368, + "MaxMemory": 1073741824, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 1048576 + }, + "8": { + "MinMemory": 60129542144, + "MaxMemory": 68719476736, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + }, + "9": { + "MinMemory": 120259084288, + "MaxMemory": 137438953472, + "GPUUtilization": 0, + "MaxParallelism": 1, + "MaxParallelismGPU": 0, + "BaseMinMemory": 10485760 + } + } + } } } ``` @@ -107,7 +1021,18 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ID": "76f1988b-ef30-4d7e-b3ec-9a627f4ba5a8", + "Weight": 42, + "LocalPath": "string value", + "CanSeal": true, + "CanStore": true + } +] +``` ### Remove Storage / Other @@ -144,7 +1069,7 @@ Perms: admin Inputs: `null` -Response: `131328` +Response: `131584` ## Add @@ -165,7 +1090,9 @@ Inputs: }, "ProofType": 8 }, - null, + [ + 1024 + ], 
1024, {} ] @@ -200,7 +1127,47 @@ Inputs: }, "ProofType": 8 }, - null + [ + { + "Offset": 1024, + "Size": 1024 + } + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Generate + + +### GenerateSectorKeyFromData + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } ] ``` @@ -262,6 +1229,88 @@ Inputs: `null` Response: `"07070707-0707-0707-0707-070707070707"` +## Prove + + +### ProveReplicaUpdate1 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +### ProveReplicaUpdate2 + + +Perms: admin + +Inputs: +```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + [ + "Ynl0ZSBhcnJheQ==" + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + ## Release @@ -280,7 +1329,52 @@ Inputs: }, "ProofType": 8 }, - null + [ + { + "Offset": 1024, + "Size": 1024 + } + ] +] +``` + +Response: +```json +{ + "Sector": { + "Miner": 1000, + "Number": 9 + }, + "ID": "07070707-0707-0707-0707-070707070707" +} +``` + +## Replica + + +### ReplicaUpdate + + +Perms: admin + +Inputs: 
+```json +[ + { + "ID": { + "Miner": 1000, + "Number": 9 + }, + "ProofType": 8 + }, + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] ] ``` @@ -313,9 +1407,16 @@ Inputs: }, "ProofType": 8 }, - null, - null, - null, + "Bw==", + "Bw==", + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], { "Unsealed": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -353,7 +1454,7 @@ Inputs: }, "ProofType": 8 }, - null + "Bw==" ] ``` @@ -383,8 +1484,15 @@ Inputs: }, "ProofType": 8 }, - null, - null + "Bw==", + [ + { + "Size": 1032, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ] ] ``` @@ -414,7 +1522,7 @@ Inputs: }, "ProofType": 8 }, - null + "Bw==" ] ``` @@ -531,7 +1639,7 @@ Inputs: }, 1040384, 1024, - null, + "Bw==", { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" } diff --git a/documentation/en/api-v0-methods.md b/documentation/en/api-v0-methods.md index f177c6008..883d4d274 100644 --- a/documentation/en/api-v0-methods.md +++ b/documentation/en/api-v0-methods.md @@ -128,9 +128,12 @@ * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetLimit](#NetLimit) * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) + * [NetSetLimit](#NetSetLimit) + * [NetStat](#NetStat) * [Paych](#Paych) * [PaychAllocateLane](#PaychAllocateLane) * [PaychAvailableFunds](#PaychAvailableFunds) @@ -283,7 +286,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } ``` @@ -299,7 +302,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -317,7 +322,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Beacon The Beacon method group contains methods for interacting 
with the random beacon (DRAND) @@ -422,9 +432,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -479,9 +503,54 @@ Inputs: Response: ```json { - "BlsMessages": null, - "SecpkMessages": null, - "Cids": null + "BlsMessages": [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "SecpkMessages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -557,7 +626,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### 
ChainGetNode @@ -597,7 +690,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetParentReceipts ChainGetParentReceipts returns receipts for messages in parent tipset of @@ -616,7 +733,16 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` ### ChainGetPath ChainGetPath returns a set of revert/apply operations needed to get from @@ -658,7 +784,19 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainGetRandomnessFromBeacon ChainGetRandomnessFromBeacon is used to sample the beacon for randomness. @@ -683,7 +821,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### ChainGetRandomnessFromTickets ChainGetRandomnessFromTickets is used to sample the chain for randomness. @@ -708,7 +846,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### ChainGetTipSet ChainGetTipSet returns the tipset specified by the given TipSetKey. 
@@ -814,7 +952,19 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainReadObj ChainReadObj reads ipld nodes referenced by the specified CID from chain @@ -988,7 +1138,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -1058,7 +1221,30 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + } +] +``` ### ClientGenCar ClientGenCar generates a CAR file for the specified file. 
@@ -1103,7 +1289,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1142,7 +1342,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1180,7 +1393,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1219,7 +1446,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1267,9 +1507,23 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": 
"0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } - } + }, + "Event": 5 } ``` @@ -1324,7 +1578,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### ClientListDeals ClientListDeals returns information about the deals made by the local client. @@ -1334,7 +1621,88 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + 
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } + } +] +``` ### ClientListImports ClientListImports lists imported files and their root CIDs @@ -1344,7 +1712,19 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": 50, + "Err": "string value", + "Root": null, + "Source": "string value", + "FilePath": "string value", + "CARPath": "string value" + } +] +``` ### ClientListRetrievals ClientQueryAsk returns a signed StorageAsk from the specified miner. 
@@ -1355,7 +1735,61 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 + } +] +``` ### ClientMinerQueryOffer ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. 
@@ -1385,6 +1819,7 @@ Response: "Size": 42, "MinPrice": "0", "UnsealPrice": "0", + "PricePerByte": "0", "PaymentInterval": 42, "PaymentIntervalIncrease": 42, "Miner": "f01234", @@ -1828,7 +2263,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -1837,7 +2293,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -1982,11 +2443,46 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconValues": null, - "Messages": null, - "Epoch": 10101, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Epoch": 10101, "Timestamp": 42, - "WinningPoStProof": null + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] } ] ``` @@ -2003,9 +2499,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": 
"bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -2029,8 +2539,16 @@ Response: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -2060,14 +2578,28 @@ Response: { "MinerPower": "0", "NetworkPower": "0", - "Sectors": null, + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], "WorkerKey": "f01234", "SectorSize": 34359738368, "PrevBeaconEntry": { "Round": 42, "Data": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], "EligibleForMining": true } ``` @@ -2086,11 +2618,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolBatchPushMessage MpoolBatchPushMessage batch pushes a unsigned message to mempool. 
@@ -2101,14 +2665,58 @@ Perms: sign Inputs: ```json [ - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], { "MaxFee": "0" } ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolBatchPushUntrusted MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. 
@@ -2119,11 +2727,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolClear MpoolClear clears pending messages from the mpool @@ -2151,7 +2791,9 @@ Inputs: `null` Response: ```json { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2196,7 +2838,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolPush MpoolPush pushes a signed message to mempool. 
@@ -2368,7 +3038,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolSetConfig MpoolSetConfig sets the mpool config to (a copy of) the supplied config @@ -2380,7 +3078,9 @@ Inputs: ```json [ { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2608,7 +3308,9 @@ Inputs: ```json [ 42, - null, + [ + "f01234" + ], 10101, "0", "f01234", @@ -2669,7 +3371,21 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": 9, + "To": "f01234", + "Value": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Approved": [ + "f01234" + ] + } +] +``` ### MsigGetVested MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. 
@@ -2879,7 +3595,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -2976,9 +3694,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -2995,9 +3719,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -3010,9 +3740,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3029,7 +3765,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -3080,7 +3818,35 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetLimit + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 } ``` @@ -3101,8 +3867,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": 
"0001-01-01T00:00:00Z", "Value": 123, @@ -3113,26 +3883,145 @@ Response: "name": "2021-03-08T22:52:18Z" } } -} +} +``` + +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` + +### NetSetLimit + + +Perms: admin + +Inputs: +```json +[ + "string value", + { + "Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 + } +] ``` -### NetPeers - - -Perms: read - -Inputs: `null` - -Response: `null` +Response: `{}` -### NetPubsubScores +### NetStat Perms: read -Inputs: `null` +Inputs: +```json +[ + "string value" +] +``` -Response: `null` +Response: +```json +{ + "System": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Transient": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Services": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Protocols": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Peers": { + "abc": { + 
"NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + } +} +``` ## Paych The Paych methods are for interacting with and managing payment channels @@ -3271,7 +4160,12 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### PaychNewPayment @@ -3283,7 +4177,19 @@ Inputs: [ "f01234", "f01234", - null + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] ] ``` @@ -3294,7 +4200,33 @@ Response: "WaitSentinel": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Vouchers": null + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] } ``` @@ -3360,7 +4292,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3396,7 +4333,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3432,7 +4374,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3474,7 +4421,12 @@ Response: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, 
"Data": "Ynl0ZSBhcnJheQ==" @@ -3496,7 +4448,36 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` ### PaychVoucherSubmit @@ -3521,7 +4502,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3589,7 +4575,15 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Miner": "f01234", + "Epoch": 10101 + } +] +``` ### StateCall StateCall runs the given message and returns its result without any persisted changes. @@ -3691,8 +4685,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + 
"vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -3798,7 +4857,23 @@ Inputs: ```json [ 10101, - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], [ { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -3816,7 +4891,138 @@ Response: "Root": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Trace": null + "Trace": [ + { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 
123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 + } + ] } ``` @@ -3934,7 +5140,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetRandomnessFromTickets StateGetRandomnessFromTickets is used to sample the chain for randomness. @@ -3959,7 +5165,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetReceipt StateGetReceipt returns the message receipt for the given message or for a @@ -4021,7 +5227,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateListMessages StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. 
@@ -4048,7 +5259,14 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### StateListMiners StateListMiners returns the addresses of every miner that has claimed power in the Power Actor @@ -4070,7 +5288,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateLookupID StateLookupID retrieves the ID address of the given address @@ -4270,7 +5493,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateMinerAvailableBalance StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent @@ -4316,7 +5561,18 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` ### StateMinerFaults StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner @@ -4374,10 +5630,14 @@ Response: "Owner": "f01234", "Worker": "f01234", "NewWorker": "f01234", - "ControlAddresses": null, + "ControlAddresses": [ + "f01234" + ], "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Multiaddrs": null, + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, @@ -4402,7 +5662,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4444,7 +5706,33 @@ Inputs: 
] ``` -Response: `null` +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` ### StateMinerPower StateMinerPower returns the power of the indicated miner @@ -4499,7 +5787,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4666,7 +5956,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateNetworkName StateNetworkName returns the name of the network the node is synced to @@ -4698,7 +6010,7 @@ Inputs: ] ``` -Response: `13` +Response: `15` ### StateReadState StateReadState returns the indicated actor's state. 
@@ -4832,8 +6144,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -5009,14 +6386,17 @@ Response: "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Activation": 10101, "Expiration": 10101, "DealWeight": "0", "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` @@ -5082,7 +6462,9 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -5378,9 +6760,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + 
"Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5435,7 +6831,26 @@ Inputs: `null` Response: ```json { - "ActiveSyncs": null, + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], "VMApplied": 42 } ``` @@ -5460,9 +6875,23 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5486,8 +6915,16 @@ Inputs: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ] ``` @@ -5648,7 +7085,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### WalletNew WalletNew creates a new address in the wallet with the given sigType. 
diff --git a/documentation/en/api-v1-unstable-methods.md b/documentation/en/api-v1-unstable-methods.md index 397ff3c6f..a5fdd9994 100644 --- a/documentation/en/api-v1-unstable-methods.md +++ b/documentation/en/api-v1-unstable-methods.md @@ -41,6 +41,7 @@ * [ClientDataTransferUpdates](#ClientDataTransferUpdates) * [ClientDealPieceCID](#ClientDealPieceCID) * [ClientDealSize](#ClientDealSize) + * [ClientExport](#ClientExport) * [ClientFindData](#ClientFindData) * [ClientGenCar](#ClientGenCar) * [ClientGetDealInfo](#ClientGetDealInfo) @@ -59,7 +60,7 @@ * [ClientRestartDataTransfer](#ClientRestartDataTransfer) * [ClientRetrieve](#ClientRetrieve) * [ClientRetrieveTryRestartInsufficientFunds](#ClientRetrieveTryRestartInsufficientFunds) - * [ClientRetrieveWithEvents](#ClientRetrieveWithEvents) + * [ClientRetrieveWait](#ClientRetrieveWait) * [ClientStartDeal](#ClientStartDeal) * [ClientStatelessDeal](#ClientStatelessDeal) * [Create](#Create) @@ -108,6 +109,7 @@ * [MsigApprove](#MsigApprove) * [MsigApproveTxnHash](#MsigApproveTxnHash) * [MsigCancel](#MsigCancel) + * [MsigCancelTxnHash](#MsigCancelTxnHash) * [MsigCreate](#MsigCreate) * [MsigGetAvailableBalance](#MsigGetAvailableBalance) * [MsigGetPending](#MsigGetPending) @@ -132,9 +134,12 @@ * [NetConnectedness](#NetConnectedness) * [NetDisconnect](#NetDisconnect) * [NetFindPeer](#NetFindPeer) + * [NetLimit](#NetLimit) * [NetPeerInfo](#NetPeerInfo) * [NetPeers](#NetPeers) * [NetPubsubScores](#NetPubsubScores) + * [NetSetLimit](#NetSetLimit) + * [NetStat](#NetStat) * [Node](#Node) * [NodeStatus](#NodeStatus) * [Paych](#Paych) @@ -287,7 +292,7 @@ Response: ```json { "Version": "string value", - "APIVersion": 131328, + "APIVersion": 131584, "BlockDelay": 42 } ``` @@ -303,7 +308,9 @@ Perms: admin Inputs: ```json [ - null + [ + "write" + ] ] ``` @@ -321,7 +328,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "write" +] +``` ## Beacon The Beacon method group contains methods for interacting with the random beacon 
(DRAND) @@ -452,9 +464,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -509,9 +535,54 @@ Inputs: Response: ```json { - "BlsMessages": null, - "SecpkMessages": null, - "Cids": null + "BlsMessages": [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "SecpkMessages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], + "Cids": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -587,7 +658,31 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetNode @@ -627,7 +722,31 
@@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + } +] +``` ### ChainGetParentReceipts ChainGetParentReceipts returns receipts for messages in parent tipset of @@ -646,7 +765,16 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + } +] +``` ### ChainGetPath ChainGetPath returns a set of revert/apply operations needed to get from @@ -688,7 +816,19 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainGetTipSet ChainGetTipSet returns the tipset specified by the given TipSetKey. 
@@ -826,7 +966,19 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": "string value", + "Val": { + "Cids": null, + "Blocks": null, + "Height": 0 + } + } +] +``` ### ChainReadObj ChainReadObj reads ipld nodes referenced by the specified CID from chain @@ -1000,7 +1152,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } ``` @@ -1054,6 +1219,37 @@ Response: } ``` +### ClientExport +ClientExport exports a file stored in the local filestore to a system file + + +Perms: admin + +Inputs: +```json +[ + { + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DAGs": [ + { + "DataSelector": "Links/21/Hash/Links/42/Hash", + "ExportMerkleProof": true + } + ], + "FromLocalCAR": "string value", + "DealID": 5 + }, + { + "Path": "string value", + "IsCAR": true + } +] +``` + +Response: `{}` + ### ClientFindData ClientFindData identifies peers that have a certain file, and returns QueryOffers (one per peer). @@ -1070,7 +1266,30 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Err": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Piece": null, + "Size": 42, + "MinPrice": "0", + "UnsealPrice": "0", + "PricePerByte": "0", + "PaymentInterval": 42, + "PaymentIntervalIncrease": 42, + "Miner": "f01234", + "MinerPeer": { + "Address": "f01234", + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "PieceCID": null + } + } +] +``` ### ClientGenCar ClientGenCar generates a CAR file for the specified file. 
@@ -1115,7 +1334,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1154,7 +1387,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1192,7 +1438,21 @@ Response: "State": 42, "Message": "string value", "DealStages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] }, "Provider": "f01234", "DataRef": { @@ -1231,7 +1491,20 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } } } @@ -1279,9 +1552,23 @@ Response: "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Transferred": 42, "Stages": { - "Stages": null + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": 
"0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] } - } + }, + "Event": 5 } ``` @@ -1336,7 +1623,40 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } +] +``` ### ClientListDeals ClientListDeals returns information about the deals made by the local client. @@ -1346,7 +1666,88 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "ProposalCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "State": 42, + "Message": "string value", + "DealStages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "ExpectedDuration": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + }, + "Provider": "f01234", + "DataRef": { + "TransferType": "string value", + "Root": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "PieceCid": null, + "PieceSize": 1024, + "RawBlockSize": 42 + }, + "PieceCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Size": 42, + "PricePerEpoch": "0", + "Duration": 42, + "DealID": 5432, + "CreationTime": "0001-01-01T00:00:00Z", + "Verified": true, + "TransferChannelID": { + 
"Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + } + } +] +``` ### ClientListImports ClientListImports lists imported files and their root CIDs @@ -1356,7 +1757,19 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Key": 50, + "Err": "string value", + "Root": null, + "Source": "string value", + "FilePath": "string value", + "CARPath": "string value" + } +] +``` ### ClientListRetrievals ClientListRetrievals returns information about retrievals made by the local client @@ -1366,7 +1779,61 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + { + "PayloadCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "ID": 5, + "PieceCID": null, + "PricePerByte": "0", + "UnsealPrice": "0", + "Status": 0, + "Message": "string value", + "Provider": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "BytesReceived": 42, + "BytesPaidFor": 42, + "TotalPaid": "0", + "TransferChannelID": { + "Initiator": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Responder": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "ID": 3 + }, + "DataTransfer": { + "TransferID": 3, + "Status": 1, + "BaseCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + 
"IsInitiator": true, + "IsSender": true, + "Voucher": "string value", + "Message": "string value", + "OtherPeer": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Transferred": 42, + "Stages": { + "Stages": [ + { + "Name": "string value", + "Description": "string value", + "CreatedTime": "0001-01-01T00:00:00Z", + "UpdatedTime": "0001-01-01T00:00:00Z", + "Logs": [ + { + "Log": "string value", + "UpdatedTime": "0001-01-01T00:00:00Z" + } + ] + } + ] + } + }, + "Event": 5 + } +] +``` ### ClientMinerQueryOffer ClientMinerQueryOffer returns a QueryOffer for the specific miner and file. @@ -1396,6 +1863,7 @@ Response: "Size": 42, "MinPrice": "0", "UnsealPrice": "0", + "PricePerByte": "0", "PaymentInterval": 42, "PaymentIntervalIncrease": 42, "Miner": "f01234", @@ -1481,9 +1949,8 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", + "DataSelector": "Links/21/Hash/Links/42/Hash", "Size": 42, - "FromLocalCAR": "string value", "Total": "0", "UnsealPrice": "0", "PaymentInterval": 42, @@ -1495,15 +1962,16 @@ Inputs: "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "PieceCID": null } - }, - { - "Path": "string value", - "IsCAR": true } ] ``` -Response: `{}` +Response: +```json +{ + "DealID": 5 +} +``` ### ClientRetrieveTryRestartInsufficientFunds ClientRetrieveTryRestartInsufficientFunds attempts to restart stalled retrievals on a given payment channel @@ -1521,9 +1989,8 @@ Inputs: Response: `{}` -### ClientRetrieveWithEvents -ClientRetrieveWithEvents initiates the retrieval of a file, as specified in the order, and provides a channel -of status updates. 
+### ClientRetrieveWait +ClientRetrieveWait waits for retrieval to be complete Perms: admin @@ -1531,43 +1998,11 @@ Perms: admin Inputs: ```json [ - { - "Root": { - "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" - }, - "Piece": null, - "DatamodelPathSelector": "Links/21/Hash/Links/42/Hash", - "Size": 42, - "FromLocalCAR": "string value", - "Total": "0", - "UnsealPrice": "0", - "PaymentInterval": 42, - "PaymentIntervalIncrease": 42, - "Client": "f01234", - "Miner": "f01234", - "MinerPeer": { - "Address": "f01234", - "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "PieceCID": null - } - }, - { - "Path": "string value", - "IsCAR": true - } + 5 ] ``` -Response: -```json -{ - "Event": 5, - "Status": 0, - "BytesReceived": 42, - "FundsSpent": "0", - "Err": "string value" -} -``` +Response: `{}` ### ClientStartDeal ClientStartDeal proposes a deal with a miner. @@ -1840,7 +2275,28 @@ Perms: admin Inputs: `null` -Response: `null` +Response: +```json +[ + { + "Type": { + "System": "string value", + "Subsystem": "string value" + }, + "Active": true, + "LastActive": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + }, + "LastResolved": { + "Type": "string value", + "Message": "json raw message", + "Time": "0001-01-01T00:00:00Z" + } + } +] +``` ### LogList @@ -1849,7 +2305,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "string value" +] +``` ### LogSetLevel @@ -1994,11 +2455,46 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconValues": null, - "Messages": null, + "BeaconValues": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "Messages": [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + 
}, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], "Epoch": 10101, "Timestamp": 42, - "WinningPoStProof": null + "WinningPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ] } ] ``` @@ -2015,9 +2511,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -2041,8 +2551,16 @@ Response: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ``` @@ -2072,14 +2590,28 @@ Response: { "MinerPower": "0", "NetworkPower": "0", - "Sectors": null, + "Sectors": [ + { + "SealProof": 8, + "SectorNumber": 9, + "SectorKey": null, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + } + ], "WorkerKey": "f01234", "SectorSize": 34359738368, "PrevBeaconEntry": { "Round": 42, "Data": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], "EligibleForMining": true } ``` @@ -2098,11 +2630,43 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": 
"bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### MpoolBatchPushMessage MpoolBatchPushMessage batch pushes a unsigned message to mempool. @@ -2113,14 +2677,58 @@ Perms: sign Inputs: ```json [ - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], { "MaxFee": "0" } ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolBatchPushUntrusted MpoolBatchPushUntrusted batch pushes a signed message to mempool from untrusted sources. 
@@ -2131,12 +2739,44 @@ Perms: write Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] +] +``` + +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } ] ``` -Response: `null` - ### MpoolCheckMessages MpoolCheckMessages performs logical checks on a batch of messages @@ -2146,11 +2786,47 @@ Perms: read Inputs: ```json [ - null + [ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true + } + ] ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolCheckPendingMessages MpoolCheckPendingMessages performs logical checks for all pending messages from a given address @@ -2165,7 +2841,24 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolCheckReplaceMessages MpoolCheckReplaceMessages performs logical checks on pending messages with replacement @@ -2176,11 +2869,44 @@ Perms: read Inputs: ```json [ - null + [ + { + "Version": 42, + "To": 
"f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ] ] ``` -Response: `null` +Response: +```json +[ + [ + { + "Cid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Code": 0, + "OK": true, + "Err": "string value", + "Hint": { + "abc": 123 + } + } + ] +] +``` ### MpoolClear MpoolClear clears pending messages from the mpool @@ -2208,7 +2934,9 @@ Inputs: `null` Response: ```json { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2253,7 +2981,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolPush MpoolPush pushes a signed message to mempool. 
@@ -2425,7 +3181,35 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } +] +``` ### MpoolSetConfig MpoolSetConfig sets the mpool config to (a copy of) the supplied config @@ -2437,7 +3221,9 @@ Inputs: ```json [ { - "PriorityAddrs": null, + "PriorityAddrs": [ + "f01234" + ], "SizeLimitHigh": 123, "SizeLimitLow": 123, "ReplaceByFeeRatio": 12.3, @@ -2702,6 +3488,44 @@ Response: ### MsigCancel MsigCancel cancels a previously-proposed multisig message +It takes the following params: , + + +Perms: sign + +Inputs: +```json +[ + "f01234", + 42, + "f01234" +] +``` + +Response: +```json +{ + "Message": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "ValidNonce": true +} +``` + +### MsigCancelTxnHash +MsigCancel cancels a previously-proposed multisig message It takes the following params: , , , , , , @@ -2755,7 +3579,9 @@ Inputs: ```json [ 42, - null, + [ + "f01234" + ], 10101, "0", "f01234", @@ -2831,7 +3657,21 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "ID": 9, + "To": "f01234", + "Value": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "Approved": [ + "f01234" + ] + } +] +``` ### MsigGetVested MsigGetVested returns the amount of FIL that vested in a multisig in a certain period. 
@@ -3116,7 +3956,9 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ``` @@ -3213,9 +4055,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3232,9 +4080,15 @@ Inputs: `null` Response: ```json { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ``` @@ -3247,9 +4101,15 @@ Inputs: ```json [ { - "Peers": null, - "IPAddrs": null, - "IPSubnets": null + "Peers": [ + "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf" + ], + "IPAddrs": [ + "string value" + ], + "IPSubnets": [ + "string value" + ] } ] ``` @@ -3266,7 +4126,9 @@ Inputs: [ { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] } ] ``` @@ -3317,7 +4179,35 @@ Response: ```json { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Addrs": [] + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] +} +``` + +### NetLimit + + +Perms: read + +Inputs: +```json +[ + "string value" +] +``` + +Response: +```json +{ + "Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 } ``` @@ -3338,8 +4228,12 @@ Response: { "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", "Agent": "string value", - "Addrs": null, - "Protocols": null, + "Addrs": [ + "string value" + ], + "Protocols": [ + "string value" + ], "ConnMgrMeta": { "FirstSeen": 
"0001-01-01T00:00:00Z", "Value": 123, @@ -3353,23 +4247,142 @@ Response: } ``` -### NetPeers - - -Perms: read - -Inputs: `null` - -Response: `null` +### NetPeers + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Addrs": [ + "/ip4/52.36.61.156/tcp/1347/p2p/12D3KooWFETiESTf1v4PGUvtnxMAcEFMzLZbJGg4tjWfGEimYior" + ] + } +] +``` + +### NetPubsubScores + + +Perms: read + +Inputs: `null` + +Response: +```json +[ + { + "ID": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", + "Score": { + "Score": 12.3, + "Topics": { + "/blocks": { + "TimeInMesh": 60000000000, + "FirstMessageDeliveries": 122, + "MeshMessageDeliveries": 1234, + "InvalidMessageDeliveries": 3 + } + }, + "AppSpecificScore": 12.3, + "IPColocationFactor": 12.3, + "BehaviourPenalty": 12.3 + } + } +] +``` + +### NetSetLimit + + +Perms: admin + +Inputs: +```json +[ + "string value", + { + "Memory": 123, + "Streams": 3, + "StreamsInbound": 1, + "StreamsOutbound": 2, + "Conns": 4, + "ConnsInbound": 3, + "ConnsOutbound": 4, + "FD": 5 + } +] +``` + +Response: `{}` -### NetPubsubScores +### NetStat Perms: read -Inputs: `null` +Inputs: +```json +[ + "string value" +] +``` -Response: `null` +Response: +```json +{ + "System": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Transient": { + "NumStreamsInbound": 123, + "NumStreamsOutbound": 123, + "NumConnsInbound": 123, + "NumConnsOutbound": 123, + "NumFD": 123, + "Memory": 9 + }, + "Services": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Protocols": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + "NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + }, + "Peers": { + "abc": { + "NumStreamsInbound": 1, + "NumStreamsOutbound": 2, + 
"NumConnsInbound": 3, + "NumConnsOutbound": 4, + "NumFD": 5, + "Memory": 123 + } + } +} +``` ## Node These methods are general node management and status commands @@ -3542,7 +4555,12 @@ Perms: read Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### PaychNewPayment @@ -3554,7 +4572,19 @@ Inputs: [ "f01234", "f01234", - null + [ + { + "Amount": "0", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "MinSettle": 10101, + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] ] ``` @@ -3565,7 +4595,33 @@ Response: "WaitSentinel": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Vouchers": null + "Vouchers": [ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } + ] } ``` @@ -3631,7 +4687,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3667,7 +4728,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3703,7 +4769,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3745,7 +4816,12 @@ Response: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3767,7 +4843,36 @@ Inputs: ] ``` 
-Response: `null` +Response: +```json +[ + { + "ChannelAddr": "f01234", + "TimeLockMin": 10101, + "TimeLockMax": 10101, + "SecretPreimage": "Ynl0ZSBhcnJheQ==", + "Extra": { + "Actor": "f01234", + "Method": 1, + "Data": "Ynl0ZSBhcnJheQ==" + }, + "Lane": 42, + "Nonce": 42, + "Amount": "0", + "MinSettleHeight": 10101, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], + "Signature": { + "Type": 2, + "Data": "Ynl0ZSBhcnJheQ==" + } + } +] +``` ### PaychVoucherSubmit @@ -3792,7 +4897,12 @@ Inputs: "Nonce": 42, "Amount": "0", "MinSettleHeight": 10101, - "Merges": null, + "Merges": [ + { + "Lane": 42, + "Nonce": 42 + } + ], "Signature": { "Type": 2, "Data": "Ynl0ZSBhcnJheQ==" @@ -3860,7 +4970,15 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "Miner": "f01234", + "Epoch": 10101 + } +] +``` ### StateCall StateCall runs the given message and returns its result without any persisted changes. @@ -3962,8 +5080,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + 
"Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -4069,7 +5252,23 @@ Inputs: ```json [ 10101, - null, + [ + { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + } + ], [ { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" @@ -4087,7 +5286,138 @@ Response: "Root": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "Trace": null + "Trace": [ + { + "MsgCid": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "GasCost": { + "Message": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "GasUsed": "0", + "BaseFeeBurn": "0", + "OverEstimationBurn": "0", + "MinerPenalty": "0", + "MinerTip": "0", + "Refund": "0", + "TotalCost": "0" + }, + "ExecutionTrace": { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + 
"sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] + }, + "Error": "string value", + "Duration": 60000000000 + } + ] } ``` @@ -4160,7 +5490,7 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, 1, - null + "json raw message" ] ``` @@ -4224,7 +5554,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateGetRandomnessFromTickets StateGetRandomnessFromTickets is used to sample the chain for randomness. @@ -4249,7 +5579,7 @@ Inputs: ] ``` -Response: `null` +Response: `"Bw=="` ### StateListActors StateListActors returns the addresses of every actor in the state @@ -4271,7 +5601,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateListMessages StateListMessages looks back and returns all messages with a matching to or from address, stopping at the given height. 
@@ -4298,7 +5633,14 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } +] +``` ### StateListMiners StateListMiners returns the addresses of every miner that has claimed power in the Power Actor @@ -4320,7 +5662,12 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### StateLookupID StateLookupID retrieves the ID address of the given address @@ -4520,7 +5867,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateMinerAvailableBalance StateMinerAvailableBalance returns the portion of a miner's balance that can be withdrawn or spent @@ -4566,7 +5935,18 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "PostSubmissions": [ + 5, + 1 + ], + "DisputableProofCount": 42 + } +] +``` ### StateMinerFaults StateMinerFaults returns a bitfield indicating the faulty sectors of the given miner @@ -4624,10 +6004,14 @@ Response: "Owner": "f01234", "Worker": "f01234", "NewWorker": "f01234", - "ControlAddresses": null, + "ControlAddresses": [ + "f01234" + ], "WorkerChangeEpoch": 10101, "PeerId": "12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf", - "Multiaddrs": null, + "Multiaddrs": [ + "Ynl0ZSBhcnJheQ==" + ], "WindowPoStProofType": 8, "SectorSize": 34359738368, "WindowPoStPartitionSectors": 42, @@ -4652,7 +6036,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4694,7 +6080,33 @@ Inputs: 
] ``` -Response: `null` +Response: +```json +[ + { + "AllSectors": [ + 5, + 1 + ], + "FaultySectors": [ + 5, + 1 + ], + "RecoveringSectors": [ + 5, + 1 + ], + "LiveSectors": [ + 5, + 1 + ], + "ActiveSectors": [ + 5, + 1 + ] + } +] +``` ### StateMinerPower StateMinerPower returns the power of the indicated miner @@ -4749,7 +6161,9 @@ Inputs: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -4916,7 +6330,29 @@ Inputs: ] ``` -Response: `null` +Response: +```json +[ + { + "SectorNumber": 9, + "SealProof": 8, + "SealedCID": { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + }, + "DealIDs": [ + 5432 + ], + "Activation": 10101, + "Expiration": 10101, + "DealWeight": "0", + "VerifiedDealWeight": "0", + "InitialPledge": "0", + "ExpectedDayReward": "0", + "ExpectedStoragePledge": "0", + "SectorKeyCID": null + } +] +``` ### StateNetworkName StateNetworkName returns the name of the network the node is synced to @@ -4948,7 +6384,7 @@ Inputs: ] ``` -Response: `13` +Response: `15` ### StateReadState StateReadState returns the indicated actor's state. 
@@ -5082,8 +6518,73 @@ Response: }, "Error": "string value", "Duration": 60000000000, - "GasCharges": null, - "Subcalls": null + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": [ + { + "Msg": { + "Version": 42, + "To": "f01234", + "From": "f01234", + "Nonce": 42, + "Value": "0", + "GasLimit": 9, + "GasFeeCap": "0", + "GasPremium": "0", + "Method": 1, + "Params": "Ynl0ZSBhcnJheQ==", + "CID": { + "/": "bafy2bzacebbpdegvr3i4cosewthysg5xkxpqfn2wfcz6mv2hmoktwbdxkax4s" + } + }, + "MsgRct": { + "ExitCode": 0, + "Return": "Ynl0ZSBhcnJheQ==", + "GasUsed": 9 + }, + "Error": "string value", + "Duration": 60000000000, + "GasCharges": [ + { + "Name": "string value", + "loc": [ + { + "File": "string value", + "Line": 123, + "Function": "string value" + } + ], + "tg": 9, + "cg": 9, + "sg": 9, + "vtg": 9, + "vcg": 9, + "vsg": 9, + "tt": 60000000000, + "ex": {} + } + ], + "Subcalls": null + } + ] }, "Error": "string value", "Duration": 60000000000 @@ -5216,14 +6717,17 @@ Response: "SealedCID": { "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Activation": 10101, "Expiration": 10101, "DealWeight": "0", "VerifiedDealWeight": "0", "InitialPledge": "0", "ExpectedDayReward": "0", - "ExpectedStoragePledge": "0" + "ExpectedStoragePledge": "0", + "SectorKeyCID": null } ``` @@ -5289,7 +6793,9 @@ Response: "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" }, "SealRandEpoch": 10101, - "DealIDs": null, + "DealIDs": [ + 5432 + ], "Expiration": 10101, "ReplaceCapacity": true, "ReplaceSectorDeadline": 42, @@ -5532,9 +7038,23 @@ Response: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + 
"Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5589,7 +7109,26 @@ Inputs: `null` Response: ```json { - "ActiveSyncs": null, + "ActiveSyncs": [ + { + "WorkerID": 42, + "Base": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Target": { + "Cids": null, + "Blocks": null, + "Height": 0 + }, + "Stage": 1, + "Height": 10101, + "Start": "0001-01-01T00:00:00Z", + "End": "0001-01-01T00:00:00Z", + "Message": "string value" + } + ], "VMApplied": 42 } ``` @@ -5614,9 +7153,23 @@ Inputs: "WinCount": 9, "VRFProof": "Ynl0ZSBhcnJheQ==" }, - "BeaconEntries": null, - "WinPoStProof": null, - "Parents": null, + "BeaconEntries": [ + { + "Round": 42, + "Data": "Ynl0ZSBhcnJheQ==" + } + ], + "WinPoStProof": [ + { + "PoStProof": 8, + "ProofBytes": "Ynl0ZSBhcnJheQ==" + } + ], + "Parents": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], "ParentWeight": "0", "Height": 10101, "ParentStateRoot": { @@ -5640,8 +7193,16 @@ Inputs: "ForkSignaling": 42, "ParentBaseFee": "0" }, - "BlsMessages": null, - "SecpkMessages": null + "BlsMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ], + "SecpkMessages": [ + { + "/": "bafy2bzacea3wsdh6y3a36tb3skempjoxqpuyompjbmfeyf34fi3uy6uue42v4" + } + ] } ] ``` @@ -5802,7 +7363,12 @@ Perms: write Inputs: `null` -Response: `null` +Response: +```json +[ + "f01234" +] +``` ### WalletNew WalletNew creates a new address in the wallet with the given sigType. diff --git a/documentation/en/cli-lotus-miner.md b/documentation/en/cli-lotus-miner.md index 5a0888621..6543beab7 100644 --- a/documentation/en/cli-lotus-miner.md +++ b/documentation/en/cli-lotus-miner.md @@ -7,7 +7,7 @@ USAGE: lotus-miner [global options] command [command options] [arguments...] 
VERSION: - 1.13.1-dev + 1.15.0-dev COMMANDS: init Initialize a lotus miner repo @@ -590,7 +590,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` @@ -629,6 +630,7 @@ COMMANDS: reset-blocklist Remove all entries from the miner's piece CID blocklist set-seal-duration Set the expected time, in minutes, that you expect sealing sectors to take. Deals that start before this duration will be rejected. pending-publish list deals waiting in publish queue + retry-publish retry publishing a deal help, h Shows a list of commands or help for one command OPTIONS: @@ -825,6 +827,19 @@ OPTIONS: ``` +### lotus-miner storage-deals retry-publish +``` +NAME: + lotus-miner storage-deals retry-publish - retry publishing a deal + +USAGE: + lotus-miner storage-deals retry-publish [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner retrieval-deals ``` NAME: @@ -959,10 +974,11 @@ USAGE: lotus-miner data-transfers command [command options] [arguments...] COMMANDS: - list List ongoing data transfers for this miner - restart Force restart a stalled data transfer - cancel Force cancel a data transfer - help, h Shows a list of commands or help for one command + list List ongoing data transfers for this miner + restart Force restart a stalled data transfer + cancel Force cancel a data transfer + diagnostics Get detailed diagnostics on active transfers with a specific peer + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1019,6 +1035,19 @@ OPTIONS: ``` +### lotus-miner data-transfers diagnostics +``` +NAME: + lotus-miner data-transfers diagnostics - Get detailed diagnostics on active transfers with a specific peer + +USAGE: + lotus-miner data-transfers diagnostics [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner dagstore ``` NAME: @@ -1131,6 +1160,8 @@ COMMANDS: reachability Print information about reachability from the internet bandwidth Print bandwidth usage information block Manage network connection gating rules + stat Report resource usage for a scope + limit Get or set resource limits for a scope help, h Shows a list of commands or help for one command OPTIONS: @@ -1399,6 +1430,58 @@ OPTIONS: ``` +### lotus-miner net stat +``` +NAME: + lotus-miner net stat - Report resource usage for a scope + +USAGE: + lotus-miner net stat [command options] scope + +DESCRIPTION: + Report resource usage for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + - all -- reports the resource usage for all currently active scopes. + + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus-miner net limit +``` +NAME: + lotus-miner net limit - Get or set resource limits for a scope + +USAGE: + lotus-miner net limit [command options] scope [limit] + +DESCRIPTION: + Get or set resource limits for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + + The limit is json-formatted, with the same structure as the limits file. 
+ + +OPTIONS: + --set set the limit for a scope (default: false) + --help, -h show help (default: false) + +``` + ## lotus-miner pieces ``` NAME: @@ -1445,7 +1528,8 @@ USAGE: lotus-miner pieces list-cids [command options] [arguments...] OPTIONS: - --help, -h show help (default: false) + --verbose, -v (default: false) + --help, -h show help (default: false) ``` @@ -1484,23 +1568,25 @@ USAGE: lotus-miner sectors command [command options] [arguments...] COMMANDS: - status Get the seal status of a sector by its number - list List sectors - refs List References to sectors - update-state ADVANCED: manually update the state of a sector, this may aid in error recovery - pledge store random data in a sector - check-expire Inspect expiring sectors - expired Get or cleanup expired sectors - renew Renew expiring sectors while not exceeding each sector's max life - extend Extend sector expiration - terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) - remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) - mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals - seal Manually start sealing a sector (filling any unused space with junk) - set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts - get-cc-collateral Get the collateral required to pledge a committed capacity sector - batching manage batch sector operations - help, h Shows a list of commands or help for one command + status Get the seal status of a sector by its number + list List sectors + refs List References to sectors + update-state ADVANCED: manually update the state of a sector, this may aid in error recovery + pledge store random data in a sector + check-expire Inspect expiring sectors + expired Get or cleanup expired sectors + renew Renew expiring sectors while not exceeding each 
sector's max life + extend Extend sector expiration + terminate Terminate sector on-chain then remove (WARNING: This means losing power and collateral for the removed sector) + remove Forcefully remove a sector (WARNING: This means losing power and collateral for the removed sector (use 'terminate' for lower penalty)) + snap-up Mark a committed capacity sector to be filled with deals + mark-for-upgrade Mark a committed capacity sector for replacement by a sector with deals + seal Manually start sealing a sector (filling any unused space with junk) + set-seal-delay Set the time, in minutes, that a new sector waits for deals before sealing starts + get-cc-collateral Get the collateral required to pledge a committed capacity sector + batching manage batch sector operations + match-pending-pieces force a refreshed match of pending pieces to open sectors without manually waiting for more deals + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1716,6 +1802,19 @@ OPTIONS: ``` +### lotus-miner sectors snap-up +``` +NAME: + lotus-miner sectors snap-up - Mark a committed capacity sector to be filled with deals + +USAGE: + lotus-miner sectors snap-up [command options] + +OPTIONS: + --help, -h show help (default: false) + +``` + ### lotus-miner sectors mark-for-upgrade ``` NAME: @@ -1816,6 +1915,19 @@ OPTIONS: ``` +### lotus-miner sectors match-pending-pieces +``` +NAME: + lotus-miner sectors match-pending-pieces - force a refreshed match of pending pieces to open sectors without manually waiting for more deals + +USAGE: + lotus-miner sectors match-pending-pieces [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner proving ``` NAME: @@ -1899,9 +2011,10 @@ USAGE: lotus-miner proving check [command options] OPTIONS: - --only-bad print only bad sectors (default: false) - --slow run slower checks (default: false) - --help, -h show help (default: false) + --only-bad print only bad sectors (default: false) + --slow run slower checks (default: false) + --storage-id value filter sectors by storage path (path id) + --help, -h show help (default: false) ``` @@ -1924,6 +2037,7 @@ COMMANDS: list list local storage paths find find sector in the storage system cleanup trigger cleanup actions + locks show active sector locks help, h Shows a list of commands or help for one command OPTIONS: @@ -1967,6 +2081,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` @@ -2031,6 +2147,19 @@ OPTIONS: ``` +### lotus-miner storage locks +``` +NAME: + lotus-miner storage locks - show active sector locks + +USAGE: + lotus-miner storage locks [command options] [arguments...] + +OPTIONS: + --help, -h show help (default: false) + +``` + ## lotus-miner sealing ``` NAME: diff --git a/documentation/en/cli-lotus-worker.md b/documentation/en/cli-lotus-worker.md index cec744b14..5969323c2 100644 --- a/documentation/en/cli-lotus-worker.md +++ b/documentation/en/cli-lotus-worker.md @@ -7,7 +7,7 @@ USAGE: lotus-worker [global options] command [command options] [arguments...] 
VERSION: - 1.13.1-dev + 1.15.0-dev COMMANDS: run Start lotus worker @@ -15,6 +15,7 @@ COMMANDS: storage manage sector storage set Manage worker settings wait-quiet Block until all running tasks exit + resources Manage resource table overrides tasks Manage task processing help, h Shows a list of commands or help for one command @@ -43,6 +44,8 @@ OPTIONS: --unseal enable unsealing (32G sectors: 1 core, 128GiB Memory) (default: true) --precommit2 enable precommit2 (32G sectors: all cores, 96GiB Memory) (default: true) --commit enable commit (32G sectors: all cores or GPUs, 128GiB Memory + 64GiB swap) (default: true) + --replica-update enable replica update (default: true) + --prove-replica-update2 enable prove replica update 2 (default: true) --parallel-fetch-limit value maximum fetch operations to run in parallel (default: 5) --timeout value used when 'listen' is unspecified. must be a valid duration recognized by golang's time.ParseDuration function (default: "30m") --help, -h show help (default: false) @@ -94,6 +97,8 @@ OPTIONS: --seal (for init) use path for sealing (default: false) --store (for init) use path for long-term storage (default: false) --max-storage value (for init) limit storage space for sectors (expensive for very large paths!) + --groups value path group names + --allow-to value path groups allowed to pull data from this path (allow all if not specified) --help, -h show help (default: false) ``` @@ -125,6 +130,21 @@ OPTIONS: ``` +## lotus-worker resources +``` +NAME: + lotus-worker resources - Manage resource table overrides + +USAGE: + lotus-worker resources [command options] [arguments...] 
+ +OPTIONS: + --all print all resource envvars (default: false) + --default print default resource envvars (default: false) + --help, -h show help (default: false) + +``` + ## lotus-worker tasks ``` NAME: diff --git a/documentation/en/cli-lotus.md b/documentation/en/cli-lotus.md index c3f45b533..54c0d36df 100644 --- a/documentation/en/cli-lotus.md +++ b/documentation/en/cli-lotus.md @@ -7,7 +7,7 @@ USAGE: lotus [global options] command [command options] [arguments...] VERSION: - 1.13.1-dev + 1.15.0-dev COMMANDS: daemon Start a lotus daemon process @@ -425,7 +425,10 @@ COMMANDS: stat Print information about a locally stored file (piece size, etc) RETRIEVAL: find Find data in the network + retrieval-ask Get a miner's retrieval ask retrieve Retrieve data from network + cat Show data from network + ls List object links cancel-retrieval Cancel a retrieval deal by deal ID; this also cancels the associated transfer list-retrievals List retrieval market deals STORAGE: @@ -533,6 +536,23 @@ OPTIONS: ``` +### lotus client retrieval-ask +``` +NAME: + lotus client retrieval-ask - Get a miner's retrieval ask + +USAGE: + lotus client retrieval-ask [command options] [minerAddress] [data CID] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --size value data size in bytes (default: 0) + --help, -h show help (default: false) + +``` + ### lotus client retrieve ``` NAME: @@ -544,12 +564,94 @@ USAGE: CATEGORY: RETRIEVAL +DESCRIPTION: + Retrieve data from the Filecoin network. + +The retrieve command will attempt to find a provider make a retrieval deal with +them. In case a provider can't be found, it can be specified with the --provider +flag. + +By default the data will be interpreted as DAG-PB UnixFSv1 File. Alternatively +a CAR file containing the raw IPLD graph can be exported by setting the --car +flag. + +Partial Retrieval: + +The --data-selector flag can be used to specify a sub-graph to fetch. 
The +selector can be specified as either IPLD datamodel text-path selector, or IPLD +json selector. + +In case of unixfs retrieval, the selector must point at a single root node, and +match the entire graph under that node. + +In case of CAR retrieval, the selector must have one common "sub-root" node. + +Examples: + +- Retrieve a file by CID + $ lotus client retrieve Qm... my-file.txt + +- Retrieve a file by CID from f0123 + $ lotus client retrieve --provider f0123 Qm... my-file.txt + +- Retrieve a first file from a specified directory + $ lotus client retrieve --data-selector /Links/0/Hash Qm... my-file.txt + + OPTIONS: + --car Export to a car file instead of a regular file (default: false) + --data-selector value, --datamodel-path-selector value IPLD datamodel text-path selector, or IPLD json selector + --car-export-merkle-proof (requires --data-selector and --car) Export data-selector merkle proof (default: false) + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client cat +``` +NAME: + lotus client cat - Show data from network + +USAGE: + lotus client cat [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector --from value address to send transactions from - --car export to a car file instead of a regular file (default: false) - --miner value miner address for retrieval, if not present it'll use local discovery - --datamodel-path-selector value a rudimentary (DM-level-only) text-path selector, allowing for sub-selection within a deal - --maxPrice value maximum price the 
client is willing to consider (default: 0.01 FIL) + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) + --pieceCid value require data to be retrieved from a specific Piece CID + --allow-local (default: false) + --help, -h show help (default: false) + +``` + +### lotus client ls +``` +NAME: + lotus client ls - List object links + +USAGE: + lotus client ls [command options] [dataCid] + +CATEGORY: + RETRIEVAL + +OPTIONS: + --ipld list IPLD datamodel links (default: false) + --depth value list links recursively up to the specified depth (default: 1) + --data-selector value IPLD datamodel text-path selector, or IPLD json selector + --from value address to send transactions from + --provider value, --miner value provider to use for retrieval, if not present it'll use local discovery + --maxPrice value maximum price the client is willing to consider (default: 0 FIL) --pieceCid value require data to be retrieved from a specific Piece CID --allow-local (default: false) --help, -h show help (default: false) @@ -857,6 +959,7 @@ COMMANDS: propose Propose a multisig transaction propose-remove Propose to remove a signer approve Approve a multisig message + cancel Cancel a multisig message add-propose Propose to add a signer add-approve Approve a message to add a signer add-cancel Cancel a message to add a signer @@ -952,6 +1055,20 @@ OPTIONS: ``` +### lotus msig cancel +``` +NAME: + lotus msig cancel - Cancel a multisig message + +USAGE: + lotus msig cancel [command options] [destination value [methodId methodParams]] + +OPTIONS: + --from value account to send the cancel message from + --help, -h show help (default: false) + +``` + ### lotus msig add-propose ``` NAME: @@ -1117,12 +1234,12 @@ USAGE: lotus filplus command [command options] [arguments...] 
COMMANDS: - grant-datacap give allowance to the specified verified client address - list-notaries list all notaries - list-clients list all verified clients - check-client-datacap check verified client remaining bytes - check-notaries-datacap check notaries remaining bytes - help, h Shows a list of commands or help for one command + grant-datacap give allowance to the specified verified client address + list-notaries list all notaries + list-clients list all verified clients + check-client-datacap check verified client remaining bytes + check-notary-datacap check a notary's remaining bytes + help, h Shows a list of commands or help for one command OPTIONS: --help, -h show help (default: false) @@ -1183,13 +1300,13 @@ OPTIONS: ``` -### lotus filplus check-notaries-datacap +### lotus filplus check-notary-datacap ``` NAME: - lotus filplus check-notaries-datacap - check notaries remaining bytes + lotus filplus check-notary-datacap - check a notary's remaining bytes USAGE: - lotus filplus check-notaries-datacap [command options] [arguments...] + lotus filplus check-notary-datacap [command options] [arguments...] OPTIONS: --help, -h show help (default: false) @@ -1580,8 +1697,18 @@ OPTIONS: --help, -h show help (default: false) ``` -# nage + +### lotus mpool manage ``` +NAME: + lotus mpool manage - + +USAGE: + lotus mpool manage [command options] [arguments...] 
+ +OPTIONS: + --help, -h show help (default: false) + ``` ## lotus state @@ -2450,7 +2577,8 @@ CATEGORY: DEVELOPER OPTIONS: - --help, -h show help (default: false) + --timeout value duration to wait till fail (default: 30s) + --help, -h show help (default: false) ``` @@ -2488,6 +2616,8 @@ COMMANDS: reachability Print information about reachability from the internet bandwidth Print bandwidth usage information block Manage network connection gating rules + stat Report resource usage for a scope + limit Get or set resource limits for a scope help, h Shows a list of commands or help for one command OPTIONS: @@ -2756,6 +2886,58 @@ OPTIONS: ``` +### lotus net stat +``` +NAME: + lotus net stat - Report resource usage for a scope + +USAGE: + lotus net stat [command options] scope + +DESCRIPTION: + Report resource usage for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + - all -- reports the resource usage for all currently active scopes. + + +OPTIONS: + --help, -h show help (default: false) + +``` + +### lotus net limit +``` +NAME: + lotus net limit - Get or set resource limits for a scope + +USAGE: + lotus net limit [command options] scope [limit] + +DESCRIPTION: + Get or set resource limits for a scope. + + The scope can be one of the following: + - system -- reports the system aggregate resource usage. + - transient -- reports the transient resource usage. + - svc: -- reports the resource usage of a specific service. + - proto: -- reports the resource usage of a specific protocol. + - peer: -- reports the resource usage of a specific peer. + + The limit is json-formatted, with the same structure as the limits file. 
+ + +OPTIONS: + --set set the limit for a scope (default: false) + --help, -h show help (default: false) + +``` + ## lotus sync ``` NAME: diff --git a/documentation/en/default-lotus-miner-config.toml b/documentation/en/default-lotus-miner-config.toml index d402f65ed..47ac9f1e7 100644 --- a/documentation/en/default-lotus-miner-config.toml +++ b/documentation/en/default-lotus-miner-config.toml @@ -207,6 +207,17 @@ # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGE #SimultaneousTransfersForStorage = 20 + # The maximum number of simultaneous data transfers from any single client + # for storage deals. + # Unset by default (0), and values higher than SimultaneousTransfersForStorage + # will have no effect; i.e. the total number of simultaneous data transfers + # across all storage clients is bound by SimultaneousTransfersForStorage + # regardless of this number. + # + # type: uint64 + # env var: LOTUS_DEALMAKING_SIMULTANEOUSTRANSFERSFORSTORAGEPERCLIENT + #SimultaneousTransfersForStoragePerClient = 0 + # The maximum number of parallel online data transfers for retrieval deals # # type: uint64 @@ -413,6 +424,12 @@ # env var: LOTUS_STORAGE_ALLOWUNSEAL #AllowUnseal = true + # env var: LOTUS_STORAGE_ALLOWREPLICAUPDATE + #AllowReplicaUpdate = true + + # env var: LOTUS_STORAGE_ALLOWPROVEREPLICAUPDATE2 + #AllowProveReplicaUpdate2 = true + # env var: LOTUS_STORAGE_RESOURCEFILTERING #ResourceFiltering = "hardware" @@ -533,6 +550,14 @@ # env var: LOTUS_DAGSTORE_MAXCONCURRENTREADYFETCHES #MaxConcurrentReadyFetches = 0 + # The maximum amount of unseals that can be processed simultaneously + # from the storage subsystem. 0 means unlimited. + # Default value: 0 (unlimited). + # + # type: int + # env var: LOTUS_DAGSTORE_MAXCONCURRENTUNSEALS + #MaxConcurrentUnseals = 5 + # The maximum number of simultaneous inflight API calls to the storage # subsystem. # Default value: 100. 
diff --git a/documentation/misc/actors_version_checklist.md b/documentation/misc/actors_version_checklist.md index 1fae4bd8a..5e6038c2b 100644 --- a/documentation/misc/actors_version_checklist.md +++ b/documentation/misc/actors_version_checklist.md @@ -3,17 +3,11 @@ - [ ] Import new actors - [ ] Define upgrade heights in `build/params_` - [ ] Generate adapters - - [ ] Add the new version in `chain/actors/agen/main.go` + - [ ] Update `gen/inlinegen-data.json` + - [ ] Update `chain/actors/version.go` - [ ] Update adapter code in `chain/actors/builtin` if needed -- [ ] Update `chain/actors/policy/policy.go` -- [ ] Update `chain/actors/version.go` -- [ ] Register in `chain/vm/invoker.go` -- [ ] Register in `chain/vm/mkactor.go` -- [ ] Update `chain/types/state.go` -- [ ] Update `chain/state/statetree.go` (New / Load) -- [ ] Update `chain/stmgr/forks.go` + - [ ] Run `make actors-gen` +- [ ] Update `chain/consensus/filcns/upgrades.go` - [ ] Schedule - [ ] Migration -- [ ] Update upgrade schedule in `api/test/test.go` and `chain/sync_test.go` -- [ ] Update `NewestNetworkVersion` in `build/params_shared_vals.go` -- [ ] Register in init in `chain/stmgr/utils.go` +- [ ] Update upgrade schedule in `chain/sync_test.go` diff --git a/extern/filecoin-ffi b/extern/filecoin-ffi index 4e75bb4a2..e660df561 160000 --- a/extern/filecoin-ffi +++ b/extern/filecoin-ffi @@ -1 +1 @@ -Subproject commit 4e75bb4a20d185bc912939c60fdcdc6c41fd8e60 +Subproject commit e660df5616e397b2d8ac316f45ddfa7a44637971 diff --git a/extern/sector-storage/cgroups.go b/extern/sector-storage/cgroups.go new file mode 100644 index 000000000..e2ec0564e --- /dev/null +++ b/extern/sector-storage/cgroups.go @@ -0,0 +1,12 @@ +//go:build !linux +// +build !linux + +package sectorstorage + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + return 0, 0, 0, 0, nil +} diff --git 
a/extern/sector-storage/cgroups_linux.go b/extern/sector-storage/cgroups_linux.go new file mode 100644 index 000000000..38fe88f19 --- /dev/null +++ b/extern/sector-storage/cgroups_linux.go @@ -0,0 +1,117 @@ +//go:build linux +// +build linux + +package sectorstorage + +import ( + "bufio" + "bytes" + "math" + "os" + "path/filepath" + + "github.com/containerd/cgroups" + cgroupv2 "github.com/containerd/cgroups/v2" +) + +func cgroupV2MountPoint() (string, error) { + f, err := os.Open("/proc/self/mountinfo") + if err != nil { + return "", err + } + defer f.Close() //nolint + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + fields := bytes.Fields(scanner.Bytes()) + if len(fields) >= 9 && bytes.Equal(fields[8], []byte("cgroup2")) { + return string(fields[4]), nil + } + } + return "", cgroups.ErrMountPointNotExist +} + +func cgroupV1Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + path := cgroups.NestedPath("") + if pid := os.Getpid(); pid == 1 { + path = cgroups.RootPath + } + c, err := cgroups.Load(cgroups.SingleSubsystem(cgroups.V1, cgroups.Memory), path) + if err != nil { + return 0, 0, 0, 0, err + } + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + if stats.Memory == nil { + return 0, 0, 0, 0, nil + } + if stats.Memory.Usage != nil { + memoryMax = stats.Memory.Usage.Limit + // Exclude cached files + memoryUsed = stats.Memory.Usage.Usage - stats.Memory.InactiveFile - stats.Memory.ActiveFile + } + if stats.Memory.Swap != nil { + swapMax = stats.Memory.Swap.Limit + swapUsed = stats.Memory.Swap.Usage + } + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2MemFromPath(mp, path string) (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + c, err := cgroupv2.LoadManager(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + + stats, err := c.Stat() + if err != nil { + return 0, 0, 0, 0, err + } + + if stats.Memory != nil { + memoryMax = stats.Memory.UsageLimit + // Exclude memory used 
caching files + memoryUsed = stats.Memory.Usage - stats.Memory.File + swapMax = stats.Memory.SwapLimit + swapUsed = stats.Memory.SwapUsage + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} + +func cgroupV2Mem() (memoryMax, memoryUsed, swapMax, swapUsed uint64, err error) { + memoryMax = math.MaxUint64 + swapMax = math.MaxUint64 + + path, err := cgroupv2.PidGroupPath(os.Getpid()) + if err != nil { + return 0, 0, 0, 0, err + } + + mp, err := cgroupV2MountPoint() + if err != nil { + return 0, 0, 0, 0, err + } + + for path != "/" { + cgMemoryMax, cgMemoryUsed, cgSwapMax, cgSwapUsed, err := cgroupV2MemFromPath(mp, path) + if err != nil { + return 0, 0, 0, 0, err + } + if cgMemoryMax != 0 && cgMemoryMax < memoryMax { + log.Debugf("memory limited by cgroup %s: %v", path, cgMemoryMax) + memoryMax = cgMemoryMax + memoryUsed = cgMemoryUsed + } + if cgSwapMax != 0 && cgSwapMax < swapMax { + log.Debugf("swap limited by cgroup %s: %v", path, cgSwapMax) + swapMax = cgSwapMax + swapUsed = cgSwapUsed + } + path = filepath.Dir(path) + } + + return memoryMax, memoryUsed, swapMax, swapUsed, nil +} diff --git a/extern/sector-storage/ffiwrapper/basicfs/fs.go b/extern/sector-storage/ffiwrapper/basicfs/fs.go index a833f728c..3d19e49ef 100644 --- a/extern/sector-storage/ffiwrapper/basicfs/fs.go +++ b/extern/sector-storage/ffiwrapper/basicfs/fs.go @@ -34,6 +34,12 @@ func (b *Provider) AcquireSector(ctx context.Context, id storage.SectorRef, exis if err := os.Mkdir(filepath.Join(b.Root, storiface.FTCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint return storiface.SectorPaths{}, nil, err } + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUpdate.String()), 0755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err + } + if err := os.Mkdir(filepath.Join(b.Root, storiface.FTUpdateCache.String()), 0755); err != nil && !os.IsExist(err) { // nolint + return storiface.SectorPaths{}, nil, err + } done := func() {} diff --git 
a/extern/sector-storage/ffiwrapper/sealer_cgo.go b/extern/sector-storage/ffiwrapper/sealer_cgo.go index 59770ec9a..c35cefd56 100644 --- a/extern/sector-storage/ffiwrapper/sealer_cgo.go +++ b/extern/sector-storage/ffiwrapper/sealer_cgo.go @@ -7,6 +7,9 @@ import ( "bufio" "bytes" "context" + "crypto/rand" + "encoding/base64" + "encoding/json" "io" "math/bits" "os" @@ -21,6 +24,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" + "github.com/detailyang/go-fallocate" commpffi "github.com/filecoin-project/go-commp-utils/ffiwrapper" "github.com/filecoin-project/go-commp-utils/zerocomm" "github.com/filecoin-project/lotus/extern/sector-storage/fr32" @@ -250,6 +254,23 @@ func (sb *Sealer) pieceCid(spt abi.RegisteredSealProof, in []byte) (cid.Cid, err return pieceCID, werr() } +func (sb *Sealer) tryDecodeUpdatedReplica(ctx context.Context, sector storage.SectorRef, commD cid.Cid, unsealedPath string) (bool, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUpdate|storiface.FTSealed|storiface.FTCache, storiface.FTNone, storiface.PathStorage) + if xerrors.Is(err, storiface.ErrSectorNotFound) { + return false, nil + } else if err != nil { + return false, xerrors.Errorf("reading updated replica: %w", err) + } + defer done() + + // Sector data stored in replica update + updateProof, err := sector.ProofType.RegisteredUpdateProof() + if err != nil { + return false, err + } + return true, ffi.SectorUpdate.DecodeFrom(updateProof, unsealedPath, paths.Update, paths.Sealed, paths.Cache, commD) +} + func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, commd cid.Cid) error { ssize, err := sector.ProofType.SectorSize() if err != nil { @@ -300,6 +321,16 @@ func (sb *Sealer) UnsealPiece(ctx context.Context, sector storage.SectorRef, off return nil } + // If piece data stored in 
updated replica decode whole sector + decoded, err := sb.tryDecodeUpdatedReplica(ctx, sector, commd, unsealedPath.Unsealed) + if err != nil { + return xerrors.Errorf("decoding sector from replica: %w", err) + } + if decoded { + return pf.MarkAllocated(0, maxPieceSize) + } + + // Piece data sealed in sector srcPaths, srcDone, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTCache|storiface.FTSealed, storiface.FTNone, storiface.PathStorage) if err != nil { return xerrors.Errorf("acquire sealed sector paths: %w", err) @@ -530,9 +561,19 @@ func (sb *Sealer) SealPreCommit1(ctx context.Context, sector storage.SectorRef, if err != nil { return nil, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } - return p1o, nil + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(p1o, &p1odec); err != nil { + return nil, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + p1odec["_lotus_SealRandomness"] = ticket + + return json.Marshal(&p1odec) } +var PC2CheckRounds = 3 + func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, phase1Out storage.PreCommit1Out) (storage.SectorCids, error) { paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache, 0, storiface.PathSealing) if err != nil { @@ -545,6 +586,50 @@ func (sb *Sealer) SealPreCommit2(ctx context.Context, sector storage.SectorRef, return storage.SectorCids{}, xerrors.Errorf("presealing sector %d (%s): %w", sector.ID.Number, paths.Unsealed, err) } + ssize, err := sector.ProofType.SectorSize() + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("get ssize: %w", err) + } + + p1odec := map[string]interface{}{} + if err := json.Unmarshal(phase1Out, &p1odec); err != nil { + return storage.SectorCids{}, xerrors.Errorf("unmarshaling pc1 output: %w", err) + } + + var ticket abi.SealRandomness + ti, found := p1odec["_lotus_SealRandomness"] + + if found { + ticket, err = 
base64.StdEncoding.DecodeString(ti.(string)) + if err != nil { + return storage.SectorCids{}, xerrors.Errorf("decoding ticket: %w", err) + } + + for i := 0; i < PC2CheckRounds; i++ { + var sd [32]byte + _, _ = rand.Read(sd[:]) + + _, err := ffi.SealCommitPhase1( + sector.ProofType, + sealedCID, + unsealedCID, + paths.Cache, + paths.Sealed, + sector.ID.Number, + sector.ID.Miner, + ticket, + sd[:], + []abi.PieceInfo{{Size: abi.PaddedPieceSize(ssize), PieceCID: unsealedCID}}, + ) + if err != nil { + log.Warn("checking PreCommit failed: ", err) + log.Warnf("num:%d tkt:%v seed:%v sealedCID:%v, unsealedCID:%v", sector.ID.Number, ticket, sd[:], sealedCID, unsealedCID) + + return storage.SectorCids{}, xerrors.Errorf("checking PreCommit failed: %w", err) + } + } + } + return storage.SectorCids{ Unsealed: unsealedCID, Sealed: sealedCID, @@ -582,6 +667,108 @@ func (sb *Sealer) SealCommit2(ctx context.Context, sector storage.SectorRef, pha return ffi.SealCommitPhase2(phase1Out, sector.ID.Number, sector.ID.Miner) } +func (sb *Sealer) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + empty := storage.ReplicaUpdateOut{} + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + if err != nil { + return empty, xerrors.Errorf("failed to acquire sector paths: %w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + s, err := os.Stat(paths.Sealed) + if err != nil { + return empty, err + } + sealedSize := s.Size() + + u, err := os.OpenFile(paths.Update, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return empty, xerrors.Errorf("ensuring updated replica file exists: %w", err) + } + if err := fallocate.Fallocate(u, 0, sealedSize); err != nil { + return empty, xerrors.Errorf("allocating space for replica update file: %w", 
err) + } + + if err := u.Close(); err != nil { + return empty, err + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint + if os.IsExist(err) { + log.Warnf("existing cache in %s; removing", paths.UpdateCache) + + if err := os.RemoveAll(paths.UpdateCache); err != nil { + return empty, xerrors.Errorf("remove existing sector cache from %s (sector %d): %w", paths.UpdateCache, sector, err) + } + + if err := os.Mkdir(paths.UpdateCache, 0755); err != nil { // nolint:gosec + return empty, xerrors.Errorf("mkdir cache path after cleanup: %w", err) + } + } else { + return empty, err + } + } + sealed, unsealed, err := ffi.SectorUpdate.EncodeInto(updateProofType, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache, paths.Unsealed, pieces) + if err != nil { + return empty, xerrors.Errorf("failed to update replica %d with new deal data: %w", sector.ID.Number, err) + } + return storage.ReplicaUpdateOut{NewSealed: sealed, NewUnsealed: unsealed}, nil +} + +func (sb *Sealer) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTNone, storiface.PathSealing) + if err != nil { + return nil, xerrors.Errorf("failed to acquire sector paths: %w", err) + } + defer done() + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + + vanillaProofs, err := ffi.SectorUpdate.GenerateUpdateVanillaProofs(updateProofType, sectorKey, newSealed, newUnsealed, paths.Update, paths.UpdateCache, paths.Sealed, paths.Cache) + if err != nil { + return nil, xerrors.Errorf("failed to generate proof of replica update for sector %d: %w", sector.ID.Number, err) + } + return vanillaProofs, nil +} + +func (sb *Sealer) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, 
vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.GenerateUpdateProofWithVanilla(updateProofType, sectorKey, newSealed, newUnsealed, vanillaProofs) +} + +func (sb *Sealer) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + paths, done, err := sb.sectors.AcquireSector(ctx, sector, storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed, storiface.PathSealing) + defer done() + if err != nil { + return xerrors.Errorf("failed to acquire sector paths: %w", err) + } + + s, err := os.Stat(paths.Update) + if err != nil { + return xerrors.Errorf("measuring update file size: %w", err) + } + sealedSize := s.Size() + e, err := os.OpenFile(paths.Sealed, os.O_RDWR|os.O_CREATE, 0644) // nolint:gosec + if err != nil { + return xerrors.Errorf("ensuring sector key file exists: %w", err) + } + if err := fallocate.Fallocate(e, 0, sealedSize); err != nil { + return xerrors.Errorf("allocating space for sector key file: %w", err) + } + if err := e.Close(); err != nil { + return err + } + + updateProofType := abi.SealProofInfos[sector.ProofType].UpdateProof + return ffi.SectorUpdate.RemoveData(updateProofType, paths.Sealed, paths.Cache, paths.Update, paths.UpdateCache, paths.Unsealed, commD) +} + +func (sb *Sealer) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + func (sb *Sealer) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) error { ssize, err := sector.ProofType.SectorSize() if err != nil { @@ -666,6 +853,14 @@ func (sb *Sealer) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, return xerrors.Errorf("not supported at this layer") } +func (sb *Sealer) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + 
return xerrors.Errorf("not supported at this layer") +} + +func (sb *Sealer) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + return xerrors.Errorf("not supported at this layer") +} + func (sb *Sealer) Remove(ctx context.Context, sector storage.SectorRef) error { return xerrors.Errorf("not supported at this layer") // happens in localworker } diff --git a/extern/sector-storage/ffiwrapper/sealer_test.go b/extern/sector-storage/ffiwrapper/sealer_test.go index 509efe532..cf8978464 100644 --- a/extern/sector-storage/ffiwrapper/sealer_test.go +++ b/extern/sector-storage/ffiwrapper/sealer_test.go @@ -19,6 +19,7 @@ import ( proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -180,16 +181,16 @@ func (s *seal) unseal(t *testing.T, sb *Sealer, sp *basicfs.Provider, si storage func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { randomness := abi.PoStRandomness{0, 9, 2, 7, 6, 5, 4, 3, 2, 1, 0, 9, 8, 7, 6, 45, 3, 2, 1, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 9, 7} - sis := make([]proof2.SectorInfo, len(seals)) + xsis := make([]proof7.ExtendedSectorInfo, len(seals)) for i, s := range seals { - sis[i] = proof2.SectorInfo{ + xsis[i] = proof7.ExtendedSectorInfo{ SealProof: s.ref.ProofType, SectorNumber: s.ref.ID.Number, SealedCID: s.cids.Sealed, } } - proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, sis, randomness) + proofs, skp, err := sealer.GenerateWindowPoSt(context.TODO(), seals[0].ref.ID.Miner, xsis, randomness) if len(skipped) > 0 { require.Error(t, err) require.EqualValues(t, skipped, skp) @@ -200,7 +201,16 @@ func post(t *testing.T, sealer *Sealer, skipped []abi.SectorID, seals ...seal) { t.Fatalf("%+v", err) } - ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), 
proof2.WindowPoStVerifyInfo{ + sis := make([]proof7.SectorInfo, len(seals)) + for i, xsi := range xsis { + sis[i] = proof7.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } + + ok, err := ProofVerifier.VerifyWindowPoSt(context.TODO(), proof7.WindowPoStVerifyInfo{ Randomness: randomness, Proofs: proofs, ChallengedSectors: sis, diff --git a/extern/sector-storage/ffiwrapper/types.go b/extern/sector-storage/ffiwrapper/types.go index a5b2fdf1f..b8d9e90f1 100644 --- a/extern/sector-storage/ffiwrapper/types.go +++ b/extern/sector-storage/ffiwrapper/types.go @@ -4,7 +4,7 @@ import ( "context" "io" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/ipfs/go-cid" @@ -34,10 +34,11 @@ type Storage interface { } type Verifier interface { - VerifySeal(proof5.SealVerifyInfo) (bool, error) - VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) - VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) - VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) + VerifySeal(proof.SealVerifyInfo) (bool, error) + VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) + VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) + VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) + VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) GenerateWinningPoStSectorChallenge(context.Context, abi.RegisteredPoStProof, abi.ActorID, abi.PoStRandomness, uint64) ([]uint64, error) } @@ -46,7 +47,7 @@ type Verifier interface { type Prover interface { // TODO: move GenerateWinningPoStSectorChallenge from the Verifier interface to here - AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) + 
AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) } type SectorProvider interface { diff --git a/extern/sector-storage/ffiwrapper/verifier_cgo.go b/extern/sector-storage/ffiwrapper/verifier_cgo.go index ff35ddc1f..6adda05c9 100644 --- a/extern/sector-storage/ffiwrapper/verifier_cgo.go +++ b/extern/sector-storage/ffiwrapper/verifier_cgo.go @@ -11,15 +11,16 @@ import ( ffi "github.com/filecoin-project/filecoin-ffi" "github.com/filecoin-project/go-state-types/abi" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + ffiproof "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { randomness[31] &= 0x3f - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWinningPoStProof) // TODO: FAULTS? 
if err != nil { return nil, err } @@ -31,12 +32,13 @@ func (sb *Sealer) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, return ffi.GenerateWinningPoSt(minerID, privsectors, randomness) } -func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { +func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { randomness[31] &= 0x3f - privsectors, skipped, done, err := sb.pubSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) + privsectors, skipped, done, err := sb.pubExtendedSectorToPriv(ctx, minerID, sectorInfo, nil, abi.RegisteredSealProof.RegisteredWindowPoStProof) if err != nil { return nil, nil, xerrors.Errorf("gathering sector info: %w", err) } + defer done() if len(skipped) > 0 { @@ -52,11 +54,10 @@ func (sb *Sealer) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, s Number: f, }) } - return proof, faultyIDs, err } -func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof5.SectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { +func (sb *Sealer) pubExtendedSectorToPriv(ctx context.Context, mid abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, faults []abi.SectorNumber, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error)) (ffi.SortedPrivateSectorInfo, []abi.SectorID, func(), error) { fmap := map[abi.SectorNumber]struct{}{} for _, fault := range faults { fmap[fault] = struct{}{} @@ -80,14 +81,32 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn ID: abi.SectorID{Miner: mid, Number: s.SectorNumber}, ProofType: s.SealProof, } - - paths, d, err 
:= sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) - if err != nil { - log.Warnw("failed to acquire sector, skipping", "sector", sid.ID, "error", err) - skipped = append(skipped, sid.ID) - continue + proveUpdate := s.SectorKey != nil + var cache string + var sealed string + if proveUpdate { + log.Debugf("Posting over updated sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTUpdateCache|storiface.FTUpdate, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTUpdateCache and FTUpdate of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.UpdateCache + sealed = paths.Update + } else { + log.Debugf("Posting over sector key sector for sector id: %d", s.SectorNumber) + paths, d, err := sb.sectors.AcquireSector(ctx, sid, storiface.FTCache|storiface.FTSealed, 0, storiface.PathStorage) + if err != nil { + log.Warnw("failed to acquire FTCache and FTSealed of sector, skipping", "sector", sid.ID, "error", err) + skipped = append(skipped, sid.ID) + continue + } + doneFuncs = append(doneFuncs, d) + cache = paths.Cache + sealed = paths.Sealed } - doneFuncs = append(doneFuncs, d) postProofType, err := rpt(s.SealProof) if err != nil { @@ -95,11 +114,16 @@ func (sb *Sealer) pubSectorToPriv(ctx context.Context, mid abi.ActorID, sectorIn return ffi.SortedPrivateSectorInfo{}, nil, nil, xerrors.Errorf("acquiring registered PoSt proof from sector info %+v: %w", s, err) } + ffiInfo := ffiproof.SectorInfo{ + SealProof: s.SealProof, + SectorNumber: s.SectorNumber, + SealedCID: s.SealedCID, + } out = append(out, ffi.PrivateSectorInfo{ - CacheDirPath: paths.Cache, + CacheDirPath: cache, PoStProofType: postProofType, - SealedSectorPath: paths.Sealed, - SectorInfo: s, + SealedSectorPath: sealed, + SectorInfo: ffiInfo, }) } @@ -112,15 +136,19 @@ type proofVerifier 
struct{} var ProofVerifier = proofVerifier{} -func (proofVerifier) VerifySeal(info proof5.SealVerifyInfo) (bool, error) { +func (proofVerifier) VerifySeal(info proof.SealVerifyInfo) (bool, error) { return ffi.VerifySeal(info) } -func (proofVerifier) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (proofVerifier) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) { return ffi.VerifyAggregateSeals(aggregate) } -func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) { + return ffi.SectorUpdate.VerifyUpdateProof(update) +} + +func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWinningPoSt") defer span.End() @@ -128,7 +156,7 @@ func (proofVerifier) VerifyWinningPoSt(ctx context.Context, info proof5.WinningP return ffi.VerifyWinningPoSt(info) } -func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (proofVerifier) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f _, span := trace.StartSpan(ctx, "VerifyWindowPoSt") defer span.End() diff --git a/extern/sector-storage/fr32/fr32.go b/extern/sector-storage/fr32/fr32.go index 17e6a1142..24175719c 100644 --- a/extern/sector-storage/fr32/fr32.go +++ b/extern/sector-storage/fr32/fr32.go @@ -8,7 +8,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" ) -var MTTresh = uint64(32 << 20) +var MTTresh = uint64(512 << 10) func mtChunkCount(usz abi.PaddedPieceSize) uint64 { threads := (uint64(usz)) / MTTresh diff --git a/extern/sector-storage/fr32/readers.go b/extern/sector-storage/fr32/readers.go index f14d5bf1c..163c520aa 100644 --- 
a/extern/sector-storage/fr32/readers.go +++ b/extern/sector-storage/fr32/readers.go @@ -16,13 +16,21 @@ type unpadReader struct { work []byte } +func BufSize(sz abi.PaddedPieceSize) int { + return int(MTTresh * mtChunkCount(sz)) +} + func NewUnpadReader(src io.Reader, sz abi.PaddedPieceSize) (io.Reader, error) { + buf := make([]byte, BufSize(sz)) + + return NewUnpadReaderBuf(src, sz, buf) +} + +func NewUnpadReaderBuf(src io.Reader, sz abi.PaddedPieceSize, buf []byte) (io.Reader, error) { if err := sz.Validate(); err != nil { return nil, xerrors.Errorf("bad piece size: %w", err) } - buf := make([]byte, MTTresh*mtChunkCount(sz)) - return &unpadReader{ src: src, diff --git a/extern/sector-storage/fsutil/statfs_unix.go b/extern/sector-storage/fsutil/statfs_unix.go index da09c5c60..da87c3364 100644 --- a/extern/sector-storage/fsutil/statfs_unix.go +++ b/extern/sector-storage/fsutil/statfs_unix.go @@ -1,3 +1,6 @@ +//go:build !windows +// +build !windows + package fsutil import ( diff --git a/extern/sector-storage/manager.go b/extern/sector-storage/manager.go index a8de586e1..475c399e9 100644 --- a/extern/sector-storage/manager.go +++ b/extern/sector-storage/manager.go @@ -51,13 +51,8 @@ type SectorManager interface { FaultTracker } -type WorkerID uuid.UUID // worker session UUID var ClosedWorkerID = uuid.UUID{} -func (w WorkerID) String() string { - return uuid.UUID(w).String() -} - type Manager struct { ls stores.LocalStorage storage *stores.Remote @@ -103,11 +98,13 @@ type SealerConfig struct { ParallelFetchLimit int // Local worker config - AllowAddPiece bool - AllowPreCommit1 bool - AllowPreCommit2 bool - AllowCommit bool - AllowUnseal bool + AllowAddPiece bool + AllowPreCommit1 bool + AllowPreCommit2 bool + AllowCommit bool + AllowUnseal bool + AllowReplicaUpdate bool + AllowProveReplicaUpdate2 bool // ResourceFiltering instructs the system which resource filtering strategy // to use when evaluating tasks against this worker. 
An empty value defaults @@ -149,7 +146,7 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store go m.sched.runSched() localTasks := []sealtasks.TaskType{ - sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTCommit1, sealtasks.TTProveReplicaUpdate1, sealtasks.TTFinalize, sealtasks.TTFetch, } if sc.AllowAddPiece { localTasks = append(localTasks, sealtasks.TTAddPiece) @@ -166,6 +163,12 @@ func New(ctx context.Context, lstor *stores.Local, stor *stores.Remote, ls store if sc.AllowUnseal { localTasks = append(localTasks, sealtasks.TTUnseal) } + if sc.AllowReplicaUpdate { + localTasks = append(localTasks, sealtasks.TTReplicaUpdate) + } + if sc.AllowProveReplicaUpdate2 { + localTasks = append(localTasks, sealtasks.TTProveReplicaUpdate2) + } wcfg := WorkerConfig{ IgnoreResourceFiltering: sc.ResourceFiltering == ResourceFilteringDisabled, @@ -578,29 +581,260 @@ func (m *Manager) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, return nil } +func (m *Manager) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + return m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil) +} + +func (m *Manager) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTUpdateCache|storiface.FTUpdate); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); err != nil { + return xerrors.Errorf("removing update cache: %w", err) + } + if err := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); err != nil { + 
return xerrors.Errorf("removing update: %w", err) + } + return nil +} + +func (m *Manager) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTRegenSectorKey, sector, commD) + if err != nil { + return xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + _, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + } + + if wait { // already in progress + waitRes() + return waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache, storiface.FTSealed|storiface.FTCache); err != nil { + return xerrors.Errorf("acquiring sector lock: %w", err) + } + + // NOTE: We set allowFetch to false in so that we always execute on a worker + // with direct access to the data. We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort + selector := newExistingSelector(m.index, sector.ID, storiface.FTUnsealed|storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTCache, true) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTRegenSectorKey, selector, m.schedFetch(sector, storiface.FTUpdate|storiface.FTUnsealed, storiface.PathSealing, storiface.AcquireMove), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.GenerateSectorKeyFromData(ctx, sector, commD)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return err + } + + return waitErr +} + func (m *Manager) Remove(ctx context.Context, sector storage.SectorRef) error { ctx, cancel := context.WithCancel(ctx) defer cancel() - if err := m.index.StorageLock(ctx, sector.ID, storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache); err != nil { + if err := m.index.StorageLock(ctx, sector.ID, 
storiface.FTNone, storiface.FTSealed|storiface.FTUnsealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache); err != nil { return xerrors.Errorf("acquiring sector lock: %w", err) } var err error - if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTCache, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTCache, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) } - if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); rerr != nil { + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdate, true, nil); rerr != nil { + err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) + } + if rerr := m.storage.Remove(ctx, sector.ID, storiface.FTUpdateCache, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } return err } +func (m *Manager) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (out storage.ReplicaUpdateOut, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + log.Errorf("manager is doing replica update") + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTReplicaUpdate, sector, pieces) + if err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = xerrors.Errorf("waitWork: %w", werr) + return + } + if p 
!= nil { + out = p.(storage.ReplicaUpdateOut) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.FTUpdate|storiface.FTUpdateCache); err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("acquiring sector lock: %w", err) + } + + selector := newAllocSelector(m.index, storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTReplicaUpdate, selector, m.schedFetch(sector, storiface.FTUnsealed|storiface.FTSealed|storiface.FTCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.ReplicaUpdate(ctx, sector, pieces)) + if err != nil { + return xerrors.Errorf("startWork: %w", err) + } + + waitRes() + return nil + }) + if err != nil { + return storage.ReplicaUpdateOut{}, xerrors.Errorf("Schedule: %w", err) + } + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (out storage.ReplicaVanillaProofs, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate1, sector, sectorKey, newSealed, newUnsealed) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaVanillaProofs) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + if err := m.index.StorageLock(ctx, sector.ID, storiface.FTSealed|storiface.FTUpdate|storiface.FTCache|storiface.FTUpdateCache, storiface.FTNone); err != nil { + return nil, xerrors.Errorf("acquiring sector lock: %w", err) + } + + // NOTE: We set 
allowFetch to false in so that we always execute on a worker + // with direct access to the data. We want to do that because this step is + // generally very cheap / fast, and transferring data is not worth the effort + selector := newExistingSelector(m.index, sector.ID, storiface.FTUpdate|storiface.FTUpdateCache|storiface.FTSealed|storiface.FTCache, false) + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate1, selector, m.schedFetch(sector, storiface.FTSealed|storiface.FTCache|storiface.FTUpdate|storiface.FTUpdateCache, storiface.PathSealing, storiface.AcquireCopy), func(ctx context.Context, w Worker) error { + + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed)) + if err != nil { + return err + } + + waitRes() + return nil + }) + if err != nil { + return nil, err + } + + return out, waitErr +} + +func (m *Manager) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (out storage.ReplicaUpdateProof, err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + wk, wait, cancel, err := m.getWork(ctx, sealtasks.TTProveReplicaUpdate2, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if err != nil { + return nil, xerrors.Errorf("getWork: %w", err) + } + defer cancel() + + var waitErr error + waitRes := func() { + p, werr := m.waitWork(ctx, wk) + if werr != nil { + waitErr = werr + return + } + if p != nil { + out = p.(storage.ReplicaUpdateProof) + } + } + + if wait { // already in progress + waitRes() + return out, waitErr + } + + selector := newTaskSelector() + + err = m.sched.Schedule(ctx, sector, sealtasks.TTProveReplicaUpdate2, selector, schedNop, func(ctx context.Context, w Worker) error { + err := m.startWork(ctx, w, wk)(w.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs)) + if err != nil { + return err + } + + waitRes() + return nil + }) 
+ + if err != nil { + return nil, err + } + + return out, waitErr +} + func (m *Manager) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { return m.returnResult(ctx, callID, pi, err) } @@ -629,6 +863,22 @@ func (m *Manager) ReturnReleaseUnsealed(ctx context.Context, callID storiface.Ca return m.returnResult(ctx, callID, nil, err) } +func (m *Manager) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + return m.returnResult(ctx, callID, out, err) +} + +func (m *Manager) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, proof storage.ReplicaUpdateProof, err *storiface.CallError) error { + return m.returnResult(ctx, callID, proof, err) +} + +func (m *Manager) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + return m.returnResult(ctx, callID, nil, err) +} + func (m *Manager) ReturnMoveStorage(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { return m.returnResult(ctx, callID, nil, err) } diff --git a/extern/sector-storage/manager_calltracker.go b/extern/sector-storage/manager_calltracker.go index 332a08817..7c8703e89 100644 --- a/extern/sector-storage/manager_calltracker.go +++ b/extern/sector-storage/manager_calltracker.go @@ -385,7 +385,6 @@ func (m *Manager) returnResult(ctx context.Context, callID storiface.CallID, r i if ok { return xerrors.Errorf("result for call %v already reported", wid) } - m.results[wid] = res err := m.work.Get(wid).Mutate(func(ws *WorkState) error { diff --git a/extern/sector-storage/manager_test.go b/extern/sector-storage/manager_test.go index d4044bbae..cc1f02a9a 100644 --- 
a/extern/sector-storage/manager_test.go +++ b/extern/sector-storage/manager_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sectorstorage import ( @@ -5,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -17,10 +19,12 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-statestore" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -65,7 +69,12 @@ func newTestStorage(t *testing.T) *testStorage { } func (t testStorage) cleanup() { + noCleanup := os.Getenv("LOTUS_TEST_NO_CLEANUP") != "" for _, path := range t.StoragePaths { + if noCleanup { + fmt.Printf("Not cleaning up test storage at %s\n", path) + continue + } if err := os.RemoveAll(path.Path); err != nil { fmt.Println("Cleanup error:", err) } @@ -162,6 +171,150 @@ func TestSimple(t *testing.T) { require.NoError(t, err) } +type Reader struct{} + +func (Reader) Read(out []byte) (int, error) { + for i := range out { + out[i] = 0 + } + return len(out), nil +} + +type NullReader struct { + *io.LimitedReader +} + +func NewNullReader(size abi.UnpaddedPieceSize) io.Reader { + return &NullReader{(io.LimitReader(&Reader{}, int64(size))).(*io.LimitedReader)} +} + +func (m NullReader) NullBytes() int64 { + return m.N +} + +func TestSnapDeals(t *testing.T) { + logging.SetAllLoggers(logging.LevelWarn) + ctx := context.Background() + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, datastore.NewMapDatastore()) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTPreCommit2, sealtasks.TTCommit1, sealtasks.TTCommit2, sealtasks.TTFinalize, + sealtasks.TTFetch, 
sealtasks.TTReplicaUpdate, sealtasks.TTProveReplicaUpdate1, sealtasks.TTProveReplicaUpdate2, sealtasks.TTUnseal, + sealtasks.TTRegenSectorKey, + } + wds := datastore.NewMapDatastore() + + w := NewLocalWorker(WorkerConfig{TaskTypes: localTasks}, stor, lstor, idx, m, statestore.New(wds)) + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + proofType := abi.RegisteredSealProof_StackedDrg2KiBV1 + ptStr := os.Getenv("LOTUS_TEST_SNAP_DEALS_PROOF_TYPE") + switch ptStr { + case "2k": + case "8M": + proofType = abi.RegisteredSealProof_StackedDrg8MiBV1 + case "512M": + proofType = abi.RegisteredSealProof_StackedDrg512MiBV1 + case "32G": + proofType = abi.RegisteredSealProof_StackedDrg32GiBV1 + case "64G": + proofType = abi.RegisteredSealProof_StackedDrg64GiBV1 + default: + log.Warn("Unspecified proof type, make sure to set LOTUS_TEST_SNAP_DEALS_PROOF_TYPE to '2k', '8M', '512M', '32G' or '64G'") + log.Warn("Continuing test with 2k sectors") + } + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: proofType, + } + ss, err := proofType.SectorSize() + require.NoError(t, err) + + unpaddedSectorSize := abi.PaddedPieceSize(ss).Unpadded() + + // Pack sector with no pieces + p0, err := m.AddPiece(ctx, sid, nil, unpaddedSectorSize, NewNullReader(unpaddedSectorSize)) + require.NoError(t, err) + ccPieces := []abi.PieceInfo{p0} + + // Precommit and Seal a CC sector + fmt.Printf("PC1\n") + ticket := abi.SealRandomness{9, 9, 9, 9, 9, 9, 9, 9} + pc1Out, err := m.SealPreCommit1(ctx, sid, ticket, ccPieces) + require.NoError(t, err) + fmt.Printf("PC2\n") + pc2Out, err := m.SealPreCommit2(ctx, sid, pc1Out) + require.NoError(t, err) + + // Now do a snap deals replica update + sectorKey := pc2Out.Sealed + + // Two pieces each half the size of the sector + unpaddedPieceSize := unpaddedSectorSize / 2 + p1, err := m.AddPiece(ctx, sid, nil, unpaddedPieceSize, strings.NewReader(strings.Repeat("k", int(unpaddedPieceSize)))) + require.NoError(t, err) + 
require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + p2, err := m.AddPiece(ctx, sid, []abi.UnpaddedPieceSize{p1.Size.Unpadded()}, unpaddedPieceSize, strings.NewReader(strings.Repeat("j", int(unpaddedPieceSize)))) + require.NoError(t, err) + require.Equal(t, unpaddedPieceSize.Padded(), p1.Size) + + pieces := []abi.PieceInfo{p1, p2} + fmt.Printf("RU\n") + startRU := time.Now() + out, err := m.ReplicaUpdate(ctx, sid, pieces) + require.NoError(t, err) + fmt.Printf("RU duration (%s): %s\n", ss.ShortString(), time.Since(startRU)) + + updateProofType, err := sid.ProofType.RegisteredUpdateProof() + require.NoError(t, err) + require.NotNil(t, out) + fmt.Printf("PR1\n") + startPR1 := time.Now() + vanillaProofs, err := m.ProveReplicaUpdate1(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed) + require.NoError(t, err) + require.NotNil(t, vanillaProofs) + fmt.Printf("PR1 duration (%s): %s\n", ss.ShortString(), time.Since(startPR1)) + fmt.Printf("PR2\n") + startPR2 := time.Now() + proof, err := m.ProveReplicaUpdate2(ctx, sid, sectorKey, out.NewSealed, out.NewUnsealed, vanillaProofs) + require.NoError(t, err) + require.NotNil(t, proof) + fmt.Printf("PR2 duration (%s): %s\n", ss.ShortString(), time.Since(startPR2)) + + vInfo := proof7.ReplicaUpdateInfo{ + Proof: proof, + UpdateProofType: updateProofType, + OldSealedSectorCID: sectorKey, + NewSealedSectorCID: out.NewSealed, + NewUnsealedSectorCID: out.NewUnsealed, + } + pass, err := ffiwrapper.ProofVerifier.VerifyReplicaUpdate(vInfo) + require.NoError(t, err) + assert.True(t, pass) + + fmt.Printf("Decode\n") + // Remove unsealed data and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, sid, nil)) + startDecode := time.Now() + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + fmt.Printf("Decode duration (%s): %s\n", ss.ShortString(), time.Since(startDecode)) + + // Remove just the first piece and decode for retrieval + require.NoError(t, m.FinalizeSector(ctx, 
sid, []storage.Range{{Offset: p1.Size.Unpadded(), Size: p2.Size.Unpadded()}})) + require.NoError(t, m.SectorsUnsealPiece(ctx, sid, 0, p1.Size.Unpadded(), ticket, &out.NewUnsealed)) + + fmt.Printf("GSK\n") + require.NoError(t, m.ReleaseSectorKey(ctx, sid)) + startGSK := time.Now() + require.NoError(t, m.GenerateSectorKeyFromData(ctx, sid, out.NewUnsealed)) + fmt.Printf("GSK duration (%s): %s\n", ss.ShortString(), time.Since(startGSK)) + +} + func TestRedoPC1(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) @@ -211,6 +364,7 @@ func TestRedoPC1(t *testing.T) { // Manager restarts in the middle of a task, restarts it, it completes func TestRestartManager(t *testing.T) { + //stm: @WORKER_JOBS_001 test := func(returnBeforeCall bool) func(*testing.T) { return func(t *testing.T) { logging.SetAllLoggers(logging.LevelDebug) @@ -322,7 +476,7 @@ func TestRestartWorker(t *testing.T) { defer cleanup() localTasks := []sealtasks.TaskType{ - sealtasks.TTAddPiece, sealtasks.TTPreCommit1, sealtasks.TTCommit1, sealtasks.TTFinalize, sealtasks.TTFetch, + sealtasks.TTAddPiece, sealtasks.TTFetch, } wds := datastore.NewMapDatastore() @@ -332,7 +486,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -355,6 +509,7 @@ func TestRestartWorker(t *testing.T) { <-arch require.NoError(t, w.Close()) + //stm: @WORKER_STATS_001 for { if len(m.WorkerStats()) == 0 { break @@ -368,7 +523,7 @@ func TestRestartWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err = m.AddWorker(ctx, w) require.NoError(t, err) @@ -404,7 +559,7 @@ func TestReenableWorker(t *testing.T) { return &testExec{apch: arch}, nil }, WorkerConfig{ 
TaskTypes: localTasks, - }, stor, lstor, idx, m, statestore.New(wds)) + }, os.LookupEnv, stor, lstor, idx, m, statestore.New(wds)) err := m.AddWorker(ctx, w) require.NoError(t, err) @@ -417,6 +572,7 @@ func TestReenableWorker(t *testing.T) { // disable atomic.StoreInt64(&w.testDisable, 1) + //stm: @WORKER_STATS_001 for i := 0; i < 100; i++ { if !m.WorkerStats()[w.session].Enabled { break @@ -453,3 +609,123 @@ func TestReenableWorker(t *testing.T) { i, _ = m.sched.Info(ctx) require.Len(t, i.(SchedDiagInfo).OpenWindows, 2) } + +func TestResUse(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, storiface.ResourceTable[sealtasks.TTAddPiece][abi.RegisteredSealProof_StackedDrg2KiBV1].MaxMemory, w.MemUsedMax) + } +} + +func TestResOverride(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + 
ctx, done := context.WithCancel(context.Background()) + defer done() + + ds := datastore.NewMapDatastore() + + m, lstor, stor, idx, cleanup := newTestMgr(ctx, t, ds) + defer cleanup() + + localTasks := []sealtasks.TaskType{ + sealtasks.TTAddPiece, sealtasks.TTFetch, + } + + wds := datastore.NewMapDatastore() + + arch := make(chan chan apres) + w := newLocalWorker(func() (ffiwrapper.Storage, error) { + return &testExec{apch: arch}, nil + }, WorkerConfig{ + TaskTypes: localTasks, + }, func(s string) (string, bool) { + if s == "AP_2K_MAX_MEMORY" { + return "99999", true + } + + return "", false + }, stor, lstor, idx, m, statestore.New(wds)) + + err := m.AddWorker(ctx, w) + require.NoError(t, err) + + sid := storage.SectorRef{ + ID: abi.SectorID{Miner: 1000, Number: 1}, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + go func() { + _, err := m.AddPiece(ctx, sid, nil, 1016, strings.NewReader(strings.Repeat("testthis", 127))) + require.Error(t, err) + }() + +l: + for { + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + if w.MemUsedMax > 0 { + break l + } + time.Sleep(time.Millisecond) + } + } + + st := m.WorkerStats() + require.Len(t, st, 1) + for _, w := range st { + require.Equal(t, uint64(99999), w.MemUsedMax) + } +} diff --git a/extern/sector-storage/mock/mock.go b/extern/sector-storage/mock/mock.go index 273f0928e..c99af89e7 100644 --- a/extern/sector-storage/mock/mock.go +++ b/extern/sector-storage/mock/mock.go @@ -10,8 +10,9 @@ import ( "math/rand" "sync" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + "github.com/filecoin-project/dagstore/mount" ffiwrapper2 "github.com/filecoin-project/go-commp-utils/ffiwrapper" commcid "github.com/filecoin-project/go-fil-commcid" "github.com/filecoin-project/go-state-types/abi" @@ -36,7 +37,7 @@ type SectorMgr struct { } type mockVerifProver struct { - aggregates 
map[string]proof5.AggregateSealVerifyProofAndInfos // used for logging bad verifies + aggregates map[string]proof.AggregateSealVerifyProofAndInfos // used for logging bad verifies } func NewMockSectorMgr(genesisSectors []abi.SectorID) *SectorMgr { @@ -261,6 +262,28 @@ func (mgr *SectorMgr) SealCommit2(ctx context.Context, sid storage.SectorRef, ph return out[:], nil } +func (mgr *SectorMgr) ReplicaUpdate(ctx context.Context, sid storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + out := storage.ReplicaUpdateOut{} + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + out := make([][]byte, 0) + return out, nil +} + +func (mgr *SectorMgr) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + return make([]byte, 0), nil +} + +func (mgr *SectorMgr) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) error { + return nil +} + +func (mgr *SectorMgr) ReleaseSealed(ctx context.Context, sid storage.SectorRef) error { + return nil +} + // Test Instrumentation Methods func (mgr *SectorMgr) MarkFailed(sid storage.SectorRef, failed bool) error { @@ -311,14 +334,23 @@ func AddOpFinish(ctx context.Context) (context.Context, func()) { } } -func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, error) { +func (mgr *SectorMgr) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { mgr.lk.Lock() defer mgr.lk.Unlock() + sectorInfo := make([]proof.SectorInfo, len(xSectorInfo)) + for i, xssi := range xSectorInfo { + sectorInfo[i] 
= proof.SectorInfo{ + SealProof: xssi.SealProof, + SectorNumber: xssi.SectorNumber, + SealedCID: xssi.SealedCID, + } + } + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWinningPoStProof, randomness), nil } -func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof5.SectorInfo, randomness abi.PoStRandomness) ([]proof5.PoStProof, []abi.SectorID, error) { +func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, xSectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, []abi.SectorID, error) { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -326,22 +358,22 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, nil, xerrors.Errorf("failed to post (mock)") } - si := make([]proof5.SectorInfo, 0, len(sectorInfo)) + si := make([]proof.ExtendedSectorInfo, 0, len(xSectorInfo)) var skipped []abi.SectorID var err error - for _, info := range sectorInfo { + for _, xsi := range xSectorInfo { sid := abi.SectorID{ Miner: minerID, - Number: info.SectorNumber, + Number: xsi.SectorNumber, } _, found := mgr.sectors[sid] if found && !mgr.sectors[sid].failed && !mgr.sectors[sid].corrupted { - si = append(si, info) + si = append(si, xsi) } else { skipped = append(skipped, sid) err = xerrors.Errorf("skipped some sectors") @@ -352,10 +384,19 @@ func (mgr *SectorMgr) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorI return nil, skipped, err } - return generateFakePoSt(si, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil + sectorInfo := make([]proof.SectorInfo, len(si)) + for i, xssi := range si { + sectorInfo[i] = proof.SectorInfo{ + SealProof: xssi.SealProof, + SectorNumber: xssi.SectorNumber, + SealedCID: xssi.SealedCID, + } + } + + return generateFakePoSt(sectorInfo, abi.RegisteredSealProof.RegisteredWindowPoStProof, randomness), skipped, nil } -func generateFakePoStProof(sectorInfo 
[]proof5.SectorInfo, randomness abi.PoStRandomness) []byte { +func generateFakePoStProof(sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) []byte { randomness[31] &= 0x3f hasher := sha256.New() @@ -370,13 +411,13 @@ func generateFakePoStProof(sectorInfo []proof5.SectorInfo, randomness abi.PoStRa } -func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof5.PoStProof { +func generateFakePoSt(sectorInfo []proof.SectorInfo, rpt func(abi.RegisteredSealProof) (abi.RegisteredPoStProof, error), randomness abi.PoStRandomness) []proof.PoStProof { wp, err := rpt(sectorInfo[0].SealProof) if err != nil { panic(err) } - return []proof5.PoStProof{ + return []proof.PoStProof{ { PoStProof: wp, ProofBytes: generateFakePoStProof(sectorInfo, randomness), @@ -384,12 +425,22 @@ func generateFakePoSt(sectorInfo []proof5.SectorInfo, rpt func(abi.RegisteredSea } } -func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if offset != 0 { +func (mgr *SectorMgr) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if uint64(offset) != 0 { panic("implme") } - return ioutil.NopCloser(bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size])), false, nil + br := bytes.NewReader(mgr.pieces[mgr.sectors[sector.ID].pieces[0]][:size]) + + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: ioutil.NopCloser(br), + Seeker: br, + ReaderAt: br, + }, false, nil } func (mgr *SectorMgr) StageFakeData(mid abi.ActorID, spt abi.RegisteredSealProof) (storage.SectorRef, []abi.PieceInfo, error) { @@ -430,6 +481,14 @@ func (mgr *SectorMgr) ReleaseUnsealed(ctx 
context.Context, sector storage.Sector return nil } +func (mgr *SectorMgr) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + return nil +} + +func (mgr *SectorMgr) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + return nil +} + func (mgr *SectorMgr) Remove(ctx context.Context, sector storage.SectorRef) error { mgr.lk.Lock() defer mgr.lk.Unlock() @@ -456,6 +515,8 @@ func (mgr *SectorMgr) CheckProvable(ctx context.Context, pp abi.RegisteredPoStPr return bad, nil } +var _ storiface.WorkerReturn = &SectorMgr{} + func (mgr *SectorMgr) ReturnAddPiece(ctx context.Context, callID storiface.CallID, pi abi.PieceInfo, err *storiface.CallError) error { panic("not supported") } @@ -500,7 +561,23 @@ func (mgr *SectorMgr) ReturnFetch(ctx context.Context, callID storiface.CallID, panic("not supported") } -func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { +func (mgr *SectorMgr) ReturnReplicaUpdate(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateOut, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate1(ctx context.Context, callID storiface.CallID, out storage.ReplicaVanillaProofs, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnProveReplicaUpdate2(ctx context.Context, callID storiface.CallID, out storage.ReplicaUpdateProof, err *storiface.CallError) error { + panic("not supported") +} + +func (mgr *SectorMgr) ReturnGenerateSectorKeyFromData(ctx context.Context, callID storiface.CallID, err *storiface.CallError) error { + panic("not supported") +} + +func (m mockVerifProver) VerifySeal(svi proof.SealVerifyInfo) (bool, error) { plen, err := svi.SealProof.ProofSize() if err != nil { return false, err @@ -521,7 +598,7 @@ func (m mockVerifProver) VerifySeal(svi proof5.SealVerifyInfo) (bool, error) { return true, nil } -func (m mockVerifProver) VerifyAggregateSeals(aggregate 
proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (m mockVerifProver) VerifyAggregateSeals(aggregate proof.AggregateSealVerifyProofAndInfos) (bool, error) { out := make([]byte, m.aggLen(len(aggregate.Infos))) for pi, svi := range aggregate.Infos { for i := 0; i < 32; i++ { @@ -547,7 +624,11 @@ func (m mockVerifProver) VerifyAggregateSeals(aggregate proof5.AggregateSealVeri return ok, nil } -func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof5.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { +func (m mockVerifProver) VerifyReplicaUpdate(update proof.ReplicaUpdateInfo) (bool, error) { + return true, nil +} + +func (m mockVerifProver) AggregateSealProofs(aggregateInfo proof.AggregateSealVerifyProofAndInfos, proofs [][]byte) ([]byte, error) { out := make([]byte, m.aggLen(len(aggregateInfo.Infos))) // todo: figure out more real length for pi, proof := range proofs { for i := range proof[:32] { @@ -589,12 +670,12 @@ func (m mockVerifProver) aggLen(nproofs int) int { } } -func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof5.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWinningPoSt(ctx context.Context, info proof.WinningPoStVerifyInfo) (bool, error) { info.Randomness[31] &= 0x3f return true, nil } -func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof5.WindowPoStVerifyInfo) (bool, error) { +func (m mockVerifProver) VerifyWindowPoSt(ctx context.Context, info proof.WindowPoStVerifyInfo) (bool, error) { if len(info.Proofs) != 1 { return false, xerrors.Errorf("expected 1 proof entry") } @@ -617,7 +698,7 @@ func (m mockVerifProver) GenerateWinningPoStSectorChallenge(ctx context.Context, } var MockVerifier = mockVerifProver{ - aggregates: map[string]proof5.AggregateSealVerifyProofAndInfos{}, + aggregates: map[string]proof.AggregateSealVerifyProofAndInfos{}, } var MockProver = MockVerifier diff --git a/extern/sector-storage/partialfile/partialfile.go 
b/extern/sector-storage/partialfile/partialfile.go index 529e889ea..ffc3935ac 100644 --- a/extern/sector-storage/partialfile/partialfile.go +++ b/extern/sector-storage/partialfile/partialfile.go @@ -71,7 +71,7 @@ func CreatePartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*PartialF err := fallocate.Fallocate(f, 0, int64(maxPieceSize)) if errno, ok := err.(syscall.Errno); ok { if errno == syscall.EOPNOTSUPP || errno == syscall.ENOSYS { - log.Warnf("could not allocated space, ignoring: %v", errno) + log.Warnf("could not allocate space, ignoring: %v", errno) err = nil // log and ignore } } diff --git a/extern/sector-storage/piece_provider.go b/extern/sector-storage/piece_provider.go index ad3a2543e..4622289e8 100644 --- a/extern/sector-storage/piece_provider.go +++ b/extern/sector-storage/piece_provider.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" @@ -23,7 +24,11 @@ type Unsealer interface { type PieceProvider interface { // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector - ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) + // pieceOffset + pieceSize specify piece bounds for unsealing (note: with SDR the entire sector will be unsealed by + // default in most cases, but this might matter with future PoRep) + // startOffset is added to the pieceOffset to get the starting reader offset. 
+ // The number of bytes that can be read is pieceSize-startOffset + ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, pieceSize abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) IsUnsealed(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) } @@ -67,50 +72,104 @@ func (p *pieceProvider) IsUnsealed(ctx context.Context, sector storage.SectorRef // It will NOT try to schedule an Unseal of a sealed sector file for the read. // // Returns a nil reader if the piece does NOT exist in any unsealed file or there is no unsealed file for the given sector on any of the workers. -func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (io.ReadCloser, context.CancelFunc, error) { +func (p *pieceProvider) tryReadUnsealedPiece(ctx context.Context, pc cid.Cid, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (mount.Reader, error) { // acquire a lock purely for reading unsealed sectors ctx, cancel := context.WithCancel(ctx) if err := p.index.StorageLock(ctx, sector.ID, storiface.FTUnsealed, storiface.FTNone); err != nil { cancel() - return nil, nil, xerrors.Errorf("acquiring read sector lock: %w", err) + return nil, xerrors.Errorf("acquiring read sector lock: %w", err) } - // Reader returns a reader for an unsealed piece at the given offset in the given sector. + // Reader returns a reader getter for an unsealed piece at the given offset in the given sector. // The returned reader will be nil if none of the workers has an unsealed sector file containing // the unsealed piece. 
- r, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(offset.Padded()), size.Padded()) + rg, err := p.storage.Reader(ctx, sector, abi.PaddedPieceSize(pieceOffset.Padded()), size.Padded()) if err != nil { + cancel() log.Debugf("did not get storage reader;sector=%+v, err:%s", sector.ID, err) + return nil, err + } + if rg == nil { cancel() - return nil, nil, err + return nil, nil } - if r == nil { + + buf := make([]byte, fr32.BufSize(size.Padded())) + + pr, err := (&pieceReader{ + ctx: ctx, + getReader: func(ctx context.Context, startOffset uint64) (io.ReadCloser, error) { + startOffsetAligned := storiface.UnpaddedByteIndex(startOffset / 127 * 127) // floor to multiple of 127 + + r, err := rg(startOffsetAligned.Padded()) + if err != nil { + return nil, xerrors.Errorf("getting reader at +%d: %w", startOffsetAligned, err) + } + + upr, err := fr32.NewUnpadReaderBuf(r, size.Padded(), buf) + if err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("creating unpadded reader: %w", err) + } + + bir := bufio.NewReaderSize(upr, 127) + if startOffset > uint64(startOffsetAligned) { + if _, err := bir.Discard(int(startOffset - uint64(startOffsetAligned))); err != nil { + r.Close() // nolint + return nil, xerrors.Errorf("discarding bytes for startOffset: %w", err) + } + } + + return struct { + io.Reader + io.Closer + }{ + Reader: bir, + Closer: funcCloser(func() error { + return r.Close() + }), + }, nil + }, + len: size, + onClose: cancel, + pieceCid: pc, + }).init() + if err != nil || pr == nil { // pr == nil to make sure we don't return typed nil cancel() + return nil, err } - return r, cancel, nil + return pr, err } +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) + // ReadPiece is used to read an Unsealed piece at the given offset and of the given size from a Sector // If an Unsealed sector file exists with the Piece Unsealed in it, we'll use that for the read. 
// Otherwise, we will Unseal a Sealed sector file for the given sector and read the Unsealed piece from it. // If we do NOT have an existing unsealed file containing the given piece thus causing us to schedule an Unseal, // the returned boolean parameter will be set to true. // If we have an existing unsealed file containing the given piece, the returned boolean will be set to false. -func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (io.ReadCloser, bool, error) { - if err := offset.Valid(); err != nil { - return nil, false, xerrors.Errorf("offset is not valid: %w", err) +func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, pieceOffset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, ticket abi.SealRandomness, unsealed cid.Cid) (mount.Reader, bool, error) { + if err := pieceOffset.Valid(); err != nil { + return nil, false, xerrors.Errorf("pieceOffset is not valid: %w", err) } if err := size.Validate(); err != nil { return nil, false, xerrors.Errorf("size is not a valid piece size: %w", err) } - r, unlock, err := p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err := p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) log.Debugf("result of first tryReadUnsealedPiece: r=%+v, err=%s", r, err) if xerrors.Is(err, storiface.ErrSectorNotFound) { - log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("no unsealed sector file with unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) err = nil } if err != nil { @@ -129,14 +188,14 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, if unsealed == cid.Undef { commd = nil } - if err := p.uns.SectorsUnsealPiece(ctx, sector, offset, size, ticket, commd); err != nil { + if err := p.uns.SectorsUnsealPiece(ctx, 
sector, pieceOffset, size, ticket, commd); err != nil { log.Errorf("failed to SectorsUnsealPiece: %s", err) return nil, false, xerrors.Errorf("unsealing piece: %w", err) } - log.Debugf("unsealed a sector file to read the piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("unsealed a sector file to read the piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - r, unlock, err = p.tryReadUnsealedPiece(ctx, sector, offset, size) + r, err = p.tryReadUnsealedPiece(ctx, unsealed, sector, pieceOffset, size) if err != nil { log.Errorf("failed to tryReadUnsealedPiece after SectorsUnsealPiece: %s", err) return nil, true, xerrors.Errorf("read after unsealing: %w", err) @@ -145,32 +204,12 @@ func (p *pieceProvider) ReadPiece(ctx context.Context, sector storage.SectorRef, log.Errorf("got no reader after unsealing piece") return nil, true, xerrors.Errorf("got no reader after unsealing piece") } - log.Debugf("got a reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("got a reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } else { - log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, offset=%d, size=%d", sector, offset, size) - } - - upr, err := fr32.NewUnpadReader(r, size.Padded()) - if err != nil { - unlock() - return nil, uns, xerrors.Errorf("creating unpadded reader: %w", err) + log.Debugf("unsealed piece already exists, no need to unseal, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) } - log.Debugf("returning reader to read unsealed piece, sector=%+v, offset=%d, size=%d", sector, offset, size) + log.Debugf("returning reader to read unsealed piece, sector=%+v, pieceOffset=%d, size=%d", sector, pieceOffset, size) - return &funcCloser{ - Reader: bufio.NewReaderSize(upr, 127), - close: func() error { - err = r.Close() - unlock() - return err - }, - }, uns, nil -} - -type funcCloser struct { - io.Reader - 
close func() error + return r, uns, nil } - -func (fc *funcCloser) Close() error { return fc.close() } diff --git a/extern/sector-storage/piece_provider_test.go b/extern/sector-storage/piece_provider_test.go index d6fa14574..3ace2916e 100644 --- a/extern/sector-storage/piece_provider_test.go +++ b/extern/sector-storage/piece_provider_test.go @@ -7,6 +7,7 @@ import ( "math/rand" "net" "net/http" + "os" "testing" "github.com/filecoin-project/go-state-types/abi" @@ -286,7 +287,7 @@ func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtas worker := newLocalWorker(nil, WorkerConfig{ TaskTypes: tasks, - }, remote, localStore, p.index, p.mgr, csts) + }, os.LookupEnv, remote, localStore, p.index, p.mgr, csts) p.servers = append(p.servers, svc) p.localStores = append(p.localStores, localStore) @@ -298,7 +299,7 @@ func (p *pieceProviderTestHarness) addRemoteWorker(t *testing.T, tasks []sealtas func (p *pieceProviderTestHarness) removeAllUnsealedSectorFiles(t *testing.T) { for i := range p.localStores { ls := p.localStores[i] - require.NoError(t, ls.Remove(p.ctx, p.sector.ID, storiface.FTUnsealed, false)) + require.NoError(t, ls.Remove(p.ctx, p.sector.ID, storiface.FTUnsealed, false, nil)) } } diff --git a/extern/sector-storage/piece_reader.go b/extern/sector-storage/piece_reader.go new file mode 100644 index 000000000..d7a3f4e98 --- /dev/null +++ b/extern/sector-storage/piece_reader.go @@ -0,0 +1,180 @@ +package sectorstorage + +import ( + "bufio" + "context" + "io" + + "github.com/ipfs/go-cid" + "go.opencensus.io/stats" + "golang.org/x/xerrors" + + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/metrics" +) + +// For small read skips, it's faster to "burn" some bytes than to setup new sector reader. +// Assuming 1ms stream seek latency, and 1G/s stream rate, we're willing to discard up to 1 MiB. 
+var MaxPieceReaderBurnBytes int64 = 1 << 20 // 1M +var ReadBuf = 128 * (127 * 8) // unpadded(128k) + +type pieceGetter func(ctx context.Context, offset uint64) (io.ReadCloser, error) + +type pieceReader struct { + ctx context.Context + getReader pieceGetter + pieceCid cid.Cid + len abi.UnpaddedPieceSize + onClose context.CancelFunc + + closed bool + seqAt int64 // next byte to be read by io.Reader + + r io.ReadCloser + br *bufio.Reader + rAt int64 +} + +func (p *pieceReader) init() (_ *pieceReader, err error) { + stats.Record(p.ctx, metrics.DagStorePRInitCount.M(1)) + + p.rAt = 0 + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + if err != nil { + return nil, err + } + if p.r == nil { + return nil, nil + } + + p.br = bufio.NewReaderSize(p.r, ReadBuf) + + return p, nil +} + +func (p *pieceReader) check() error { + if p.closed { + return xerrors.Errorf("reader closed") + } + + return nil +} + +func (p *pieceReader) Close() error { + if err := p.check(); err != nil { + return err + } + + if p.r != nil { + if err := p.r.Close(); err != nil { + return err + } + if err := p.r.Close(); err != nil { + return err + } + p.r = nil + } + + p.onClose() + + p.closed = true + + return nil +} + +func (p *pieceReader) Read(b []byte) (int, error) { + if err := p.check(); err != nil { + return 0, err + } + + n, err := p.ReadAt(b, p.seqAt) + p.seqAt += int64(n) + return n, err +} + +func (p *pieceReader) Seek(offset int64, whence int) (int64, error) { + if err := p.check(); err != nil { + return 0, err + } + + switch whence { + case io.SeekStart: + p.seqAt = offset + case io.SeekCurrent: + p.seqAt += offset + case io.SeekEnd: + p.seqAt = int64(p.len) + offset + default: + return 0, xerrors.Errorf("bad whence") + } + + return p.seqAt, nil +} + +func (p *pieceReader) ReadAt(b []byte, off int64) (n int, err error) { + if err := p.check(); err != nil { + return 0, err + } + + stats.Record(p.ctx, metrics.DagStorePRBytesRequested.M(int64(len(b)))) + + // 1. 
Get the backing reader into the correct position + + // if the backing reader is ahead of the offset we want, or more than + // MaxPieceReaderBurnBytes behind, reset the reader + if p.r == nil || p.rAt > off || p.rAt+MaxPieceReaderBurnBytes < off { + if p.r != nil { + if err := p.r.Close(); err != nil { + return 0, xerrors.Errorf("closing backing reader: %w", err) + } + p.r = nil + p.br = nil + } + + log.Debugw("pieceReader new stream", "piece", p.pieceCid, "at", p.rAt, "off", off-p.rAt, "n", len(b)) + + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRSeekForwardBytes.M(off-p.rAt), metrics.DagStorePRSeekForwardCount.M(1)) + } else { + stats.Record(p.ctx, metrics.DagStorePRSeekBackBytes.M(p.rAt-off), metrics.DagStorePRSeekBackCount.M(1)) + } + + p.rAt = off + p.r, err = p.getReader(p.ctx, uint64(p.rAt)) + p.br = bufio.NewReaderSize(p.r, ReadBuf) + if err != nil { + return 0, xerrors.Errorf("getting backing reader: %w", err) + } + } + + // 2. Check if we need to burn some bytes + if off > p.rAt { + stats.Record(p.ctx, metrics.DagStorePRBytesDiscarded.M(off-p.rAt), metrics.DagStorePRDiscardCount.M(1)) + + n, err := io.CopyN(io.Discard, p.br, off-p.rAt) + p.rAt += n + if err != nil { + return 0, xerrors.Errorf("discarding read gap: %w", err) + } + } + + // 3. Sanity check + if off != p.rAt { + return 0, xerrors.Errorf("bad reader offset; requested %d; at %d", off, p.rAt) + } + + // 4. Read! 
+ n, err = io.ReadFull(p.br, b) + if n < len(b) { + log.Debugw("pieceReader short read", "piece", p.pieceCid, "at", p.rAt, "toEnd", int64(p.len)-p.rAt, "n", len(b), "read", n, "err", err) + } + if err == io.ErrUnexpectedEOF { + err = io.EOF + } + + p.rAt += int64(n) + return n, err +} + +var _ mount.Reader = (*pieceReader)(nil) diff --git a/extern/sector-storage/resources.go b/extern/sector-storage/resources.go deleted file mode 100644 index 2e989fdf4..000000000 --- a/extern/sector-storage/resources.go +++ /dev/null @@ -1,325 +0,0 @@ -package sectorstorage - -import ( - "github.com/filecoin-project/go-state-types/abi" - - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" -) - -type Resources struct { - MinMemory uint64 // What Must be in RAM for decent perf - MaxMemory uint64 // Memory required (swap + ram) - - MaxParallelism int // -1 = multithread - CanGPU bool - - BaseMinMemory uint64 // What Must be in RAM for decent perf (shared between threads) -} - -/* - - Percent of threads to allocate to parallel tasks - - 12 * 0.92 = 11 - 16 * 0.92 = 14 - 24 * 0.92 = 22 - 32 * 0.92 = 29 - 64 * 0.92 = 58 - 128 * 0.92 = 117 - -*/ -var ParallelNum uint64 = 92 -var ParallelDenom uint64 = 100 - -// TODO: Take NUMA into account -func (r Resources) Threads(wcpus uint64) uint64 { - if r.MaxParallelism == -1 { - n := (wcpus * ParallelNum) / ParallelDenom - if n == 0 { - return wcpus - } - return n - } - - return uint64(r.MaxParallelism) -} - -var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ - sealtasks.TTAddPiece: { - abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 8 << 30, - MinMemory: 8 << 30, - - MaxParallelism: 1, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 4 << 30, - MinMemory: 4 << 30, - - MaxParallelism: 1, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 1 << 30, - MinMemory: 1 << 30, - - 
MaxParallelism: 1, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 2 << 10, - MinMemory: 2 << 10, - - MaxParallelism: 1, - - BaseMinMemory: 2 << 10, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 8 << 20, - MinMemory: 8 << 20, - - MaxParallelism: 1, - - BaseMinMemory: 8 << 20, - }, - }, - sealtasks.TTPreCommit1: { - abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 128 << 30, - MinMemory: 112 << 30, - - MaxParallelism: 1, - - BaseMinMemory: 10 << 20, - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 64 << 30, - MinMemory: 56 << 30, - - MaxParallelism: 1, - - BaseMinMemory: 10 << 20, - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 1 << 30, - MinMemory: 768 << 20, - - MaxParallelism: 1, - - BaseMinMemory: 1 << 20, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 2 << 10, - MinMemory: 2 << 10, - - MaxParallelism: 1, - - BaseMinMemory: 2 << 10, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 8 << 20, - MinMemory: 8 << 20, - - MaxParallelism: 1, - - BaseMinMemory: 8 << 20, - }, - }, - sealtasks.TTPreCommit2: { - abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 30 << 30, - MinMemory: 30 << 30, - - MaxParallelism: -1, - CanGPU: true, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 15 << 30, - MinMemory: 15 << 30, - - MaxParallelism: -1, - CanGPU: true, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 3 << 29, // 1.5G - MinMemory: 1 << 30, - - MaxParallelism: -1, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 2 << 10, - MinMemory: 2 << 10, - - MaxParallelism: -1, - - BaseMinMemory: 2 << 10, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 8 << 20, - MinMemory: 8 << 20, - - 
MaxParallelism: -1, - - BaseMinMemory: 8 << 20, - }, - }, - sealtasks.TTCommit1: { // Very short (~100ms), so params are very light - abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 1 << 30, - MinMemory: 1 << 30, - - MaxParallelism: 0, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 1 << 30, - MinMemory: 1 << 30, - - MaxParallelism: 0, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 1 << 30, - MinMemory: 1 << 30, - - MaxParallelism: 0, - - BaseMinMemory: 1 << 30, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 2 << 10, - MinMemory: 2 << 10, - - MaxParallelism: 0, - - BaseMinMemory: 2 << 10, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 8 << 20, - MinMemory: 8 << 20, - - MaxParallelism: 0, - - BaseMinMemory: 8 << 20, - }, - }, - sealtasks.TTCommit2: { - abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ - MaxMemory: 190 << 30, // TODO: Confirm - MinMemory: 60 << 30, - - MaxParallelism: -1, - CanGPU: true, - - BaseMinMemory: 64 << 30, // params - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory - MinMemory: 30 << 30, - - MaxParallelism: -1, - CanGPU: true, - - BaseMinMemory: 32 << 30, // params - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 3 << 29, // 1.5G - MinMemory: 1 << 30, - - MaxParallelism: 1, // This is fine - CanGPU: true, - - BaseMinMemory: 10 << 30, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 2 << 10, - MinMemory: 2 << 10, - - MaxParallelism: 1, - CanGPU: true, - - BaseMinMemory: 2 << 10, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 8 << 20, - MinMemory: 8 << 20, - - MaxParallelism: 1, - CanGPU: true, - - BaseMinMemory: 8 << 20, - }, - }, - sealtasks.TTFetch: { - abi.RegisteredSealProof_StackedDrg64GiBV1: 
Resources{ - MaxMemory: 1 << 20, - MinMemory: 1 << 20, - - MaxParallelism: 0, - CanGPU: false, - - BaseMinMemory: 0, - }, - abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ - MaxMemory: 1 << 20, - MinMemory: 1 << 20, - - MaxParallelism: 0, - CanGPU: false, - - BaseMinMemory: 0, - }, - abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ - MaxMemory: 1 << 20, - MinMemory: 1 << 20, - - MaxParallelism: 0, - CanGPU: false, - - BaseMinMemory: 0, - }, - abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ - MaxMemory: 1 << 20, - MinMemory: 1 << 20, - - MaxParallelism: 0, - CanGPU: false, - - BaseMinMemory: 0, - }, - abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ - MaxMemory: 1 << 20, - MinMemory: 1 << 20, - - MaxParallelism: 0, - CanGPU: false, - - BaseMinMemory: 0, - }, - }, -} - -func init() { - ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately - - // V1_1 is the same as V1 - for _, m := range ResourceTable { - m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] - m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1] - m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg512MiBV1] - m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1] - m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] - } -} diff --git a/extern/sector-storage/sched.go b/extern/sector-storage/sched.go index 38e901bf2..d7d7d3265 100644 --- a/extern/sector-storage/sched.go +++ b/extern/sector-storage/sched.go @@ -53,7 +53,7 @@ type WorkerSelector interface { type scheduler struct { workersLk sync.RWMutex - workers map[WorkerID]*workerHandle + workers map[storiface.WorkerID]*workerHandle schedule chan *workerRequest windowRequests chan *schedWindowRequest @@ -95,7 +95,7 @@ type workerHandle struct { } type schedWindowRequest struct { - worker 
WorkerID + worker storiface.WorkerID done chan *schedWindow } @@ -107,14 +107,14 @@ type schedWindow struct { type workerDisableReq struct { activeWindows []*schedWindow - wid WorkerID + wid storiface.WorkerID done func() } type activeResources struct { memUsedMin uint64 memUsedMax uint64 - gpuUsed bool + gpuUsed float64 cpuUse uint64 cond *sync.Cond @@ -145,7 +145,7 @@ type workerResponse struct { func newScheduler() *scheduler { return &scheduler{ - workers: map[WorkerID]*workerHandle{}, + workers: map[storiface.WorkerID]*workerHandle{}, schedule: make(chan *workerRequest), windowRequests: make(chan *schedWindowRequest, 20), @@ -155,8 +155,9 @@ func newScheduler() *scheduler { schedQueue: &requestQueue{}, workTracker: &workTracker{ - done: map[storiface.CallID]struct{}{}, - running: map[storiface.CallID]trackedWork{}, + done: map[storiface.CallID]struct{}{}, + running: map[storiface.CallID]trackedWork{}, + prepared: map[uuid.UUID]trackedWork{}, }, info: make(chan func(interface{})), @@ -377,7 +378,6 @@ func (sh *scheduler) trySched() { }() task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][task.sector.ProofType] task.indexHeap = sqi for wnd, windowRequest := range sh.openWindows { @@ -393,6 +393,8 @@ func (sh *scheduler) trySched() { continue } + needRes := worker.info.Resources.ResourceSpec(task.sector.ProofType, task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, windowRequest.worker, "schedAcceptable", worker.info) { continue @@ -456,7 +458,6 @@ func (sh *scheduler) trySched() { for sqi := 0; sqi < queueLen; sqi++ { task := (*sh.schedQueue)[sqi] - needRes := ResourceTable[task.taskType][task.sector.ProofType] selectedWindow := -1 for _, wnd := range acceptableWindows[task.indexHeap] { @@ -465,6 +466,8 @@ func (sh *scheduler) trySched() { log.Debugf("SCHED try assign sqi:%d sector %d to window %d", sqi, task.sector.ID.Number, wnd) + needRes := info.Resources.ResourceSpec(task.sector.ProofType, 
task.taskType) + // TODO: allow bigger windows if !windows[wnd].allocated.canHandleRequest(needRes, wid, "schedAssign", info) { continue diff --git a/extern/sector-storage/sched_resources.go b/extern/sector-storage/sched_resources.go index 7c16120c2..5f7f1cfb8 100644 --- a/extern/sector-storage/sched_resources.go +++ b/extern/sector-storage/sched_resources.go @@ -6,7 +6,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -func (a *activeResources) withResources(id WorkerID, wr storiface.WorkerInfo, r Resources, locker sync.Locker, cb func() error) error { +func (a *activeResources) withResources(id storiface.WorkerID, wr storiface.WorkerInfo, r storiface.Resources, locker sync.Locker, cb func() error) error { for !a.canHandleRequest(r, id, "withResources", wr) { if a.cond == nil { a.cond = sync.NewCond(locker) @@ -30,20 +30,20 @@ func (a *activeResources) hasWorkWaiting() bool { return a.waiting > 0 } -func (a *activeResources) add(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = true +func (a *activeResources) add(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed += r.GPUUtilization } - a.cpuUse += r.Threads(wr.CPUs) + a.cpuUse += r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin += r.MinMemory a.memUsedMax += r.MaxMemory } -func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { - if r.CanGPU { - a.gpuUsed = false +func (a *activeResources) free(wr storiface.WorkerResources, r storiface.Resources) { + if r.GPUUtilization > 0 { + a.gpuUsed -= r.GPUUtilization } - a.cpuUse -= r.Threads(wr.CPUs) + a.cpuUse -= r.Threads(wr.CPUs, len(wr.GPUs)) a.memUsedMin -= r.MinMemory a.memUsedMax -= r.MaxMemory @@ -54,35 +54,46 @@ func (a *activeResources) free(wr storiface.WorkerResources, r Resources) { // canHandleRequest evaluates if the worker has enough available resources to // handle the request. 
-func (a *activeResources) canHandleRequest(needRes Resources, wid WorkerID, caller string, info storiface.WorkerInfo) bool { +func (a *activeResources) canHandleRequest(needRes storiface.Resources, wid storiface.WorkerID, caller string, info storiface.WorkerInfo) bool { if info.IgnoreResources { // shortcircuit; if this worker is ignoring resources, it can always handle the request. return true } res := info.Resources + // TODO: dedupe needRes.BaseMinMemory per task type (don't add if that task is already running) - minNeedMem := res.MemReserved + a.memUsedMin + needRes.MinMemory + needRes.BaseMinMemory - if minNeedMem > res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM", wid, caller, minNeedMem/mib, res.MemPhysical/mib) + memNeeded := needRes.MinMemory + needRes.BaseMinMemory + memUsed := a.memUsedMin + // assume that MemUsed can be swapped, so only check it in the vmem Check + memAvail := res.MemPhysical - memUsed + if memNeeded > memAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough physical memory - need: %dM, have %dM available", wid, caller, memNeeded/mib, memAvail/mib) return false } - maxNeedMem := res.MemReserved + a.memUsedMax + needRes.MaxMemory + needRes.BaseMinMemory + vmemNeeded := needRes.MaxMemory + needRes.BaseMinMemory + vmemUsed := a.memUsedMax + workerMemoryReserved := res.MemUsed + res.MemSwapUsed // memory used outside lotus-worker (used by the OS, etc.) 
+ + if vmemUsed < workerMemoryReserved { + vmemUsed = workerMemoryReserved + } + vmemAvail := (res.MemPhysical + res.MemSwap) - vmemUsed - if maxNeedMem > res.MemSwap+res.MemPhysical { - log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM", wid, caller, maxNeedMem/mib, (res.MemSwap+res.MemPhysical)/mib) + if vmemNeeded > vmemAvail { + log.Debugf("sched: not scheduling on worker %s for %s; not enough virtual memory - need: %dM, have %dM available", wid, caller, vmemNeeded/mib, vmemAvail/mib) return false } - if a.cpuUse+needRes.Threads(res.CPUs) > res.CPUs { - log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs), a.cpuUse, res.CPUs) + if a.cpuUse+needRes.Threads(res.CPUs, len(res.GPUs)) > res.CPUs { + log.Debugf("sched: not scheduling on worker %s for %s; not enough threads, need %d, %d in use, target %d", wid, caller, needRes.Threads(res.CPUs, len(res.GPUs)), a.cpuUse, res.CPUs) return false } - if len(res.GPUs) > 0 && needRes.CanGPU { - if a.gpuUsed { - log.Debugf("sched: not scheduling on worker %s for %s; GPU in use", wid, caller) + if len(res.GPUs) > 0 && needRes.GPUUtilization > 0 { + if a.gpuUsed+needRes.GPUUtilization > float64(len(res.GPUs)) { + log.Debugf("sched: not scheduling on worker %s for %s; GPU(s) in use", wid, caller) return false } } @@ -96,12 +107,21 @@ func (a *activeResources) utilization(wr storiface.WorkerResources) float64 { cpu := float64(a.cpuUse) / float64(wr.CPUs) max = cpu - memMin := float64(a.memUsedMin+wr.MemReserved) / float64(wr.MemPhysical) + memUsed := a.memUsedMin + if memUsed < wr.MemUsed { + memUsed = wr.MemUsed + } + memMin := float64(memUsed) / float64(wr.MemPhysical) if memMin > max { max = memMin } - memMax := float64(a.memUsedMax+wr.MemReserved) / float64(wr.MemPhysical+wr.MemSwap) + vmemUsed := a.memUsedMax + if a.memUsedMax < wr.MemUsed+wr.MemSwapUsed { + vmemUsed = wr.MemUsed 
+ wr.MemSwapUsed + } + memMax := float64(vmemUsed) / float64(wr.MemPhysical+wr.MemSwap) + if memMax > max { max = memMax } diff --git a/extern/sector-storage/sched_test.go b/extern/sector-storage/sched_test.go index fbc4d83ee..4042f3905 100644 --- a/extern/sector-storage/sched_test.go +++ b/extern/sector-storage/sched_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sectorstorage import ( @@ -41,14 +42,16 @@ func TestWithPriority(t *testing.T) { var decentWorkerResources = storiface.WorkerResources{ MemPhysical: 128 << 30, MemSwap: 200 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 32, - GPUs: []string{"a GPU"}, + GPUs: []string{}, } var constrainedWorkerResources = storiface.WorkerResources{ MemPhysical: 1 << 30, - MemReserved: 2 << 30, + MemUsed: 1 << 30, + MemSwapUsed: 1 << 30, CPUs: 1, } @@ -100,6 +103,22 @@ func (s *schedTestWorker) AddPiece(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (s *schedTestWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, peices []abi.PieceInfo) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + panic("implement me") +} + +func (s *schedTestWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + panic("implement me") +} + func (s *schedTestWorker) MoveStorage(ctx context.Context, sector storage.SectorRef, types storiface.SectorFileType) (storiface.CallID, error) { panic("implement me") } @@ -188,6 +207,10 @@ func TestSchedStartStop(t *testing.T) { } func TestSched(t *testing.T) { + 
//stm: @WORKER_JOBS_001 + storiface.ParallelNum = 1 + storiface.ParallelDenom = 1 + ctx, done := context.WithTimeout(context.Background(), 30*time.Second) defer done() @@ -254,7 +277,9 @@ func TestSched(t *testing.T) { return nil }, noopAction) - require.NoError(t, err, fmt.Sprint(l, l2)) + if err != context.Canceled { + require.NoError(t, err, fmt.Sprint(l, l2)) + } }() <-sched.testSync @@ -299,9 +324,6 @@ func TestSched(t *testing.T) { } testFunc := func(workers []workerSpec, tasks []task) func(t *testing.T) { - ParallelNum = 1 - ParallelDenom = 1 - return func(t *testing.T) { index := stores.NewIndex() @@ -558,7 +580,7 @@ func BenchmarkTrySched(b *testing.B) { b.StopTimer() sched := newScheduler() - sched.workers[WorkerID{}] = &workerHandle{ + sched.workers[storiface.WorkerID{}] = &workerHandle{ workerRpc: nil, info: storiface.WorkerInfo{ Hostname: "t", @@ -570,7 +592,7 @@ func BenchmarkTrySched(b *testing.B) { for i := 0; i < windows; i++ { sched.openWindows = append(sched.openWindows, &schedWindowRequest{ - worker: WorkerID{}, + worker: storiface.WorkerID{}, done: make(chan *schedWindow, 1000), }) } @@ -616,7 +638,7 @@ func TestWindowCompact(t *testing.T) { taskType: task, sector: storage.SectorRef{ProofType: spt}, }) - window.allocated.add(wh.info.Resources, ResourceTable[task][spt]) + window.allocated.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } wh.activeWindows = append(wh.activeWindows, window) @@ -635,7 +657,7 @@ func TestWindowCompact(t *testing.T) { for ti, task := range tasks { require.Equal(t, task, wh.activeWindows[wi].todo[ti].taskType, "%d, %d", wi, ti) - expectRes.add(wh.info.Resources, ResourceTable[task][spt]) + expectRes.add(wh.info.Resources, storiface.ResourceTable[task][spt]) } require.Equal(t, expectRes.cpuUse, wh.activeWindows[wi].allocated.cpuUse, "%d", wi) diff --git a/extern/sector-storage/sched_worker.go b/extern/sector-storage/sched_worker.go index 42bba2ee5..762c3fc3a 100644 --- a/extern/sector-storage/sched_worker.go 
+++ b/extern/sector-storage/sched_worker.go @@ -4,17 +4,18 @@ import ( "context" "time" - "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "golang.org/x/xerrors" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" "github.com/filecoin-project/lotus/extern/sector-storage/stores" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) type schedWorker struct { sched *scheduler worker *workerHandle - wid WorkerID + wid storiface.WorkerID heartbeatTimer *time.Ticker scheduledWindows chan *schedWindow @@ -50,7 +51,7 @@ func (sh *scheduler) runWorker(ctx context.Context, w Worker) error { closedMgr: make(chan struct{}), } - wid := WorkerID(sessID) + wid := storiface.WorkerID(sessID) sh.workersLk.Lock() _, exist := sh.workers[wid] @@ -237,7 +238,7 @@ func (sw *schedWorker) checkSession(ctx context.Context) bool { continue } - if WorkerID(curSes) != sw.wid { + if storiface.WorkerID(curSes) != sw.wid { if curSes != ClosedWorkerID { // worker restarted log.Warnw("worker session changed (worker restarted?)", "initial", sw.wid, "current", curSes) @@ -296,7 +297,7 @@ func (sw *schedWorker) workerCompactWindows() { var moved []int for ti, todo := range window.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if !lower.allocated.canHandleRequest(needRes, sw.wid, "compactWindows", worker.info) { continue } @@ -357,7 +358,7 @@ assignLoop: worker.lk.Lock() for t, todo := range firstWindow.todo { - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := worker.info.Resources.ResourceSpec(todo.sector.ProofType, todo.taskType) if worker.preparing.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -418,7 +419,7 @@ assignLoop: continue } - needRes := ResourceTable[todo.taskType][todo.sector.ProofType] + needRes := storiface.ResourceTable[todo.taskType][todo.sector.ProofType] 
if worker.active.canHandleRequest(needRes, sw.wid, "startPreparing", worker.info) { tidx = t break @@ -456,7 +457,7 @@ assignLoop: func (sw *schedWorker) startProcessingTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.lk.Lock() w.preparing.add(w.info.Resources, needRes) @@ -464,7 +465,9 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { go func() { // first run the prepare step (e.g. fetching sector data from other worker) - err := req.prepare(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) + tw := sh.workTracker.worker(sw.wid, w.info, w.workerRpc) + tw.start() + err := req.prepare(req.ctx, tw) w.lk.Lock() if err != nil { @@ -488,6 +491,14 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { return } + tw = sh.workTracker.worker(sw.wid, w.info, w.workerRpc) + + // start tracking work first early in case we need to wait for resources + werr := make(chan error, 1) + go func() { + werr <- req.work(req.ctx, tw) + }() + // wait (if needed) for resources in the 'active' window err = w.active.withResources(sw.wid, w.info, needRes, &w.lk, func() error { w.preparing.free(w.info.Resources, needRes) @@ -501,7 +512,8 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { } // Do the work! - err = req.work(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) + tw.start() + err = <-werr select { case req.ret <- workerResponse{err: err}: @@ -528,13 +540,15 @@ func (sw *schedWorker) startProcessingTask(req *workerRequest) error { func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { w, sh := sw.worker, sw.sched - needRes := ResourceTable[req.taskType][req.sector.ProofType] + needRes := w.info.Resources.ResourceSpec(req.sector.ProofType, req.taskType) w.active.add(w.info.Resources, needRes) go func() { // Do the work! 
- err := req.work(req.ctx, sh.workTracker.worker(sw.wid, w.info, w.workerRpc)) + tw := sh.workTracker.worker(sw.wid, w.info, w.workerRpc) + tw.start() + err := req.work(req.ctx, tw) select { case req.ret <- workerResponse{err: err}: @@ -566,7 +580,7 @@ func (sw *schedWorker) startProcessingReadyTask(req *workerRequest) error { return nil } -func (sh *scheduler) workerCleanup(wid WorkerID, w *workerHandle) { +func (sh *scheduler) workerCleanup(wid storiface.WorkerID, w *workerHandle) { select { case <-w.closingMgr: default: diff --git a/extern/sector-storage/sealtasks/task.go b/extern/sector-storage/sealtasks/task.go index 6d341a4b3..8b91d5b29 100644 --- a/extern/sector-storage/sealtasks/task.go +++ b/extern/sector-storage/sealtasks/task.go @@ -6,24 +6,33 @@ const ( TTAddPiece TaskType = "seal/v0/addpiece" TTPreCommit1 TaskType = "seal/v0/precommit/1" TTPreCommit2 TaskType = "seal/v0/precommit/2" - TTCommit1 TaskType = "seal/v0/commit/1" // NOTE: We use this to transfer the sector into miner-local storage for now; Don't use on workers! 
+ TTCommit1 TaskType = "seal/v0/commit/1" TTCommit2 TaskType = "seal/v0/commit/2" TTFinalize TaskType = "seal/v0/finalize" TTFetch TaskType = "seal/v0/fetch" TTUnseal TaskType = "seal/v0/unseal" + + TTReplicaUpdate TaskType = "seal/v0/replicaupdate" + TTProveReplicaUpdate1 TaskType = "seal/v0/provereplicaupdate/1" + TTProveReplicaUpdate2 TaskType = "seal/v0/provereplicaupdate/2" + TTRegenSectorKey TaskType = "seal/v0/regensectorkey" ) var order = map[TaskType]int{ - TTAddPiece: 6, // least priority - TTPreCommit1: 5, - TTPreCommit2: 4, - TTCommit2: 3, - TTCommit1: 2, - TTUnseal: 1, - TTFetch: -1, - TTFinalize: -2, // most priority + TTRegenSectorKey: 10, // least priority + TTAddPiece: 9, + TTReplicaUpdate: 8, + TTProveReplicaUpdate2: 7, + TTProveReplicaUpdate1: 6, + TTPreCommit1: 5, + TTPreCommit2: 4, + TTCommit2: 3, + TTCommit1: 2, + TTUnseal: 1, + TTFetch: -1, + TTFinalize: -2, // most priority } var shortNames = map[TaskType]string{ @@ -38,6 +47,11 @@ var shortNames = map[TaskType]string{ TTFetch: "GET", TTUnseal: "UNS", + + TTReplicaUpdate: "RU", + TTProveReplicaUpdate1: "PR1", + TTProveReplicaUpdate2: "PR2", + TTRegenSectorKey: "GSK", } func (a TaskType) MuchLess(b TaskType) (bool, bool) { diff --git a/extern/sector-storage/stats.go b/extern/sector-storage/stats.go index c5bc2fba1..43828742a 100644 --- a/extern/sector-storage/stats.go +++ b/extern/sector-storage/stats.go @@ -35,7 +35,13 @@ func (m *Manager) WorkerJobs() map[uuid.UUID][]storiface.WorkerJob { out := map[uuid.UUID][]storiface.WorkerJob{} calls := map[storiface.CallID]struct{}{} - for _, t := range m.sched.workTracker.Running() { + running, preparing := m.sched.workTracker.Running() + + for _, t := range running { + out[uuid.UUID(t.worker)] = append(out[uuid.UUID(t.worker)], t.job) + calls[t.job.ID] = struct{}{} + } + for _, t := range preparing { out[uuid.UUID(t.worker)] = append(out[uuid.UUID(t.worker)], t.job) calls[t.job.ID] = struct{}{} } @@ -50,7 +56,7 @@ func (m *Manager) WorkerJobs() 
map[uuid.UUID][]storiface.WorkerJob { ID: storiface.UndefCall, Sector: request.sector.ID, Task: request.taskType, - RunWait: wi + 1, + RunWait: wi + 2, Start: request.start, }) } diff --git a/extern/sector-storage/stores/http_handler.go b/extern/sector-storage/stores/http_handler.go index 5b8477fc8..771a9a3a1 100644 --- a/extern/sector-storage/stores/http_handler.go +++ b/extern/sector-storage/stores/http_handler.go @@ -2,7 +2,6 @@ package stores import ( "encoding/json" - "io" "net/http" "os" "strconv" @@ -21,9 +20,9 @@ import ( var log = logging.Logger("stores") -var _ partialFileHandler = &DefaultPartialFileHandler{} +var _ PartialFileHandler = &DefaultPartialFileHandler{} -// DefaultPartialFileHandler is the default implementation of the partialFileHandler interface. +// DefaultPartialFileHandler is the default implementation of the PartialFileHandler interface. // This is probably the only implementation we'll ever use because the purpose of the // interface to is to mock out partial file related functionality during testing. type DefaultPartialFileHandler struct{} @@ -46,7 +45,7 @@ func (d *DefaultPartialFileHandler) Close(pf *partialfile.PartialFile) error { type FetchHandler struct { Local Store - PfHandler partialFileHandler + PfHandler PartialFileHandler } func (handler *FetchHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // /remote/ @@ -85,7 +84,6 @@ func (handler *FetchHandler) remoteStatFs(w http.ResponseWriter, r *http.Request // remoteGetSector returns the sector file/tared directory byte stream for the sectorID and sector file type sent in the request. // returns an error if it does NOT have the required sector file/dir. 
func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) { - log.Infof("SERVE GET %s", r.URL) vars := mux.Vars(r) id, err := storiface.ParseSectorID(vars["id"]) @@ -139,17 +137,12 @@ func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Requ return } - rd, err := tarutil.TarDirectory(path) - if err != nil { - log.Errorf("%+v", err) - w.WriteHeader(500) - return - } - w.Header().Set("Content-Type", "application/x-tar") w.WriteHeader(200) - if _, err := io.CopyBuffer(w, rd, make([]byte, CopyBuf)); err != nil { - log.Errorf("%+v", err) + + err := tarutil.TarDirectory(path, w, make([]byte, CopyBuf)) + if err != nil { + log.Errorf("send tar: %+v", err) return } } else { @@ -179,7 +172,7 @@ func (handler *FetchHandler) remoteDeleteSector(w http.ResponseWriter, r *http.R return } - if err := handler.Local.Remove(r.Context(), id, ft, false); err != nil { + if err := handler.Local.Remove(r.Context(), id, ft, false, []ID{ID(r.FormValue("keep"))}); err != nil { log.Errorf("%+v", err) w.WriteHeader(500) return diff --git a/extern/sector-storage/stores/http_handler_test.go b/extern/sector-storage/stores/http_handler_test.go index 1258d8530..673aba55d 100644 --- a/extern/sector-storage/stores/http_handler_test.go +++ b/extern/sector-storage/stores/http_handler_test.go @@ -63,7 +63,7 @@ func TestRemoteGetAllocated(t *testing.T) { tcs := map[string]struct { piFnc func(pi *pieceInfo) storeFnc func(s *mocks.MockStore) - pfFunc func(s *mocks.MockpartialFileHandler) + pfFunc func(s *mocks.MockPartialFileHandler) // expectation expectedStatusCode int @@ -129,7 +129,7 @@ func TestRemoteGetAllocated(t *testing.T) { storiface.SectorPaths{}, nil).Times(1) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, xerrors.New("some error")).Times(1) }, @@ -146,7 +146,7 @@ func 
TestRemoteGetAllocated(t *testing.T) { storiface.SectorPaths{}, nil).Times(1) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, nil).Times(1) @@ -165,7 +165,7 @@ func TestRemoteGetAllocated(t *testing.T) { storiface.SectorPaths{}, nil).Times(1) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, nil).Times(1) @@ -184,7 +184,7 @@ func TestRemoteGetAllocated(t *testing.T) { storiface.SectorPaths{}, nil).Times(1) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(emptyPartialFile, nil).Times(1) @@ -203,7 +203,7 @@ func TestRemoteGetAllocated(t *testing.T) { defer mockCtrl.Finish() lstore := mocks.NewMockStore(mockCtrl) - pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + pfhandler := mocks.NewMockPartialFileHandler(mockCtrl) handler := &stores.FetchHandler{ lstore, @@ -371,7 +371,7 @@ func TestRemoteGetSector(t *testing.T) { // when test is done, assert expectations on all mock objects. 
defer mockCtrl.Finish() lstore := mocks.NewMockStore(mockCtrl) - pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + pfhandler := mocks.NewMockPartialFileHandler(mockCtrl) var path string diff --git a/extern/sector-storage/stores/index.go b/extern/sector-storage/stores/index.go index 1d3d972e0..a90cdf0b9 100644 --- a/extern/sector-storage/stores/index.go +++ b/extern/sector-storage/stores/index.go @@ -29,6 +29,8 @@ var SkippedHeartbeatThresh = HeartbeatInterval * 5 // filesystem, local or networked / shared by multiple machines type ID string +type Group = string + type StorageInfo struct { ID ID URLs []string // TODO: Support non-http transports @@ -37,6 +39,9 @@ type StorageInfo struct { CanSeal bool CanStore bool + + Groups []Group + AllowTo []Group } type HealthReport struct { @@ -55,6 +60,8 @@ type SectorStorageInfo struct { Primary bool } +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/index.go -package=mocks . SectorIndex + type SectorIndex interface { // part of storage-miner api StorageAttach(context.Context, StorageInfo, fsutil.FsStat) error StorageInfo(context.Context, ID) (StorageInfo, error) @@ -69,6 +76,7 @@ type SectorIndex interface { // part of storage-miner api // atomically acquire locks on all sector file types. 
close ctx to unlock StorageLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) error StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) + StorageGetLocks(ctx context.Context) (storiface.SectorLocks, error) StorageList(ctx context.Context) (map[ID][]Decl, error) } @@ -166,6 +174,8 @@ func (i *Index) StorageAttach(ctx context.Context, si StorageInfo, st fsutil.FsS i.stores[si.ID].info.MaxStorage = si.MaxStorage i.stores[si.ID].info.CanSeal = si.CanSeal i.stores[si.ID].info.CanStore = si.CanStore + i.stores[si.ID].info.Groups = si.Groups + i.stores[si.ID].info.AllowTo = si.AllowTo return nil } @@ -290,6 +300,8 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif storageIDs := map[ID]uint64{} isprimary := map[ID]bool{} + allowTo := map[Group]struct{}{} + for _, pathType := range storiface.PathTypes { if ft&pathType == 0 { continue @@ -321,6 +333,14 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif urls[k] = rl.String() } + if allowTo != nil && len(st.info.AllowTo) > 0 { + for _, group := range st.info.AllowTo { + allowTo[group] = struct{}{} + } + } else { + allowTo = nil // allow to any + } + out = append(out, SectorStorageInfo{ ID: id, URLs: urls, @@ -363,6 +383,22 @@ func (i *Index) StorageFindSector(ctx context.Context, s abi.SectorID, ft storif continue } + if allowTo != nil { + allow := false + for _, group := range st.info.Groups { + if _, found := allowTo[group]; found { + log.Debugf("path %s in allowed group %s", st.info.ID, group) + allow = true + break + } + } + + if !allow { + log.Debugf("not selecting on %s, not in allowed group, allow %+v; path has %+v", st.info.ID, allowTo, st.info.Groups) + continue + } + } + urls := make([]string, len(st.info.URLs)) for k, u := range st.info.URLs { rl, err := url.Parse(u) diff --git 
a/extern/sector-storage/stores/index_locks.go b/extern/sector-storage/stores/index_locks.go index 3a5ff940e..ade437c6b 100644 --- a/extern/sector-storage/stores/index_locks.go +++ b/extern/sector-storage/stores/index_locks.go @@ -2,6 +2,7 @@ package stores import ( "context" + "sort" "sync" "golang.org/x/xerrors" @@ -154,3 +155,32 @@ func (i *indexLocks) StorageLock(ctx context.Context, sector abi.SectorID, read func (i *indexLocks) StorageTryLock(ctx context.Context, sector abi.SectorID, read storiface.SectorFileType, write storiface.SectorFileType) (bool, error) { return i.lockWith(ctx, (*sectorLock).tryLockSafe, sector, read, write) } + +func (i *indexLocks) StorageGetLocks(context.Context) (storiface.SectorLocks, error) { + i.lk.Lock() + defer i.lk.Unlock() + + out := storiface.SectorLocks{ + Locks: []storiface.SectorLock{}, + } + + for id, lock := range i.locks { + l := storiface.SectorLock{Sector: id} + + for t, b := range lock.w.All() { + if b { + l.Write[t]++ + } + } + + copy(l.Read[:], lock.r[:]) + + out.Locks = append(out.Locks, l) + } + + sort.Slice(out.Locks, func(i, j int) bool { + return out.Locks[i].Sector.Number < out.Locks[j].Sector.Number + }) + + return out, nil +} diff --git a/extern/sector-storage/stores/index_test.go b/extern/sector-storage/stores/index_test.go new file mode 100644 index 000000000..bb4239035 --- /dev/null +++ b/extern/sector-storage/stores/index_test.go @@ -0,0 +1,154 @@ +package stores + +import ( + "context" + "testing" + + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + "github.com/filecoin-project/lotus/extern/sector-storage/storiface" +) + +func init() { + logging.SetLogLevel("stores", "DEBUG") +} + +func newTestStorage() StorageInfo { + return StorageInfo{ + ID: ID(uuid.New().String()), + CanSeal: true, + CanStore: true, + Groups: nil, + 
AllowTo: nil, + } +} + +var bigFsStat = fsutil.FsStat{ + Capacity: 1 << 40, + Available: 1 << 40, + FSAvailable: 1 << 40, + Reserved: 0, + Max: 0, + Used: 0, +} + +const s32g = 32 << 30 + +func TestFindSimple(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 0) + } + + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + } +} + +func TestFindNoAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + stor2 := newTestStorage() + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } +} + +func TestFindAllow(t *testing.T) { + ctx := context.Background() + + i := NewIndex() + + stor1 := newTestStorage() + stor1.AllowTo = []Group{"grp1"} + 
+ stor2 := newTestStorage() + stor2.Groups = []Group{"grp1"} + + stor3 := newTestStorage() + stor3.Groups = []Group{"grp2"} + + require.NoError(t, i.StorageAttach(ctx, stor1, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor2, bigFsStat)) + require.NoError(t, i.StorageAttach(ctx, stor3, bigFsStat)) + + s1 := abi.SectorID{ + Miner: 12, + Number: 34, + } + require.NoError(t, i.StorageDeclareSector(ctx, stor1.ID, s1, storiface.FTSealed, true)) + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, false) + require.NoError(t, err) + require.Len(t, si, 1) + require.Equal(t, stor1.ID, si[0].ID) + } + + { + si, err := i.StorageFindSector(ctx, s1, storiface.FTSealed, s32g, true) + require.NoError(t, err) + require.Len(t, si, 2) + if si[0].ID == stor1.ID { + require.Equal(t, stor1.ID, si[0].ID) + require.Equal(t, stor2.ID, si[1].ID) + } else { + require.Equal(t, stor1.ID, si[1].ID) + require.Equal(t, stor2.ID, si[0].ID) + } + } +} diff --git a/extern/sector-storage/stores/interface.go b/extern/sector-storage/stores/interface.go index 4986e6c80..32157366c 100644 --- a/extern/sector-storage/stores/interface.go +++ b/extern/sector-storage/stores/interface.go @@ -13,8 +13,10 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/pf.go -package=mocks . PartialFileHandler + // PartialFileHandler helps mock out the partial file functionality during testing. -type partialFileHandler interface { +type PartialFileHandler interface { // OpenPartialFile opens and returns a partial file at the given path and also verifies it has the given // size OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) @@ -30,9 +32,11 @@ type partialFileHandler interface { Close(pf *partialfile.PartialFile) error } +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/store.go -package=mocks . 
Store + type Store interface { AcquireSector(ctx context.Context, s storage.SectorRef, existing storiface.SectorFileType, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (paths storiface.SectorPaths, stores storiface.SectorPaths, err error) - Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error + Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool, keepIn []ID) error // like remove, but doesn't remove the primary sector copy, nor the last // non-primary copy if there no primary copies diff --git a/extern/sector-storage/stores/local.go b/extern/sector-storage/stores/local.go index 6f37f4f7a..8121c418d 100644 --- a/extern/sector-storage/stores/local.go +++ b/extern/sector-storage/stores/local.go @@ -46,6 +46,13 @@ type LocalStorageMeta struct { // MaxStorage specifies the maximum number of bytes to use for sector storage // (0 = unlimited) MaxStorage uint64 + + // List of storage groups this path belongs to + Groups []string + + // List of storage groups to which data from this path can be moved. 
If none + // are specified, allow to all + AllowTo []string } // StorageConfig .lotusstorage/storage.json @@ -212,6 +219,8 @@ func (st *Local) OpenPath(ctx context.Context, p string) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("declaring storage in index: %w", err) @@ -276,6 +285,8 @@ func (st *Local) Redeclare(ctx context.Context) error { MaxStorage: meta.MaxStorage, CanSeal: meta.CanSeal, CanStore: meta.CanStore, + Groups: meta.Groups, + AllowTo: meta.AllowTo, }, fst) if err != nil { return xerrors.Errorf("redeclaring storage in index: %w", err) @@ -544,7 +555,7 @@ func (st *Local) Local(ctx context.Context) ([]StoragePath, error) { return out, nil } -func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { +func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []ID) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } @@ -558,7 +569,14 @@ func (st *Local) Remove(ctx context.Context, sid abi.SectorID, typ storiface.Sec return xerrors.Errorf("can't delete sector %v(%d), not found", sid, typ) } +storeLoop: for _, info := range si { + for _, id := range keepIn { + if id == info.ID { + continue storeLoop + } + } + if err := st.removeSector(ctx, sid, typ, info.ID); err != nil { return err } diff --git a/extern/sector-storage/stores/mocks/index.go b/extern/sector-storage/stores/mocks/index.go index 59a6017b5..268148536 100644 --- a/extern/sector-storage/stores/mocks/index.go +++ b/extern/sector-storage/stores/mocks/index.go @@ -1,5 +1,5 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: index.go +// Source: github.com/filecoin-project/lotus/extern/sector-storage/stores (interfaces: SectorIndex) // Package mocks is a generated GoMock package. 
package mocks @@ -53,61 +53,76 @@ func (mr *MockSectorIndexMockRecorder) StorageAttach(arg0, arg1, arg2 interface{ } // StorageBestAlloc mocks base method. -func (m *MockSectorIndex) StorageBestAlloc(ctx context.Context, allocate storiface.SectorFileType, ssize abi.SectorSize, pathType storiface.PathType) ([]stores.StorageInfo, error) { +func (m *MockSectorIndex) StorageBestAlloc(arg0 context.Context, arg1 storiface.SectorFileType, arg2 abi.SectorSize, arg3 storiface.PathType) ([]stores.StorageInfo, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageBestAlloc", ctx, allocate, ssize, pathType) + ret := m.ctrl.Call(m, "StorageBestAlloc", arg0, arg1, arg2, arg3) ret0, _ := ret[0].([]stores.StorageInfo) ret1, _ := ret[1].(error) return ret0, ret1 } // StorageBestAlloc indicates an expected call of StorageBestAlloc. -func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(ctx, allocate, ssize, pathType interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageBestAlloc(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageBestAlloc", reflect.TypeOf((*MockSectorIndex)(nil).StorageBestAlloc), ctx, allocate, ssize, pathType) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageBestAlloc", reflect.TypeOf((*MockSectorIndex)(nil).StorageBestAlloc), arg0, arg1, arg2, arg3) } // StorageDeclareSector mocks base method. 
-func (m *MockSectorIndex) StorageDeclareSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType, primary bool) error { +func (m *MockSectorIndex) StorageDeclareSector(arg0 context.Context, arg1 stores.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType, arg4 bool) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageDeclareSector", ctx, storageID, s, ft, primary) + ret := m.ctrl.Call(m, "StorageDeclareSector", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].(error) return ret0 } // StorageDeclareSector indicates an expected call of StorageDeclareSector. -func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(ctx, storageID, s, ft, primary interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageDeclareSector(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDeclareSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDeclareSector), ctx, storageID, s, ft, primary) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDeclareSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDeclareSector), arg0, arg1, arg2, arg3, arg4) } // StorageDropSector mocks base method. -func (m *MockSectorIndex) StorageDropSector(ctx context.Context, storageID stores.ID, s abi.SectorID, ft storiface.SectorFileType) error { +func (m *MockSectorIndex) StorageDropSector(arg0 context.Context, arg1 stores.ID, arg2 abi.SectorID, arg3 storiface.SectorFileType) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageDropSector", ctx, storageID, s, ft) + ret := m.ctrl.Call(m, "StorageDropSector", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // StorageDropSector indicates an expected call of StorageDropSector. 
-func (mr *MockSectorIndexMockRecorder) StorageDropSector(ctx, storageID, s, ft interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageDropSector(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDropSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDropSector), ctx, storageID, s, ft) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageDropSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageDropSector), arg0, arg1, arg2, arg3) } // StorageFindSector mocks base method. -func (m *MockSectorIndex) StorageFindSector(ctx context.Context, sector abi.SectorID, ft storiface.SectorFileType, ssize abi.SectorSize, allowFetch bool) ([]stores.SectorStorageInfo, error) { +func (m *MockSectorIndex) StorageFindSector(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 abi.SectorSize, arg4 bool) ([]stores.SectorStorageInfo, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageFindSector", ctx, sector, ft, ssize, allowFetch) + ret := m.ctrl.Call(m, "StorageFindSector", arg0, arg1, arg2, arg3, arg4) ret0, _ := ret[0].([]stores.SectorStorageInfo) ret1, _ := ret[1].(error) return ret0, ret1 } // StorageFindSector indicates an expected call of StorageFindSector. -func (mr *MockSectorIndexMockRecorder) StorageFindSector(ctx, sector, ft, ssize, allowFetch interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageFindSector(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), ctx, sector, ft, ssize, allowFetch) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageFindSector", reflect.TypeOf((*MockSectorIndex)(nil).StorageFindSector), arg0, arg1, arg2, arg3, arg4) +} + +// StorageGetLocks mocks base method. 
+func (m *MockSectorIndex) StorageGetLocks(arg0 context.Context) (storiface.SectorLocks, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StorageGetLocks", arg0) + ret0, _ := ret[0].(storiface.SectorLocks) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StorageGetLocks indicates an expected call of StorageGetLocks. +func (mr *MockSectorIndexMockRecorder) StorageGetLocks(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageGetLocks", reflect.TypeOf((*MockSectorIndex)(nil).StorageGetLocks), arg0) } // StorageInfo mocks base method. @@ -126,32 +141,32 @@ func (mr *MockSectorIndexMockRecorder) StorageInfo(arg0, arg1 interface{}) *gomo } // StorageList mocks base method. -func (m *MockSectorIndex) StorageList(ctx context.Context) (map[stores.ID][]stores.Decl, error) { +func (m *MockSectorIndex) StorageList(arg0 context.Context) (map[stores.ID][]stores.Decl, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageList", ctx) + ret := m.ctrl.Call(m, "StorageList", arg0) ret0, _ := ret[0].(map[stores.ID][]stores.Decl) ret1, _ := ret[1].(error) return ret0, ret1 } // StorageList indicates an expected call of StorageList. -func (mr *MockSectorIndexMockRecorder) StorageList(ctx interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageList(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageList", reflect.TypeOf((*MockSectorIndex)(nil).StorageList), arg0) } // StorageLock mocks base method. 
-func (m *MockSectorIndex) StorageLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) error { +func (m *MockSectorIndex) StorageLock(arg0 context.Context, arg1 abi.SectorID, arg2, arg3 storiface.SectorFileType) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageLock", ctx, sector, read, write) + ret := m.ctrl.Call(m, "StorageLock", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(error) return ret0 } // StorageLock indicates an expected call of StorageLock. -func (mr *MockSectorIndexMockRecorder) StorageLock(ctx, sector, read, write interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageLock(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageLock), ctx, sector, read, write) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageLock), arg0, arg1, arg2, arg3) } // StorageReportHealth mocks base method. @@ -169,16 +184,16 @@ func (mr *MockSectorIndexMockRecorder) StorageReportHealth(arg0, arg1, arg2 inte } // StorageTryLock mocks base method. -func (m *MockSectorIndex) StorageTryLock(ctx context.Context, sector abi.SectorID, read, write storiface.SectorFileType) (bool, error) { +func (m *MockSectorIndex) StorageTryLock(arg0 context.Context, arg1 abi.SectorID, arg2, arg3 storiface.SectorFileType) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StorageTryLock", ctx, sector, read, write) + ret := m.ctrl.Call(m, "StorageTryLock", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // StorageTryLock indicates an expected call of StorageTryLock. 
-func (mr *MockSectorIndexMockRecorder) StorageTryLock(ctx, sector, read, write interface{}) *gomock.Call { +func (mr *MockSectorIndexMockRecorder) StorageTryLock(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageTryLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageTryLock), ctx, sector, read, write) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StorageTryLock", reflect.TypeOf((*MockSectorIndex)(nil).StorageTryLock), arg0, arg1, arg2, arg3) } diff --git a/extern/sector-storage/stores/mocks/pf.go b/extern/sector-storage/stores/mocks/pf.go new file mode 100644 index 000000000..175e3c119 --- /dev/null +++ b/extern/sector-storage/stores/mocks/pf.go @@ -0,0 +1,97 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/sector-storage/stores (interfaces: PartialFileHandler) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + os "os" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + partialfile "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + gomock "github.com/golang/mock/gomock" +) + +// MockPartialFileHandler is a mock of PartialFileHandler interface. +type MockPartialFileHandler struct { + ctrl *gomock.Controller + recorder *MockPartialFileHandlerMockRecorder +} + +// MockPartialFileHandlerMockRecorder is the mock recorder for MockPartialFileHandler. +type MockPartialFileHandlerMockRecorder struct { + mock *MockPartialFileHandler +} + +// NewMockPartialFileHandler creates a new mock instance. 
+func NewMockPartialFileHandler(ctrl *gomock.Controller) *MockPartialFileHandler { + mock := &MockPartialFileHandler{ctrl: ctrl} + mock.recorder = &MockPartialFileHandlerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPartialFileHandler) EXPECT() *MockPartialFileHandlerMockRecorder { + return m.recorder +} + +// Close mocks base method. +func (m *MockPartialFileHandler) Close(arg0 *partialfile.PartialFile) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Close", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// Close indicates an expected call of Close. +func (mr *MockPartialFileHandlerMockRecorder) Close(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockPartialFileHandler)(nil).Close), arg0) +} + +// HasAllocated mocks base method. +func (m *MockPartialFileHandler) HasAllocated(arg0 *partialfile.PartialFile, arg1 storiface.UnpaddedByteIndex, arg2 abi.UnpaddedPieceSize) (bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "HasAllocated", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// HasAllocated indicates an expected call of HasAllocated. +func (mr *MockPartialFileHandlerMockRecorder) HasAllocated(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAllocated", reflect.TypeOf((*MockPartialFileHandler)(nil).HasAllocated), arg0, arg1, arg2) +} + +// OpenPartialFile mocks base method. +func (m *MockPartialFileHandler) OpenPartialFile(arg0 abi.PaddedPieceSize, arg1 string) (*partialfile.PartialFile, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "OpenPartialFile", arg0, arg1) + ret0, _ := ret[0].(*partialfile.PartialFile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// OpenPartialFile indicates an expected call of OpenPartialFile. 
+func (mr *MockPartialFileHandlerMockRecorder) OpenPartialFile(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenPartialFile", reflect.TypeOf((*MockPartialFileHandler)(nil).OpenPartialFile), arg0, arg1) +} + +// Reader mocks base method. +func (m *MockPartialFileHandler) Reader(arg0 *partialfile.PartialFile, arg1 storiface.PaddedByteIndex, arg2 abi.PaddedPieceSize) (*os.File, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reader", arg0, arg1, arg2) + ret0, _ := ret[0].(*os.File) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reader indicates an expected call of Reader. +func (mr *MockPartialFileHandlerMockRecorder) Reader(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockPartialFileHandler)(nil).Reader), arg0, arg1, arg2) +} diff --git a/extern/sector-storage/stores/mocks/store.go b/extern/sector-storage/stores/mocks/store.go new file mode 100644 index 000000000..15ca9aae5 --- /dev/null +++ b/extern/sector-storage/stores/mocks/store.go @@ -0,0 +1,128 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/filecoin-project/lotus/extern/sector-storage/stores (interfaces: Store) + +// Package mocks is a generated GoMock package. +package mocks + +import ( + context "context" + reflect "reflect" + + abi "github.com/filecoin-project/go-state-types/abi" + fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" + stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" + storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + storage "github.com/filecoin-project/specs-storage/storage" + gomock "github.com/golang/mock/gomock" +) + +// MockStore is a mock of Store interface. 
+type MockStore struct { + ctrl *gomock.Controller + recorder *MockStoreMockRecorder +} + +// MockStoreMockRecorder is the mock recorder for MockStore. +type MockStoreMockRecorder struct { + mock *MockStore +} + +// NewMockStore creates a new mock instance. +func NewMockStore(ctrl *gomock.Controller) *MockStore { + mock := &MockStore{ctrl: ctrl} + mock.recorder = &MockStoreMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStore) EXPECT() *MockStoreMockRecorder { + return m.recorder +} + +// AcquireSector mocks base method. +func (m *MockStore) AcquireSector(arg0 context.Context, arg1 storage.SectorRef, arg2, arg3 storiface.SectorFileType, arg4 storiface.PathType, arg5 storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AcquireSector", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(storiface.SectorPaths) + ret1, _ := ret[1].(storiface.SectorPaths) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// AcquireSector indicates an expected call of AcquireSector. +func (mr *MockStoreMockRecorder) AcquireSector(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// FsStat mocks base method. +func (m *MockStore) FsStat(arg0 context.Context, arg1 stores.ID) (fsutil.FsStat, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FsStat", arg0, arg1) + ret0, _ := ret[0].(fsutil.FsStat) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FsStat indicates an expected call of FsStat. 
+func (mr *MockStoreMockRecorder) FsStat(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), arg0, arg1) +} + +// MoveStorage mocks base method. +func (m *MockStore) MoveStorage(arg0 context.Context, arg1 storage.SectorRef, arg2 storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MoveStorage", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// MoveStorage indicates an expected call of MoveStorage. +func (mr *MockStoreMockRecorder) MoveStorage(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveStorage", reflect.TypeOf((*MockStore)(nil).MoveStorage), arg0, arg1, arg2) +} + +// Remove mocks base method. +func (m *MockStore) Remove(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType, arg3 bool, arg4 []stores.ID) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Remove", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(error) + return ret0 +} + +// Remove indicates an expected call of Remove. +func (mr *MockStoreMockRecorder) Remove(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockStore)(nil).Remove), arg0, arg1, arg2, arg3, arg4) +} + +// RemoveCopies mocks base method. +func (m *MockStore) RemoveCopies(arg0 context.Context, arg1 abi.SectorID, arg2 storiface.SectorFileType) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RemoveCopies", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// RemoveCopies indicates an expected call of RemoveCopies. 
+func (mr *MockStoreMockRecorder) RemoveCopies(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCopies", reflect.TypeOf((*MockStore)(nil).RemoveCopies), arg0, arg1, arg2) +} + +// Reserve mocks base method. +func (m *MockStore) Reserve(arg0 context.Context, arg1 storage.SectorRef, arg2 storiface.SectorFileType, arg3 storiface.SectorPaths, arg4 map[storiface.SectorFileType]int) (func(), error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Reserve", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(func()) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Reserve indicates an expected call of Reserve. +func (mr *MockStoreMockRecorder) Reserve(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), arg0, arg1, arg2, arg3, arg4) +} diff --git a/extern/sector-storage/stores/mocks/stores.go b/extern/sector-storage/stores/mocks/stores.go deleted file mode 100644 index fdfd73a07..000000000 --- a/extern/sector-storage/stores/mocks/stores.go +++ /dev/null @@ -1,212 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interface.go - -// Package mocks is a generated GoMock package. -package mocks - -import ( - context "context" - os "os" - reflect "reflect" - - abi "github.com/filecoin-project/go-state-types/abi" - fsutil "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" - partialfile "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" - stores "github.com/filecoin-project/lotus/extern/sector-storage/stores" - storiface "github.com/filecoin-project/lotus/extern/sector-storage/storiface" - storage "github.com/filecoin-project/specs-storage/storage" - gomock "github.com/golang/mock/gomock" -) - -// MockpartialFileHandler is a mock of partialFileHandler interface. 
-type MockpartialFileHandler struct { - ctrl *gomock.Controller - recorder *MockpartialFileHandlerMockRecorder -} - -// MockpartialFileHandlerMockRecorder is the mock recorder for MockpartialFileHandler. -type MockpartialFileHandlerMockRecorder struct { - mock *MockpartialFileHandler -} - -// NewMockpartialFileHandler creates a new mock instance. -func NewMockpartialFileHandler(ctrl *gomock.Controller) *MockpartialFileHandler { - mock := &MockpartialFileHandler{ctrl: ctrl} - mock.recorder = &MockpartialFileHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockpartialFileHandler) EXPECT() *MockpartialFileHandlerMockRecorder { - return m.recorder -} - -// Close mocks base method. -func (m *MockpartialFileHandler) Close(pf *partialfile.PartialFile) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Close", pf) - ret0, _ := ret[0].(error) - return ret0 -} - -// Close indicates an expected call of Close. -func (mr *MockpartialFileHandlerMockRecorder) Close(pf interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Close", reflect.TypeOf((*MockpartialFileHandler)(nil).Close), pf) -} - -// HasAllocated mocks base method. -func (m *MockpartialFileHandler) HasAllocated(pf *partialfile.PartialFile, offset storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasAllocated", pf, offset, size) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasAllocated indicates an expected call of HasAllocated. -func (mr *MockpartialFileHandlerMockRecorder) HasAllocated(pf, offset, size interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasAllocated", reflect.TypeOf((*MockpartialFileHandler)(nil).HasAllocated), pf, offset, size) -} - -// OpenPartialFile mocks base method. 
-func (m *MockpartialFileHandler) OpenPartialFile(maxPieceSize abi.PaddedPieceSize, path string) (*partialfile.PartialFile, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "OpenPartialFile", maxPieceSize, path) - ret0, _ := ret[0].(*partialfile.PartialFile) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// OpenPartialFile indicates an expected call of OpenPartialFile. -func (mr *MockpartialFileHandlerMockRecorder) OpenPartialFile(maxPieceSize, path interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "OpenPartialFile", reflect.TypeOf((*MockpartialFileHandler)(nil).OpenPartialFile), maxPieceSize, path) -} - -// Reader mocks base method. -func (m *MockpartialFileHandler) Reader(pf *partialfile.PartialFile, offset storiface.PaddedByteIndex, size abi.PaddedPieceSize) (*os.File, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reader", pf, offset, size) - ret0, _ := ret[0].(*os.File) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Reader indicates an expected call of Reader. -func (mr *MockpartialFileHandlerMockRecorder) Reader(pf, offset, size interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reader", reflect.TypeOf((*MockpartialFileHandler)(nil).Reader), pf, offset, size) -} - -// MockStore is a mock of Store interface. -type MockStore struct { - ctrl *gomock.Controller - recorder *MockStoreMockRecorder -} - -// MockStoreMockRecorder is the mock recorder for MockStore. -type MockStoreMockRecorder struct { - mock *MockStore -} - -// NewMockStore creates a new mock instance. -func NewMockStore(ctrl *gomock.Controller) *MockStore { - mock := &MockStore{ctrl: ctrl} - mock.recorder = &MockStoreMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockStore) EXPECT() *MockStoreMockRecorder { - return m.recorder -} - -// AcquireSector mocks base method. 
-func (m *MockStore) AcquireSector(ctx context.Context, s storage.SectorRef, existing, allocate storiface.SectorFileType, sealing storiface.PathType, op storiface.AcquireMode) (storiface.SectorPaths, storiface.SectorPaths, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AcquireSector", ctx, s, existing, allocate, sealing, op) - ret0, _ := ret[0].(storiface.SectorPaths) - ret1, _ := ret[1].(storiface.SectorPaths) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// AcquireSector indicates an expected call of AcquireSector. -func (mr *MockStoreMockRecorder) AcquireSector(ctx, s, existing, allocate, sealing, op interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AcquireSector", reflect.TypeOf((*MockStore)(nil).AcquireSector), ctx, s, existing, allocate, sealing, op) -} - -// FsStat mocks base method. -func (m *MockStore) FsStat(ctx context.Context, id stores.ID) (fsutil.FsStat, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FsStat", ctx, id) - ret0, _ := ret[0].(fsutil.FsStat) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// FsStat indicates an expected call of FsStat. -func (mr *MockStoreMockRecorder) FsStat(ctx, id interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FsStat", reflect.TypeOf((*MockStore)(nil).FsStat), ctx, id) -} - -// MoveStorage mocks base method. -func (m *MockStore) MoveStorage(ctx context.Context, s storage.SectorRef, types storiface.SectorFileType) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "MoveStorage", ctx, s, types) - ret0, _ := ret[0].(error) - return ret0 -} - -// MoveStorage indicates an expected call of MoveStorage. 
-func (mr *MockStoreMockRecorder) MoveStorage(ctx, s, types interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MoveStorage", reflect.TypeOf((*MockStore)(nil).MoveStorage), ctx, s, types) -} - -// Remove mocks base method. -func (m *MockStore) Remove(ctx context.Context, s abi.SectorID, types storiface.SectorFileType, force bool) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Remove", ctx, s, types, force) - ret0, _ := ret[0].(error) - return ret0 -} - -// Remove indicates an expected call of Remove. -func (mr *MockStoreMockRecorder) Remove(ctx, s, types, force interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Remove", reflect.TypeOf((*MockStore)(nil).Remove), ctx, s, types, force) -} - -// RemoveCopies mocks base method. -func (m *MockStore) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RemoveCopies", ctx, s, types) - ret0, _ := ret[0].(error) - return ret0 -} - -// RemoveCopies indicates an expected call of RemoveCopies. -func (mr *MockStoreMockRecorder) RemoveCopies(ctx, s, types interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemoveCopies", reflect.TypeOf((*MockStore)(nil).RemoveCopies), ctx, s, types) -} - -// Reserve mocks base method. -func (m *MockStore) Reserve(ctx context.Context, sid storage.SectorRef, ft storiface.SectorFileType, storageIDs storiface.SectorPaths, overheadTab map[storiface.SectorFileType]int) (func(), error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Reserve", ctx, sid, ft, storageIDs, overheadTab) - ret0, _ := ret[0].(func()) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Reserve indicates an expected call of Reserve. 
-func (mr *MockStoreMockRecorder) Reserve(ctx, sid, ft, storageIDs, overheadTab interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Reserve", reflect.TypeOf((*MockStore)(nil).Reserve), ctx, sid, ft, storageIDs, overheadTab) -} diff --git a/extern/sector-storage/stores/remote.go b/extern/sector-storage/stores/remote.go index 6f8efc03e..bd6b34be3 100644 --- a/extern/sector-storage/stores/remote.go +++ b/extern/sector-storage/stores/remote.go @@ -41,7 +41,7 @@ type Remote struct { fetchLk sync.Mutex fetching map[abi.SectorID]chan struct{} - pfHandler partialFileHandler + pfHandler PartialFileHandler } func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storiface.SectorFileType) error { @@ -52,7 +52,7 @@ func (r *Remote) RemoveCopies(ctx context.Context, s abi.SectorID, types storifa return r.local.RemoveCopies(ctx, s, types) } -func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, pfHandler partialFileHandler) *Remote { +func NewRemote(local Store, index SectorIndex, auth http.Header, fetchLimit int, pfHandler PartialFileHandler) *Remote { return &Remote{ local: local, index: index, @@ -155,7 +155,8 @@ func (r *Remote) AcquireSector(ctx context.Context, s storage.SectorRef, existin } if op == storiface.AcquireMove { - if err := r.deleteFromRemote(ctx, url); err != nil { + id := ID(storageID) + if err := r.deleteFromRemote(ctx, url, &id); err != nil { log.Warnf("deleting sector %v from %s (delete %s): %+v", s, storageID, url, err) } } @@ -280,7 +281,7 @@ func (r *Remote) fetch(ctx context.Context, url, outname string) error { switch mediatype { case "application/x-tar": - return tarutil.ExtractTar(resp.Body, outname) + return tarutil.ExtractTar(resp.Body, outname, make([]byte, CopyBuf)) case "application/octet-stream": f, err := os.Create(outname) if err != nil { @@ -304,7 +305,6 @@ func (r *Remote) checkAllocated(ctx context.Context, url string, spt abi.Registe 
return false, xerrors.Errorf("request: %w", err) } req.Header = r.auth.Clone() - fmt.Printf("req using header: %#v \n", r.auth) req = req.WithContext(ctx) resp, err := http.DefaultClient.Do(req) @@ -333,12 +333,12 @@ func (r *Remote) MoveStorage(ctx context.Context, s storage.SectorRef, types sto return r.local.MoveStorage(ctx, s, types) } -func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool) error { +func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.SectorFileType, force bool, keepIn []ID) error { if bits.OnesCount(uint(typ)) != 1 { return xerrors.New("delete expects one file type") } - if err := r.local.Remove(ctx, sid, typ, force); err != nil { + if err := r.local.Remove(ctx, sid, typ, force, keepIn); err != nil { return xerrors.Errorf("remove from local: %w", err) } @@ -347,9 +347,15 @@ func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.Sec return xerrors.Errorf("finding existing sector %d(t:%d) failed: %w", sid, typ, err) } +storeLoop: for _, info := range si { + for _, id := range keepIn { + if id == info.ID { + continue storeLoop + } + } for _, url := range info.URLs { - if err := r.deleteFromRemote(ctx, url); err != nil { + if err := r.deleteFromRemote(ctx, url, nil); err != nil { log.Warnf("remove %s: %+v", url, err) continue } @@ -360,7 +366,11 @@ func (r *Remote) Remove(ctx context.Context, sid abi.SectorID, typ storiface.Sec return nil } -func (r *Remote) deleteFromRemote(ctx context.Context, url string) error { +func (r *Remote) deleteFromRemote(ctx context.Context, url string, keepIn *ID) error { + if keepIn != nil { + url = url + "?keep=" + string(*keepIn) + } + log.Infof("Delete %s", url) req, err := http.NewRequest("DELETE", url, nil) @@ -575,7 +585,7 @@ func (r *Remote) CheckIsUnsealed(ctx context.Context, s storage.SectorRef, offse // 1. no worker(local worker included) has an unsealed file for the given sector OR // 2. 
no worker(local worker included) has the unsealed piece in their unsealed sector file. // Will return a nil reader and a nil error in such a case. -func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (io.ReadCloser, error) { +func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size abi.PaddedPieceSize) (func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error), error) { ft := storiface.FTUnsealed // check if we have the unsealed sector file locally @@ -613,7 +623,52 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a if has { log.Infof("returning piece reader for local unsealed piece sector=%+v, (offset=%d, size=%d)", s.ID, offset, size) - return r.pfHandler.Reader(pf, storiface.PaddedByteIndex(offset), size) + + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // don't reuse between readers unless closed + f := pf + pf = nil + + if f == nil { + f, err = r.pfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path) + if err != nil { + return nil, xerrors.Errorf("opening partial file: %w", err) + } + log.Debugf("local partial file (re)opened %s (+%d,%d)", path, offset, size) + } + + r, err := r.pfHandler.Reader(f, storiface.PaddedByteIndex(offset)+startOffsetAligned, size-abi.PaddedPieceSize(startOffsetAligned)) + if err != nil { + return nil, err + } + + return struct { + io.Reader + io.Closer + }{ + Reader: r, + Closer: funcCloser(func() error { + // if we already have a reader cached, close this one + if pf != nil { + if f == nil { + return nil + } + if pf == f { + pf = nil + } + + tmp := f + f = nil + return tmp.Close() + } + + // otherwise stash it away for reuse + pf = f + return nil + }), + }, nil + }, nil + } log.Debugf("miner has unsealed file but not unseal piece, %s (+%d,%d)", path, offset, size) @@ -656,16 +711,18 @@ func (r *Remote) Reader(ctx context.Context, s storage.SectorRef, offset, size a 
continue } - // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. - // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. - rd, err := r.readRemote(ctx, url, offset, size) - if err != nil { - log.Warnw("reading from remote", "url", url, "error", err) - lastErr = err - continue - } - log.Infof("Read remote %s (+%d,%d)", url, offset, size) - return rd, nil + return func(startOffsetAligned storiface.PaddedByteIndex) (io.ReadCloser, error) { + // readRemote fetches a reader that we can use to read the unsealed piece from the remote worker. + // It uses a ranged HTTP query to ensure we ONLY read the unsealed piece and not the entire unsealed file. + rd, err := r.readRemote(ctx, url, offset+abi.PaddedPieceSize(startOffsetAligned), size) + if err != nil { + log.Warnw("reading from remote", "url", url, "error", err) + return nil, err + } + + return rd, err + }, nil + } } @@ -682,3 +739,11 @@ func (r *Remote) Reserve(ctx context.Context, sid storage.SectorRef, ft storifac } var _ Store = &Remote{} + +type funcCloser func() error + +func (f funcCloser) Close() error { + return f() +} + +var _ io.Closer = funcCloser(nil) diff --git a/extern/sector-storage/stores/remote_test.go b/extern/sector-storage/stores/remote_test.go index b708bb68f..a7a82a728 100644 --- a/extern/sector-storage/stores/remote_test.go +++ b/extern/sector-storage/stores/remote_test.go @@ -1,28 +1,161 @@ +//stm: #unit package stores_test import ( "context" + "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "net/http/httptest" "os" + "path/filepath" "testing" + "github.com/golang/mock/gomock" + "github.com/google/uuid" + "github.com/gorilla/mux" + logging "github.com/ipfs/go-log/v2" + "github.com/stretchr/testify/require" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/extern/sector-storage/partialfile" 
"github.com/filecoin-project/lotus/extern/sector-storage/stores" "github.com/filecoin-project/lotus/extern/sector-storage/stores/mocks" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/specs-storage/storage" - "github.com/golang/mock/gomock" - "github.com/gorilla/mux" - logging "github.com/ipfs/go-log/v2" - "github.com/stretchr/testify/require" - "golang.org/x/xerrors" ) +const metaFile = "sectorstore.json" + +func createTestStorage(t *testing.T, p string, seal bool, att ...*stores.Local) stores.ID { + if err := os.MkdirAll(p, 0755); err != nil { + if !os.IsExist(err) { + require.NoError(t, err) + } + } + + cfg := &stores.LocalStorageMeta{ + ID: stores.ID(uuid.New().String()), + Weight: 10, + CanSeal: seal, + CanStore: !seal, + } + + b, err := json.MarshalIndent(cfg, "", " ") + require.NoError(t, err) + + require.NoError(t, ioutil.WriteFile(filepath.Join(p, metaFile), b, 0644)) + + for _, s := range att { + require.NoError(t, s.OpenPath(context.Background(), p)) + } + + return cfg.ID +} + +func TestMoveShared(t *testing.T) { + logging.SetAllLoggers(logging.LevelDebug) + + index := stores.NewIndex() + + ctx := context.Background() + + dir, err := ioutil.TempDir("", "stores-remote-test-") + require.NoError(t, err) + t.Cleanup(func() { + _ = os.RemoveAll(dir) + }) + + openRepo := func(dir string) repo.LockedRepo { + r, err := repo.NewFS(dir) + require.NoError(t, err) + require.NoError(t, r.Init(repo.Worker)) + lr, err := r.Lock(repo.Worker) + require.NoError(t, err) + + t.Cleanup(func() { + _ = lr.Close() + }) + + err = lr.SetStorage(func(config *stores.StorageConfig) { + *config = stores.StorageConfig{} + }) + require.NoError(t, err) + + return lr + } + + // setup two repos with two storage paths: + // repo 1 with both paths + // repo 2 with one path (shared) + + lr1 := openRepo(filepath.Join(dir, "l1")) + lr2 := openRepo(filepath.Join(dir, "l2")) + + mux1 := 
mux.NewRouter() + mux2 := mux.NewRouter() + hs1 := httptest.NewServer(mux1) + hs2 := httptest.NewServer(mux2) + + ls1, err := stores.NewLocal(ctx, lr1, index, []string{hs1.URL + "/remote"}) + require.NoError(t, err) + ls2, err := stores.NewLocal(ctx, lr2, index, []string{hs2.URL + "/remote"}) + require.NoError(t, err) + + dirStor := filepath.Join(dir, "stor") + dirSeal := filepath.Join(dir, "seal") + + id1 := createTestStorage(t, dirStor, false, ls1, ls2) + id2 := createTestStorage(t, dirSeal, true, ls1) + + rs1 := stores.NewRemote(ls1, index, nil, 20, &stores.DefaultPartialFileHandler{}) + rs2 := stores.NewRemote(ls2, index, nil, 20, &stores.DefaultPartialFileHandler{}) + _ = rs2 + mux1.PathPrefix("/").Handler(&stores.FetchHandler{Local: ls1, PfHandler: &stores.DefaultPartialFileHandler{}}) + mux2.PathPrefix("/").Handler(&stores.FetchHandler{Local: ls2, PfHandler: &stores.DefaultPartialFileHandler{}}) + + // add a sealed replica file to the sealing (non-shared) path + + s1ref := storage.SectorRef{ + ID: abi.SectorID{ + Miner: 12, + Number: 1, + }, + ProofType: abi.RegisteredSealProof_StackedDrg2KiBV1, + } + + sp, sid, err := rs1.AcquireSector(ctx, s1ref, storiface.FTNone, storiface.FTSealed, storiface.PathSealing, storiface.AcquireMove) + require.NoError(t, err) + require.Equal(t, id2, stores.ID(sid.Sealed)) + + data := make([]byte, 2032) + data[1] = 54 + require.NoError(t, ioutil.WriteFile(sp.Sealed, data, 0666)) + fmt.Println("write to ", sp.Sealed) + + require.NoError(t, index.StorageDeclareSector(ctx, stores.ID(sid.Sealed), s1ref.ID, storiface.FTSealed, true)) + + // move to the shared path from the second node (remote move / delete) + + require.NoError(t, rs2.MoveStorage(ctx, s1ref, storiface.FTSealed)) + + // check that the file still exists + sp, sid, err = rs2.AcquireSector(ctx, s1ref, storiface.FTSealed, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove) + require.NoError(t, err) + require.Equal(t, id1, stores.ID(sid.Sealed)) + 
fmt.Println("read from ", sp.Sealed) + + read, err := ioutil.ReadFile(sp.Sealed) + require.NoError(t, err) + require.EqualValues(t, data, read) +} + func TestReader(t *testing.T) { + //stm: @STORAGE_INFO_001 logging.SetAllLoggers(logging.LevelDebug) bz := []byte("Hello World") @@ -46,7 +179,7 @@ func TestReader(t *testing.T) { tcs := map[string]struct { storeFnc func(s *mocks.MockStore) - pfFunc func(s *mocks.MockpartialFileHandler) + pfFunc func(s *mocks.MockPartialFileHandler) indexFnc func(s *mocks.MockSectorIndex, serverURL string) needHttpServer bool @@ -76,7 +209,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) }, errStr: "pf open error", @@ -87,7 +220,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, true, xerrors.New("piece check error")) @@ -101,7 +234,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, false, nil) @@ -115,7 +248,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, true, nil) @@ -157,7 +290,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { 
+ pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, false, nil) @@ -223,7 +356,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, true, nil) @@ -251,7 +384,7 @@ func TestReader(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, false, nil) @@ -308,7 +441,7 @@ func TestReader(t *testing.T) { // create them mocks lstore := mocks.NewMockStore(mockCtrl) - pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + pfhandler := mocks.NewMockPartialFileHandler(mockCtrl) index := mocks.NewMockSectorIndex(mockCtrl) if tc.storeFnc != nil { @@ -340,12 +473,20 @@ func TestReader(t *testing.T) { remoteStore := stores.NewRemote(lstore, index, nil, 6000, pfhandler) - rd, err := remoteStore.Reader(ctx, sectorRef, offset, size) + rdg, err := remoteStore.Reader(ctx, sectorRef, offset, size) + var rd io.ReadCloser if tc.errStr != "" { - require.Error(t, err) - require.Nil(t, rd) - require.Contains(t, err.Error(), tc.errStr) + if rdg == nil { + require.Error(t, err) + require.Nil(t, rdg) + require.Contains(t, err.Error(), tc.errStr) + } else { + rd, err = rdg(0) + require.Error(t, err) + require.Nil(t, rd) + require.Contains(t, err.Error(), tc.errStr) + } } else { require.NoError(t, err) } @@ -353,7 +494,10 @@ func TestReader(t *testing.T) { if !tc.expectedNonNilReader { require.Nil(t, rd) } else { - require.NotNil(t, rd) + require.NotNil(t, rdg) + rd, err := rdg(0) + require.NoError(t, err) + defer func() { require.NoError(t, 
rd.Close()) }() @@ -393,7 +537,7 @@ func TestCheckIsUnsealed(t *testing.T) { tcs := map[string]struct { storeFnc func(s *mocks.MockStore) - pfFunc func(s *mocks.MockpartialFileHandler) + pfFunc func(s *mocks.MockPartialFileHandler) indexFnc func(s *mocks.MockSectorIndex, serverURL string) needHttpServer bool @@ -421,7 +565,7 @@ func TestCheckIsUnsealed(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, xerrors.New("pf open error")) }, errStr: "pf open error", @@ -432,7 +576,7 @@ func TestCheckIsUnsealed(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, true, xerrors.New("piece check error")) @@ -446,7 +590,7 @@ func TestCheckIsUnsealed(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, @@ -488,7 +632,7 @@ func TestCheckIsUnsealed(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, false, nil) @@ -533,7 +677,7 @@ func TestCheckIsUnsealed(t *testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, true, nil) @@ -569,7 +713,7 @@ func TestCheckIsUnsealed(t 
*testing.T) { mockSectorAcquire(l, sectorRef, pfPath, nil) }, - pfFunc: func(pf *mocks.MockpartialFileHandler) { + pfFunc: func(pf *mocks.MockPartialFileHandler) { mockPartialFileOpen(pf, sectorSize, pfPath, nil) mockCheckAllocation(pf, offset, size, emptyPartialFile, false, nil) @@ -602,7 +746,7 @@ func TestCheckIsUnsealed(t *testing.T) { // create them mocks lstore := mocks.NewMockStore(mockCtrl) - pfhandler := mocks.NewMockpartialFileHandler(mockCtrl) + pfhandler := mocks.NewMockPartialFileHandler(mockCtrl) index := mocks.NewMockSectorIndex(mockCtrl) if tc.storeFnc != nil { @@ -656,18 +800,18 @@ func mockSectorAcquire(l *mocks.MockStore, sectorRef storage.SectorRef, pfPath s storiface.SectorPaths{}, err).Times(1) } -func mockPartialFileOpen(pf *mocks.MockpartialFileHandler, sectorSize abi.SectorSize, pfPath string, err error) { +func mockPartialFileOpen(pf *mocks.MockPartialFileHandler, sectorSize abi.SectorSize, pfPath string, err error) { pf.EXPECT().OpenPartialFile(abi.PaddedPieceSize(sectorSize), pfPath).Return(&partialfile.PartialFile{}, err).Times(1) } -func mockCheckAllocation(pf *mocks.MockpartialFileHandler, offset, size abi.PaddedPieceSize, file *partialfile.PartialFile, +func mockCheckAllocation(pf *mocks.MockPartialFileHandler, offset, size abi.PaddedPieceSize, file *partialfile.PartialFile, out bool, err error) { pf.EXPECT().HasAllocated(file, storiface.UnpaddedByteIndex(offset.Unpadded()), size.Unpadded()).Return(out, err).Times(1) } -func mockPfReader(pf *mocks.MockpartialFileHandler, file *partialfile.PartialFile, offset, size abi.PaddedPieceSize, +func mockPfReader(pf *mocks.MockPartialFileHandler, file *partialfile.PartialFile, offset, size abi.PaddedPieceSize, outFile *os.File, err error) { pf.EXPECT().Reader(file, storiface.PaddedByteIndex(offset), size).Return(outFile, err) } diff --git a/extern/sector-storage/storiface/filetype.go b/extern/sector-storage/storiface/filetype.go index 2e0999022..83fcadc90 100644 --- 
a/extern/sector-storage/storiface/filetype.go +++ b/extern/sector-storage/storiface/filetype.go @@ -12,11 +12,13 @@ const ( FTUnsealed SectorFileType = 1 << iota FTSealed FTCache + FTUpdate + FTUpdateCache FileTypes = iota ) -var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache} +var PathTypes = []SectorFileType{FTUnsealed, FTSealed, FTCache, FTUpdate, FTUpdateCache} const ( FTNone SectorFileType = 0 @@ -25,15 +27,21 @@ const ( const FSOverheadDen = 10 var FSOverheadSeal = map[SectorFileType]int{ // 10x overheads - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 141, // 11 layers + D(2x ssize) + C + R + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen, + FTUpdateCache: FSOverheadDen * 2, + FTCache: 141, // 11 layers + D(2x ssize) + C + R' } +// sector size * disk / fs overhead. FSOverheadDen is like the unit of sector size + var FsOverheadFinalized = map[SectorFileType]int{ - FTUnsealed: FSOverheadDen, - FTSealed: FSOverheadDen, - FTCache: 2, + FTUnsealed: FSOverheadDen, + FTSealed: FSOverheadDen, + FTUpdate: FSOverheadDen * 2, // XXX: we should clear the update cache on Finalize??? 
+ FTUpdateCache: FSOverheadDen, + FTCache: 2, } type SectorFileType int @@ -46,6 +54,10 @@ func (t SectorFileType) String() string { return "sealed" case FTCache: return "cache" + case FTUpdate: + return "update" + case FTUpdateCache: + return "update-cache" default: return fmt.Sprintf("", t) } @@ -104,9 +116,11 @@ func (t SectorFileType) All() [FileTypes]bool { type SectorPaths struct { ID abi.SectorID - Unsealed string - Sealed string - Cache string + Unsealed string + Sealed string + Cache string + Update string + UpdateCache string } func ParseSectorID(baseName string) (abi.SectorID, error) { @@ -139,6 +153,10 @@ func PathByType(sps SectorPaths, fileType SectorFileType) string { return sps.Sealed case FTCache: return sps.Cache + case FTUpdate: + return sps.Update + case FTUpdateCache: + return sps.UpdateCache } panic("requested unknown path type") @@ -152,5 +170,9 @@ func SetPathByType(sps *SectorPaths, fileType SectorFileType, p string) { sps.Sealed = p case FTCache: sps.Cache = p + case FTUpdate: + sps.Update = p + case FTUpdateCache: + sps.UpdateCache = p } } diff --git a/extern/sector-storage/storiface/resources.go b/extern/sector-storage/storiface/resources.go new file mode 100644 index 000000000..51bb68574 --- /dev/null +++ b/extern/sector-storage/storiface/resources.go @@ -0,0 +1,563 @@ +package storiface + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" +) + +type Resources struct { + MinMemory uint64 `envname:"MIN_MEMORY"` // What Must be in RAM for decent perf + MaxMemory uint64 `envname:"MAX_MEMORY"` // Memory required (swap + ram; peak memory usage during task execution) + + // GPUUtilization specifes the number of GPUs a task can use + GPUUtilization float64 `envname:"GPU_UTILIZATION"` + + // MaxParallelism specifies the number of CPU cores when GPU is NOT in use + MaxParallelism int 
`envname:"MAX_PARALLELISM"` // -1 = multithread + + // MaxParallelismGPU specifies the number of CPU cores when GPU is in use + MaxParallelismGPU int `envname:"MAX_PARALLELISM_GPU"` // when 0, inherits MaxParallelism + + BaseMinMemory uint64 `envname:"BASE_MIN_MEMORY"` // What Must be in RAM for decent perf (shared between threads) +} + +/* + + Percent of threads to allocate to parallel tasks + + 12 * 0.92 = 11 + 16 * 0.92 = 14 + 24 * 0.92 = 22 + 32 * 0.92 = 29 + 64 * 0.92 = 58 + 128 * 0.92 = 117 + +*/ +var ParallelNum uint64 = 92 +var ParallelDenom uint64 = 100 + +// TODO: Take NUMA into account +func (r Resources) Threads(wcpus uint64, gpus int) uint64 { + mp := r.MaxParallelism + + if r.GPUUtilization > 0 && gpus > 0 && r.MaxParallelismGPU != 0 { // task can use GPUs and worker has some + mp = r.MaxParallelismGPU + } + + if mp == -1 { + n := (wcpus * ParallelNum) / ParallelDenom + if n == 0 { + return wcpus + } + return n + } + + return uint64(mp) +} + +var ResourceTable = map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{ + sealtasks.TTAddPiece: { + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 8 << 30, + MinMemory: 8 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 4 << 30, + MinMemory: 4 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 1, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 1, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTPreCommit1: { + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 128 << 30, + MinMemory: 112 << 30, + + 
MaxParallelism: 1, + + BaseMinMemory: 10 << 20, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 64 << 30, + MinMemory: 56 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 10 << 20, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 768 << 20, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 20, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 1, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 1, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTPreCommit2: { + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 30 << 30, + MinMemory: 30 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 15 << 30, + MinMemory: 15 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MaxParallelism: -1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: -1, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: -1, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTCommit1: { // Very short (~100ms), so params are very light + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + 
abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 0, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 0, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTCommit2: { + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 190 << 30, // TODO: Confirm + MinMemory: 60 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 64 << 30, // params + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory + MinMemory: 30 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 32 << 30, // params + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MaxParallelism: 1, // This is fine + GPUUtilization: 1.0, + + BaseMinMemory: 10 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 1, + GPUUtilization: 1.0, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 1, + GPUUtilization: 1.0, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTFetch: { + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + MaxParallelism: 0, + GPUUtilization: 0, + + BaseMinMemory: 0, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + MaxParallelism: 0, + GPUUtilization: 0, + + BaseMinMemory: 0, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + 
MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + MaxParallelism: 0, + GPUUtilization: 0, + + BaseMinMemory: 0, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + MaxParallelism: 0, + GPUUtilization: 0, + + BaseMinMemory: 0, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 1 << 20, + MinMemory: 1 << 20, + + MaxParallelism: 0, + GPUUtilization: 0, + + BaseMinMemory: 0, + }, + }, + // TODO: this should ideally be the actual replica update proof types + // TODO: actually measure this (and all the other replica update work) + sealtasks.TTReplicaUpdate: { // copied from addpiece + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 8 << 30, + MinMemory: 8 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 4 << 30, + MinMemory: 4 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 1, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 1, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 1, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTProveReplicaUpdate1: { // copied from commit1 + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 1 << 30, + MinMemory: 1 << 30, + + MaxParallelism: 0, + + BaseMinMemory: 1 << 30, + }, + 
abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 0, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 0, + + BaseMinMemory: 8 << 20, + }, + }, + sealtasks.TTProveReplicaUpdate2: { // copied from commit2 + abi.RegisteredSealProof_StackedDrg64GiBV1: Resources{ + MaxMemory: 190 << 30, // TODO: Confirm + MinMemory: 60 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 64 << 30, // params + }, + abi.RegisteredSealProof_StackedDrg32GiBV1: Resources{ + MaxMemory: 150 << 30, // TODO: ~30G of this should really be BaseMaxMemory + MinMemory: 30 << 30, + + MaxParallelism: -1, + MaxParallelismGPU: 6, + GPUUtilization: 1.0, + + BaseMinMemory: 32 << 30, // params + }, + abi.RegisteredSealProof_StackedDrg512MiBV1: Resources{ + MaxMemory: 3 << 29, // 1.5G + MinMemory: 1 << 30, + + MaxParallelism: 1, // This is fine + GPUUtilization: 1.0, + + BaseMinMemory: 10 << 30, + }, + abi.RegisteredSealProof_StackedDrg2KiBV1: Resources{ + MaxMemory: 2 << 10, + MinMemory: 2 << 10, + + MaxParallelism: 1, + GPUUtilization: 1.0, + + BaseMinMemory: 2 << 10, + }, + abi.RegisteredSealProof_StackedDrg8MiBV1: Resources{ + MaxMemory: 8 << 20, + MinMemory: 8 << 20, + + MaxParallelism: 1, + GPUUtilization: 1.0, + + BaseMinMemory: 8 << 20, + }, + }, +} + +func init() { + ResourceTable[sealtasks.TTUnseal] = ResourceTable[sealtasks.TTPreCommit1] // TODO: measure accurately + ResourceTable[sealtasks.TTRegenSectorKey] = ResourceTable[sealtasks.TTReplicaUpdate] + + // V1_1 is the same as V1 + for _, m := range ResourceTable { + m[abi.RegisteredSealProof_StackedDrg2KiBV1_1] = m[abi.RegisteredSealProof_StackedDrg2KiBV1] + m[abi.RegisteredSealProof_StackedDrg8MiBV1_1] = m[abi.RegisteredSealProof_StackedDrg8MiBV1] + m[abi.RegisteredSealProof_StackedDrg512MiBV1_1] = 
m[abi.RegisteredSealProof_StackedDrg512MiBV1] + m[abi.RegisteredSealProof_StackedDrg32GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg32GiBV1] + m[abi.RegisteredSealProof_StackedDrg64GiBV1_1] = m[abi.RegisteredSealProof_StackedDrg64GiBV1] + } +} + +func ParseResourceEnv(lookup func(key, def string) (string, bool)) (map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources, error) { + out := map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources{} + + for taskType, defTT := range ResourceTable { + out[taskType] = map[abi.RegisteredSealProof]Resources{} + + for spt, defRes := range defTT { + r := defRes // copy + + spsz, err := spt.SectorSize() + if err != nil { + return nil, xerrors.Errorf("getting sector size: %w", err) + } + shortSize := strings.TrimSuffix(spsz.ShortString(), "iB") + + rr := reflect.ValueOf(&r) + for i := 0; i < rr.Elem().Type().NumField(); i++ { + f := rr.Elem().Type().Field(i) + + envname := f.Tag.Get("envname") + if envname == "" { + return nil, xerrors.Errorf("no envname for field '%s'", f.Name) + } + + envval, found := lookup(taskType.Short()+"_"+shortSize+"_"+envname, fmt.Sprint(rr.Elem().Field(i).Interface())) + if !found { + // special multicore SDR handling + if (taskType == sealtasks.TTPreCommit1 || taskType == sealtasks.TTUnseal) && envname == "MAX_PARALLELISM" { + v, ok := rr.Elem().Field(i).Addr().Interface().(*int) + if !ok { + // can't happen, but let's not panic + return nil, xerrors.Errorf("res.MAX_PARALLELISM is not int (!?): %w", err) + } + *v, err = getSDRThreads(lookup) + if err != nil { + return nil, err + } + } + + continue + } + + v := rr.Elem().Field(i).Addr().Interface() + switch fv := v.(type) { + case *uint64: + *fv, err = strconv.ParseUint(envval, 10, 64) + case *int: + *fv, err = strconv.Atoi(envval) + case *float64: + *fv, err = strconv.ParseFloat(envval, 64) + default: + return nil, xerrors.Errorf("unknown resource field type") + } + } + + out[taskType][spt] = r + } + } + + return out, nil +} + +func 
getSDRThreads(lookup func(key, def string) (string, bool)) (_ int, err error) { + producers := 0 + + if v, _ := lookup("FIL_PROOFS_USE_MULTICORE_SDR", ""); v == "1" { + producers = 3 + + if penv, found := lookup("FIL_PROOFS_MULTICORE_SDR_PRODUCERS", ""); found { + producers, err = strconv.Atoi(penv) + if err != nil { + return 0, xerrors.Errorf("parsing (atoi) FIL_PROOFS_MULTICORE_SDR_PRODUCERS: %w", err) + } + } + } + + // producers + the one core actually doing the work + return producers + 1, nil +} diff --git a/extern/sector-storage/storiface/resources_test.go b/extern/sector-storage/storiface/resources_test.go new file mode 100644 index 000000000..bf7425d24 --- /dev/null +++ b/extern/sector-storage/storiface/resources_test.go @@ -0,0 +1,75 @@ +package storiface + +import ( + "fmt" + "testing" + + stabi "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" + "github.com/stretchr/testify/require" +) + +func TestListResourceVars(t *testing.T) { + _, err := ParseResourceEnv(func(key, def string) (string, bool) { + if def != "" { + fmt.Printf("%s=%s\n", key, def) + } + + return "", false + }) + + require.NoError(t, err) +} + +func TestListResourceOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "UNS_2K_MAX_PARALLELISM" { + return "2", true + } + if key == "PC2_2K_GPU_UTILIZATION" { + return "0.4", true + } + if key == "PC2_2K_MAX_MEMORY" { + return "2222", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 2, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 0.4, rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].GPUUtilization) + require.Equal(t, uint64(2222), rt[sealtasks.TTPreCommit2][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxMemory) + + // check that defaults don't get mutated + require.Equal(t, 1, 
ResourceTable[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} + +func TestListResourceSDRMulticoreOverride(t *testing.T) { + rt, err := ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 4, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 4, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + + rt, err = ParseResourceEnv(func(key, def string) (string, bool) { + if key == "FIL_PROOFS_USE_MULTICORE_SDR" { + return "1", true + } + if key == "FIL_PROOFS_MULTICORE_SDR_PRODUCERS" { + return "9000", true + } + + return "", false + }) + + require.NoError(t, err) + require.Equal(t, 9001, rt[sealtasks.TTPreCommit1][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) + require.Equal(t, 9001, rt[sealtasks.TTUnseal][stabi.RegisteredSealProof_StackedDrg2KiBV1_1].MaxParallelism) +} diff --git a/extern/sector-storage/storiface/storage.go b/extern/sector-storage/storiface/storage.go index e836002d5..624b71d77 100644 --- a/extern/sector-storage/storiface/storage.go +++ b/extern/sector-storage/storiface/storage.go @@ -1,5 +1,7 @@ package storiface +import "github.com/filecoin-project/go-state-types/abi" + type PathType string const ( @@ -13,3 +15,17 @@ const ( AcquireMove AcquireMode = "move" AcquireCopy AcquireMode = "copy" ) + +type Refs struct { + RefCount [FileTypes]uint +} + +type SectorLock struct { + Sector abi.SectorID + Write [FileTypes]uint + Read [FileTypes]uint +} + +type SectorLocks struct { + Locks []SectorLock +} diff --git a/extern/sector-storage/storiface/worker.go b/extern/sector-storage/storiface/worker.go index d1373f4c5..8bb6a256a 100644 --- a/extern/sector-storage/storiface/worker.go +++ b/extern/sector-storage/storiface/worker.go @@ -15,6 +15,12 @@ import ( 
"github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" ) +type WorkerID uuid.UUID // worker session UUID + +func (w WorkerID) String() string { + return uuid.UUID(w).String() +} + type WorkerInfo struct { Hostname string @@ -28,12 +34,35 @@ type WorkerInfo struct { type WorkerResources struct { MemPhysical uint64 + MemUsed uint64 MemSwap uint64 - - MemReserved uint64 // Used by system / other processes + MemSwapUsed uint64 CPUs uint64 // Logical cores GPUs []string + + // if nil use the default resource table + Resources map[sealtasks.TaskType]map[abi.RegisteredSealProof]Resources +} + +func (wr WorkerResources) ResourceSpec(spt abi.RegisteredSealProof, tt sealtasks.TaskType) Resources { + res := ResourceTable[tt][spt] + + // if the worker specifies custom resource table, prefer that + if wr.Resources != nil { + tr, ok := wr.Resources[tt] + if !ok { + return res + } + + r, ok := tr[spt] + if ok { + return r + } + } + + // otherwise, use the default resource table + return res } type WorkerStats struct { @@ -42,11 +71,13 @@ type WorkerStats struct { MemUsedMin uint64 MemUsedMax uint64 - GpuUsed bool // nolint - CpuUse uint64 // nolint + GpuUsed float64 // nolint + CpuUse uint64 // nolint } const ( + RWPrepared = 1 + RWRunning = 0 RWRetWait = -1 RWReturned = -2 RWRetDone = -3 @@ -57,7 +88,8 @@ type WorkerJob struct { Sector abi.SectorID Task sealtasks.TaskType - // 1+ - assigned + // 2+ - assigned + // 1 - prepared // 0 - running // -1 - ret-wait // -2 - returned @@ -89,6 +121,10 @@ type WorkerCalls interface { SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (CallID, error) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (CallID, error) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef, safeToFree []storage.Range) (CallID, error) + ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (CallID, error) + ProveReplicaUpdate1(ctx 
context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (CallID, error) + ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (CallID, error) + GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (CallID, error) MoveStorage(ctx context.Context, sector storage.SectorRef, types SectorFileType) (CallID, error) UnsealPiece(context.Context, storage.SectorRef, UnpaddedByteIndex, abi.UnpaddedPieceSize, abi.SealRandomness, cid.Cid) (CallID, error) Fetch(context.Context, storage.SectorRef, SectorFileType, PathType, AcquireMode) (CallID, error) @@ -142,6 +178,10 @@ type WorkerReturn interface { ReturnSealCommit2(ctx context.Context, callID CallID, proof storage.Proof, err *CallError) error ReturnFinalizeSector(ctx context.Context, callID CallID, err *CallError) error ReturnReleaseUnsealed(ctx context.Context, callID CallID, err *CallError) error + ReturnReplicaUpdate(ctx context.Context, callID CallID, out storage.ReplicaUpdateOut, err *CallError) error + ReturnProveReplicaUpdate1(ctx context.Context, callID CallID, proofs storage.ReplicaVanillaProofs, err *CallError) error + ReturnProveReplicaUpdate2(ctx context.Context, callID CallID, proof storage.ReplicaUpdateProof, err *CallError) error + ReturnGenerateSectorKeyFromData(ctx context.Context, callID CallID, err *CallError) error ReturnMoveStorage(ctx context.Context, callID CallID, err *CallError) error ReturnUnsealPiece(ctx context.Context, callID CallID, err *CallError) error ReturnReadPiece(ctx context.Context, callID CallID, ok bool, err *CallError) error diff --git a/extern/sector-storage/tarutil/systar.go b/extern/sector-storage/tarutil/systar.go index 2329aafc7..eb958fa02 100644 --- a/extern/sector-storage/tarutil/systar.go +++ b/extern/sector-storage/tarutil/systar.go @@ -14,7 +14,7 @@ import ( var log = logging.Logger("tarutil") // nolint -func 
ExtractTar(body io.Reader, dir string) error { +func ExtractTar(body io.Reader, dir string, buf []byte) error { if err := os.MkdirAll(dir, 0755); err != nil { // nolint return xerrors.Errorf("mkdir: %w", err) } @@ -38,7 +38,7 @@ func ExtractTar(body io.Reader, dir string) error { // This data is coming from a trusted source, no need to check the size. //nolint:gosec - if _, err := io.Copy(f, tr); err != nil { + if _, err := io.CopyBuffer(f, tr, buf); err != nil { return err } @@ -48,17 +48,7 @@ func ExtractTar(body io.Reader, dir string) error { } } -func TarDirectory(dir string) (io.ReadCloser, error) { - r, w := io.Pipe() - - go func() { - _ = w.CloseWithError(writeTarDirectory(dir, w)) - }() - - return r, nil -} - -func writeTarDirectory(dir string, w io.Writer) error { +func TarDirectory(dir string, w io.Writer, buf []byte) error { tw := tar.NewWriter(w) files, err := ioutil.ReadDir(dir) @@ -81,7 +71,7 @@ func writeTarDirectory(dir string, w io.Writer) error { return xerrors.Errorf("opening %s for reading: %w", file.Name(), err) } - if _, err := io.Copy(tw, f); err != nil { + if _, err := io.CopyBuffer(tw, f, buf); err != nil { return xerrors.Errorf("copy data for file %s: %w", file.Name(), err) } diff --git a/extern/sector-storage/teststorage_test.go b/extern/sector-storage/teststorage_test.go index 72b27b154..cb15184be 100644 --- a/extern/sector-storage/teststorage_test.go +++ b/extern/sector-storage/teststorage_test.go @@ -7,7 +7,7 @@ import ( "github.com/ipfs/go-cid" "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/specs-actors/actors/runtime/proof" + "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/specs-storage/storage" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" @@ -23,11 +23,11 @@ type testExec struct { apch chan chan apres } -func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness 
abi.PoStRandomness) ([]proof.PoStProof, error) { +func (t *testExec) GenerateWinningPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) ([]proof.PoStProof, error) { panic("implement me") } -func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.SectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) { +func (t *testExec) GenerateWindowPoSt(ctx context.Context, minerID abi.ActorID, sectorInfo []proof.ExtendedSectorInfo, randomness abi.PoStRandomness) (proof []proof.PoStProof, skipped []abi.SectorID, err error) { panic("implement me") } @@ -55,10 +55,38 @@ func (t *testExec) ReleaseUnsealed(ctx context.Context, sector storage.SectorRef panic("implement me") } +func (t *testExec) ReleaseSealed(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + +func (t *testExec) ReleaseSectorKey(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + +func (t *testExec) ReleaseReplicaUpgrade(ctx context.Context, sector storage.SectorRef) error { + panic("implement me") +} + func (t *testExec) Remove(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } +func (t *testExec) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storage.ReplicaUpdateOut, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storage.ReplicaVanillaProofs, error) { + panic("implement me") +} + +func (t *testExec) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storage.ReplicaUpdateProof, error) { + panic("implement me") +} + +func (t *testExec) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, 
commD cid.Cid) error { + panic("implement me") +} + func (t *testExec) NewSector(ctx context.Context, sector storage.SectorRef) error { panic("implement me") } diff --git a/extern/sector-storage/testworker_test.go b/extern/sector-storage/testworker_test.go index 2fe99f3d4..dd23278ae 100644 --- a/extern/sector-storage/testworker_test.go +++ b/extern/sector-storage/testworker_test.go @@ -7,6 +7,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-storage/storage" "github.com/google/uuid" + cid "github.com/ipfs/go-cid" "github.com/filecoin-project/lotus/extern/sector-storage/mock" "github.com/filecoin-project/lotus/extern/sector-storage/sealtasks" @@ -67,6 +68,33 @@ func (t *testWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pie }) } +func (t *testWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + out, err := t.mockSeal.ReplicaUpdate(ctx, sector, pieces) + if err := t.ret.ReturnReplicaUpdate(ctx, ci, out, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + vanillaProofs, err := t.mockSeal.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + if err := t.ret.ReturnProveReplicaUpdate1(ctx, ci, vanillaProofs, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + +func (t *testWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.asyncCall(sector, func(ci storiface.CallID) { + proof, err := t.mockSeal.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + if 
err := t.ret.ReturnProveReplicaUpdate2(ctx, ci, proof, toCallError(err)); err != nil { + log.Error(err) + } + }) +} + func (t *testWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { return t.asyncCall(sector, func(ci storiface.CallID) { t.pc1s++ @@ -102,14 +130,15 @@ func (t *testWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { } func (t *testWorker) Info(ctx context.Context) (storiface.WorkerInfo, error) { - res := ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] + res := storiface.ResourceTable[sealtasks.TTPreCommit2][abi.RegisteredSealProof_StackedDrg2KiBV1] return storiface.WorkerInfo{ Hostname: "testworkerer", Resources: storiface.WorkerResources{ MemPhysical: res.MinMemory * 3, + MemUsed: res.MinMemory, + MemSwapUsed: 0, MemSwap: 0, - MemReserved: res.MinMemory, CPUs: 32, GPUs: nil, }, diff --git a/extern/sector-storage/worker_local.go b/extern/sector-storage/worker_local.go index 3e63f8659..a5f5a0b9d 100644 --- a/extern/sector-storage/worker_local.go +++ b/extern/sector-storage/worker_local.go @@ -28,7 +28,7 @@ import ( "github.com/filecoin-project/lotus/extern/sector-storage/storiface" ) -var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache} +var pathTypes = []storiface.SectorFileType{storiface.FTUnsealed, storiface.FTSealed, storiface.FTCache, storiface.FTUpdate, storiface.FTUpdateCache} type WorkerConfig struct { TaskTypes []sealtasks.TaskType @@ -42,6 +42,7 @@ type WorkerConfig struct { // used do provide custom proofs impl (mostly used in testing) type ExecutorFunc func() (ffiwrapper.Storage, error) +type EnvFunc func(string) (string, bool) type LocalWorker struct { storage stores.Store @@ -50,6 +51,7 @@ type LocalWorker struct { ret storiface.WorkerReturn executor ExecutorFunc noSwap bool + envLookup EnvFunc // see equivalent field on WorkerConfig. 
ignoreResources bool @@ -64,7 +66,7 @@ type LocalWorker struct { closing chan struct{} } -func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { +func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, envLookup EnvFunc, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { acceptTasks := map[sealtasks.TaskType]struct{}{} for _, taskType := range wcfg.TaskTypes { acceptTasks[taskType] = struct{}{} @@ -82,6 +84,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store acceptTasks: acceptTasks, executor: executor, noSwap: wcfg.NoSwap, + envLookup: envLookup, ignoreResources: wcfg.IgnoreResourceFiltering, session: uuid.New(), closing: make(chan struct{}), @@ -115,7 +118,7 @@ func newLocalWorker(executor ExecutorFunc, wcfg WorkerConfig, store stores.Store } func NewLocalWorker(wcfg WorkerConfig, store stores.Store, local *stores.Local, sindex stores.SectorIndex, ret storiface.WorkerReturn, cst *statestore.StateStore) *LocalWorker { - return newLocalWorker(nil, wcfg, store, local, sindex, ret, cst) + return newLocalWorker(nil, wcfg, os.LookupEnv, store, local, sindex, ret, cst) } type localWorkerPathProvider struct { @@ -145,7 +148,6 @@ func (l *localWorkerPathProvider) AcquireSector(ctx context.Context, sector stor } sid := storiface.PathByType(storageIDs, fileType) - if err := l.w.sindex.StorageDeclareSector(ctx, stores.ID(sid), sector.ID, fileType, l.op == storiface.AcquireMove); err != nil { log.Errorf("declare sector error: %+v", err) } @@ -160,16 +162,20 @@ func (l *LocalWorker) ffiExec() (ffiwrapper.Storage, error) { type ReturnType string const ( - AddPiece ReturnType = "AddPiece" - SealPreCommit1 ReturnType = "SealPreCommit1" - SealPreCommit2 ReturnType = "SealPreCommit2" - SealCommit1 ReturnType = 
"SealCommit1" - SealCommit2 ReturnType = "SealCommit2" - FinalizeSector ReturnType = "FinalizeSector" - ReleaseUnsealed ReturnType = "ReleaseUnsealed" - MoveStorage ReturnType = "MoveStorage" - UnsealPiece ReturnType = "UnsealPiece" - Fetch ReturnType = "Fetch" + AddPiece ReturnType = "AddPiece" + SealPreCommit1 ReturnType = "SealPreCommit1" + SealPreCommit2 ReturnType = "SealPreCommit2" + SealCommit1 ReturnType = "SealCommit1" + SealCommit2 ReturnType = "SealCommit2" + FinalizeSector ReturnType = "FinalizeSector" + ReplicaUpdate ReturnType = "ReplicaUpdate" + ProveReplicaUpdate1 ReturnType = "ProveReplicaUpdate1" + ProveReplicaUpdate2 ReturnType = "ProveReplicaUpdate2" + GenerateSectorKey ReturnType = "GenerateSectorKey" + ReleaseUnsealed ReturnType = "ReleaseUnsealed" + MoveStorage ReturnType = "MoveStorage" + UnsealPiece ReturnType = "UnsealPiece" + Fetch ReturnType = "Fetch" ) // in: func(WorkerReturn, context.Context, CallID, err string) @@ -207,16 +213,20 @@ func rfunc(in interface{}) func(context.Context, storiface.CallID, storiface.Wor } var returnFunc = map[ReturnType]func(context.Context, storiface.CallID, storiface.WorkerReturn, interface{}, *storiface.CallError) error{ - AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), - SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), - SealPreCommit2: rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), - SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), - SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), - FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), - ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), - MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), - UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), - Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), + AddPiece: rfunc(storiface.WorkerReturn.ReturnAddPiece), + SealPreCommit1: rfunc(storiface.WorkerReturn.ReturnSealPreCommit1), + SealPreCommit2: 
rfunc(storiface.WorkerReturn.ReturnSealPreCommit2), + SealCommit1: rfunc(storiface.WorkerReturn.ReturnSealCommit1), + SealCommit2: rfunc(storiface.WorkerReturn.ReturnSealCommit2), + FinalizeSector: rfunc(storiface.WorkerReturn.ReturnFinalizeSector), + ReleaseUnsealed: rfunc(storiface.WorkerReturn.ReturnReleaseUnsealed), + ReplicaUpdate: rfunc(storiface.WorkerReturn.ReturnReplicaUpdate), + ProveReplicaUpdate1: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate1), + ProveReplicaUpdate2: rfunc(storiface.WorkerReturn.ReturnProveReplicaUpdate2), + GenerateSectorKey: rfunc(storiface.WorkerReturn.ReturnGenerateSectorKeyFromData), + MoveStorage: rfunc(storiface.WorkerReturn.ReturnMoveStorage), + UnsealPiece: rfunc(storiface.WorkerReturn.ReturnUnsealPiece), + Fetch: rfunc(storiface.WorkerReturn.ReturnFetch), } func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, rt ReturnType, work func(ctx context.Context, ci storiface.CallID) (interface{}, error)) (storiface.CallID, error) { @@ -240,7 +250,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } res, err := work(ctx, ci) - if err != nil { rb, err := json.Marshal(res) if err != nil { @@ -258,7 +267,6 @@ func (l *LocalWorker) asyncCall(ctx context.Context, sector storage.SectorRef, r } } }() - return ci, nil } @@ -331,11 +339,11 @@ func (l *LocalWorker) SealPreCommit1(ctx context.Context, sector storage.SectorR { // cleanup previous failed attempts if they exist - if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTSealed, true, nil); err != nil { return nil, xerrors.Errorf("cleaning up sealed data: %w", err) } - if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTCache, true, nil); err != nil { return nil, xerrors.Errorf("cleaning up cache data: %w", err) } } @@ -382,6 +390,51 @@ func (l 
*LocalWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, }) } +func (l *LocalWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ReplicaUpdate, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + sealerOut, err := sb.ReplicaUpdate(ctx, sector, pieces) + return sealerOut, err + }) +} + +func (l *LocalWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate1, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (l *LocalWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, ProveReplicaUpdate2, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return sb.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) +} + +func (l *LocalWorker) GenerateSectorKeyFromData(ctx context.Context, sector storage.SectorRef, commD cid.Cid) (storiface.CallID, error) { + sb, err := l.executor() + if err != nil { + return storiface.UndefCall, err + } + + return l.asyncCall(ctx, sector, GenerateSectorKey, func(ctx context.Context, ci storiface.CallID) (interface{}, error) { + return nil, sb.GenerateSectorKeyFromData(ctx, sector, commD) + }) +} + func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, 
keepUnsealed []storage.Range) (storiface.CallID, error) { sb, err := l.executor() if err != nil { @@ -394,7 +447,7 @@ func (l *LocalWorker) FinalizeSector(ctx context.Context, sector storage.SectorR } if len(keepUnsealed) == 0 { - if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true); err != nil { + if err := l.storage.Remove(ctx, sector.ID, storiface.FTUnsealed, true, nil); err != nil { return nil, xerrors.Errorf("removing unsealed data: %w", err) } } @@ -410,13 +463,13 @@ func (l *LocalWorker) ReleaseUnsealed(ctx context.Context, sector storage.Sector func (l *LocalWorker) Remove(ctx context.Context, sector abi.SectorID) error { var err error - if rerr := l.storage.Remove(ctx, sector, storiface.FTSealed, true); rerr != nil { + if rerr := l.storage.Remove(ctx, sector, storiface.FTSealed, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (sealed): %w", rerr)) } - if rerr := l.storage.Remove(ctx, sector, storiface.FTCache, true); rerr != nil { + if rerr := l.storage.Remove(ctx, sector, storiface.FTCache, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (cache): %w", rerr)) } - if rerr := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true); rerr != nil { + if rerr := l.storage.Remove(ctx, sector, storiface.FTUnsealed, true, nil); rerr != nil { err = multierror.Append(err, xerrors.Errorf("removing sector (unsealed): %w", rerr)) } @@ -482,6 +535,52 @@ func (l *LocalWorker) Paths(ctx context.Context) ([]stores.StoragePath, error) { return l.localStore.Local(ctx) } +func (l *LocalWorker) memInfo() (memPhysical, memUsed, memSwap, memSwapUsed uint64, err error) { + h, err := sysinfo.Host() + if err != nil { + return 0, 0, 0, 0, err + } + + mem, err := h.Memory() + if err != nil { + return 0, 0, 0, 0, err + } + memPhysical = mem.Total + // mem.Available is memory available without swapping, it is more relevant for this calculation + memUsed = mem.Total - mem.Available + memSwap 
= mem.VirtualTotal + memSwapUsed = mem.VirtualUsed + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV1Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if cgMemMax, cgMemUsed, cgSwapMax, cgSwapUsed, err := cgroupV2Mem(); err == nil { + if cgMemMax > 0 && cgMemMax < memPhysical { + memPhysical = cgMemMax + memUsed = cgMemUsed + } + if cgSwapMax > 0 && cgSwapMax < memSwap { + memSwap = cgSwapMax + memSwapUsed = cgSwapUsed + } + } + + if l.noSwap { + memSwap = 0 + memSwapUsed = 0 + } + + return memPhysical, memUsed, memSwap, memSwapUsed, nil +} + func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { hostname, err := os.Hostname() // TODO: allow overriding from config if err != nil { @@ -493,30 +592,29 @@ func (l *LocalWorker) Info(context.Context) (storiface.WorkerInfo, error) { log.Errorf("getting gpu devices failed: %+v", err) } - h, err := sysinfo.Host() - if err != nil { - return storiface.WorkerInfo{}, xerrors.Errorf("getting host info: %w", err) - } - - mem, err := h.Memory() + memPhysical, memUsed, memSwap, memSwapUsed, err := l.memInfo() if err != nil { return storiface.WorkerInfo{}, xerrors.Errorf("getting memory info: %w", err) } - memSwap := mem.VirtualTotal - if l.noSwap { - memSwap = 0 + resEnv, err := storiface.ParseResourceEnv(func(key, def string) (string, bool) { + return l.envLookup(key) + }) + if err != nil { + return storiface.WorkerInfo{}, xerrors.Errorf("interpreting resource env vars: %w", err) } return storiface.WorkerInfo{ Hostname: hostname, IgnoreResources: l.ignoreResources, Resources: storiface.WorkerResources{ - MemPhysical: mem.Total, + MemPhysical: memPhysical, + MemUsed: memUsed, MemSwap: memSwap, - MemReserved: mem.VirtualUsed + mem.Total - mem.Available, // TODO: sub this process + MemSwapUsed: memSwapUsed, CPUs: 
uint64(runtime.NumCPU()), GPUs: gpus, + Resources: resEnv, }, }, nil } diff --git a/extern/sector-storage/worker_tracked.go b/extern/sector-storage/worker_tracked.go index 2160dd8e6..a1c647422 100644 --- a/extern/sector-storage/worker_tracked.go +++ b/extern/sector-storage/worker_tracked.go @@ -5,6 +5,7 @@ import ( "sync" "time" + "github.com/google/uuid" "github.com/ipfs/go-cid" "go.opencensus.io/stats" "go.opencensus.io/tag" @@ -19,15 +20,16 @@ import ( type trackedWork struct { job storiface.WorkerJob - worker WorkerID + worker storiface.WorkerID workerHostname string } type workTracker struct { lk sync.Mutex - done map[storiface.CallID]struct{} - running map[storiface.CallID]trackedWork + done map[storiface.CallID]struct{} + running map[storiface.CallID]trackedWork + prepared map[uuid.UUID]trackedWork // TODO: done, aggregate stats, queue stats, scheduler feedback } @@ -56,103 +58,161 @@ func (wt *workTracker) onDone(ctx context.Context, callID storiface.CallID) { delete(wt.running, callID) } -func (wt *workTracker) track(ctx context.Context, wid WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType) func(storiface.CallID, error) (storiface.CallID, error) { - return func(callID storiface.CallID, err error) (storiface.CallID, error) { - if err != nil { - return callID, err - } - - wt.lk.Lock() - defer wt.lk.Unlock() - - _, done := wt.done[callID] - if done { - delete(wt.done, callID) - return callID, err - } - - wt.running[callID] = trackedWork{ +func (wt *workTracker) track(ctx context.Context, ready chan struct{}, wid storiface.WorkerID, wi storiface.WorkerInfo, sid storage.SectorRef, task sealtasks.TaskType, cb func() (storiface.CallID, error)) (storiface.CallID, error) { + tracked := func(rw int, callID storiface.CallID) trackedWork { + return trackedWork{ job: storiface.WorkerJob{ - ID: callID, - Sector: sid.ID, - Task: task, - Start: time.Now(), + ID: callID, + Sector: sid.ID, + Task: task, + Start: time.Now(), + RunWait: rw, }, 
worker: wid, workerHostname: wi.Hostname, } + } + + wt.lk.Lock() + defer wt.lk.Unlock() + + select { + case <-ready: + case <-ctx.Done(): + return storiface.UndefCall, ctx.Err() + default: + prepID := uuid.New() + + wt.prepared[prepID] = tracked(storiface.RWPrepared, storiface.UndefCall) + + wt.lk.Unlock() + + select { + case <-ready: + case <-ctx.Done(): + wt.lk.Lock() + delete(wt.prepared, prepID) + return storiface.UndefCall, ctx.Err() + } - ctx, _ = tag.New( - ctx, - tag.Upsert(metrics.TaskType, string(task)), - tag.Upsert(metrics.WorkerHostname, wi.Hostname), - ) - stats.Record(ctx, metrics.WorkerCallsStarted.M(1)) + wt.lk.Lock() + delete(wt.prepared, prepID) + } + callID, err := cb() + if err != nil { + return callID, err + } + _, done := wt.done[callID] + if done { + delete(wt.done, callID) return callID, err } + + wt.running[callID] = tracked(storiface.RWRunning, callID) + + ctx, _ = tag.New( + ctx, + tag.Upsert(metrics.TaskType, string(task)), + tag.Upsert(metrics.WorkerHostname, wi.Hostname), + ) + stats.Record(ctx, metrics.WorkerCallsStarted.M(1)) + + return callID, err } -func (wt *workTracker) worker(wid WorkerID, wi storiface.WorkerInfo, w Worker) Worker { +func (wt *workTracker) worker(wid storiface.WorkerID, wi storiface.WorkerInfo, w Worker) *trackedWorker { return &trackedWorker{ Worker: w, wid: wid, workerInfo: wi, + execute: make(chan struct{}), + tracker: wt, } } -func (wt *workTracker) Running() []trackedWork { +func (wt *workTracker) Running() ([]trackedWork, []trackedWork) { wt.lk.Lock() defer wt.lk.Unlock() - out := make([]trackedWork, 0, len(wt.running)) + running := make([]trackedWork, 0, len(wt.running)) for _, job := range wt.running { - out = append(out, job) + running = append(running, job) + } + prepared := make([]trackedWork, 0, len(wt.prepared)) + for _, job := range wt.prepared { + prepared = append(prepared, job) } - return out + return running, prepared } type trackedWorker struct { Worker - wid WorkerID + wid storiface.WorkerID 
workerInfo storiface.WorkerInfo + execute chan struct{} // channel blocking execution in case we're waiting for resources but the task is ready to execute + tracker *workTracker } +func (t *trackedWorker) start() { + close(t.execute) +} + func (t *trackedWorker) SealPreCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, pieces []abi.PieceInfo) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit1)(t.Worker.SealPreCommit1(ctx, sector, ticket, pieces)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit1, func() (storiface.CallID, error) { return t.Worker.SealPreCommit1(ctx, sector, ticket, pieces) }) } func (t *trackedWorker) SealPreCommit2(ctx context.Context, sector storage.SectorRef, pc1o storage.PreCommit1Out) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit2)(t.Worker.SealPreCommit2(ctx, sector, pc1o)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTPreCommit2, func() (storiface.CallID, error) { return t.Worker.SealPreCommit2(ctx, sector, pc1o) }) } func (t *trackedWorker) SealCommit1(ctx context.Context, sector storage.SectorRef, ticket abi.SealRandomness, seed abi.InteractiveSealRandomness, pieces []abi.PieceInfo, cids storage.SectorCids) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit1)(t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTCommit1, func() (storiface.CallID, error) { return t.Worker.SealCommit1(ctx, sector, ticket, seed, pieces, cids) }) } func (t *trackedWorker) SealCommit2(ctx context.Context, sector storage.SectorRef, c1o storage.Commit1Out) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTCommit2)(t.Worker.SealCommit2(ctx, 
sector, c1o)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTCommit2, func() (storiface.CallID, error) { return t.Worker.SealCommit2(ctx, sector, c1o) }) } func (t *trackedWorker) FinalizeSector(ctx context.Context, sector storage.SectorRef, keepUnsealed []storage.Range) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTFinalize)(t.Worker.FinalizeSector(ctx, sector, keepUnsealed)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTFinalize, func() (storiface.CallID, error) { return t.Worker.FinalizeSector(ctx, sector, keepUnsealed) }) } func (t *trackedWorker) AddPiece(ctx context.Context, sector storage.SectorRef, pieceSizes []abi.UnpaddedPieceSize, newPieceSize abi.UnpaddedPieceSize, pieceData storage.Data) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece)(t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTAddPiece, func() (storiface.CallID, error) { + return t.Worker.AddPiece(ctx, sector, pieceSizes, newPieceSize, pieceData) + }) } func (t *trackedWorker) Fetch(ctx context.Context, s storage.SectorRef, ft storiface.SectorFileType, ptype storiface.PathType, am storiface.AcquireMode) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, s, sealtasks.TTFetch)(t.Worker.Fetch(ctx, s, ft, ptype, am)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, s, sealtasks.TTFetch, func() (storiface.CallID, error) { return t.Worker.Fetch(ctx, s, ft, ptype, am) }) } func (t *trackedWorker) UnsealPiece(ctx context.Context, id storage.SectorRef, index storiface.UnpaddedByteIndex, size abi.UnpaddedPieceSize, randomness abi.SealRandomness, cid cid.Cid) (storiface.CallID, error) { - return t.tracker.track(ctx, t.wid, t.workerInfo, id, sealtasks.TTUnseal)(t.Worker.UnsealPiece(ctx, 
id, index, size, randomness, cid)) + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, id, sealtasks.TTUnseal, func() (storiface.CallID, error) { return t.Worker.UnsealPiece(ctx, id, index, size, randomness, cid) }) +} + +func (t *trackedWorker) ReplicaUpdate(ctx context.Context, sector storage.SectorRef, pieces []abi.PieceInfo) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTReplicaUpdate, func() (storiface.CallID, error) { + return t.Worker.ReplicaUpdate(ctx, sector, pieces) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate1(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate1, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate1(ctx, sector, sectorKey, newSealed, newUnsealed) + }) +} + +func (t *trackedWorker) ProveReplicaUpdate2(ctx context.Context, sector storage.SectorRef, sectorKey, newSealed, newUnsealed cid.Cid, vanillaProofs storage.ReplicaVanillaProofs) (storiface.CallID, error) { + return t.tracker.track(ctx, t.execute, t.wid, t.workerInfo, sector, sealtasks.TTProveReplicaUpdate2, func() (storiface.CallID, error) { + return t.Worker.ProveReplicaUpdate2(ctx, sector, sectorKey, newSealed, newUnsealed, vanillaProofs) + }) } var _ Worker = &trackedWorker{} diff --git a/extern/storage-sealing/cbor_gen.go b/extern/storage-sealing/cbor_gen.go index 1dfaf54a5..c1e2b08fa 100644 --- a/extern/storage-sealing/cbor_gen.go +++ b/extern/storage-sealing/cbor_gen.go @@ -143,7 +143,7 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { _, err := w.Write(cbg.CborNull) return err } - if _, err := w.Write([]byte{184, 26}); err != nil { + if _, err := w.Write([]byte{184, 32}); err != nil { return err } @@ -573,6 +573,137 @@ func (t *SectorInfo) MarshalCBOR(w io.Writer) error { return err } + // t.CCUpdate (bool) 
(bool) + if len("CCUpdate") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CCUpdate\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCUpdate"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CCUpdate")); err != nil { + return err + } + + if err := cbg.WriteBool(w, t.CCUpdate); err != nil { + return err + } + + // t.CCPieces ([]sealing.Piece) (slice) + if len("CCPieces") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"CCPieces\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("CCPieces"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("CCPieces")); err != nil { + return err + } + + if len(t.CCPieces) > cbg.MaxLength { + return xerrors.Errorf("Slice value in field t.CCPieces was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajArray, uint64(len(t.CCPieces))); err != nil { + return err + } + for _, v := range t.CCPieces { + if err := v.MarshalCBOR(w); err != nil { + return err + } + } + + // t.UpdateSealed (cid.Cid) (struct) + if len("UpdateSealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdateSealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("UpdateSealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdateSealed")); err != nil { + return err + } + + if t.UpdateSealed == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.UpdateSealed); err != nil { + return xerrors.Errorf("failed to write cid field t.UpdateSealed: %w", err) + } + } + + // t.UpdateUnsealed (cid.Cid) (struct) + if len("UpdateUnsealed") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"UpdateUnsealed\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, 
uint64(len("UpdateUnsealed"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("UpdateUnsealed")); err != nil { + return err + } + + if t.UpdateUnsealed == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.UpdateUnsealed); err != nil { + return xerrors.Errorf("failed to write cid field t.UpdateUnsealed: %w", err) + } + } + + // t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice) + if len("ReplicaUpdateProof") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReplicaUpdateProof\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateProof"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ReplicaUpdateProof")); err != nil { + return err + } + + if len(t.ReplicaUpdateProof) > cbg.ByteArrayMaxLen { + return xerrors.Errorf("Byte array in field t.ReplicaUpdateProof was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajByteString, uint64(len(t.ReplicaUpdateProof))); err != nil { + return err + } + + if _, err := w.Write(t.ReplicaUpdateProof[:]); err != nil { + return err + } + + // t.ReplicaUpdateMessage (cid.Cid) (struct) + if len("ReplicaUpdateMessage") > cbg.MaxLength { + return xerrors.Errorf("Value in field \"ReplicaUpdateMessage\" was too long") + } + + if err := cbg.WriteMajorTypeHeaderBuf(scratch, w, cbg.MajTextString, uint64(len("ReplicaUpdateMessage"))); err != nil { + return err + } + if _, err := io.WriteString(w, string("ReplicaUpdateMessage")); err != nil { + return err + } + + if t.ReplicaUpdateMessage == nil { + if _, err := w.Write(cbg.CborNull); err != nil { + return err + } + } else { + if err := cbg.WriteCidBuf(scratch, w, *t.ReplicaUpdateMessage); err != nil { + return xerrors.Errorf("failed to write cid field t.ReplicaUpdateMessage: %w", err) + } + } + // t.FaultReportMsg (cid.Cid) (struct) if len("FaultReportMsg") > 
cbg.MaxLength { return xerrors.Errorf("Value in field \"FaultReportMsg\" was too long") @@ -1166,6 +1297,145 @@ func (t *SectorInfo) UnmarshalCBOR(r io.Reader) error { } t.InvalidProofs = uint64(extra) + } + // t.CCUpdate (bool) (bool) + case "CCUpdate": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + if maj != cbg.MajOther { + return fmt.Errorf("booleans must be major type 7") + } + switch extra { + case 20: + t.CCUpdate = false + case 21: + t.CCUpdate = true + default: + return fmt.Errorf("booleans are either major type 7, value 20 or 21 (got %d)", extra) + } + // t.CCPieces ([]sealing.Piece) (slice) + case "CCPieces": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.MaxLength { + return fmt.Errorf("t.CCPieces: array too large (%d)", extra) + } + + if maj != cbg.MajArray { + return fmt.Errorf("expected cbor array") + } + + if extra > 0 { + t.CCPieces = make([]Piece, extra) + } + + for i := 0; i < int(extra); i++ { + + var v Piece + if err := v.UnmarshalCBOR(br); err != nil { + return err + } + + t.CCPieces[i] = v + } + + // t.UpdateSealed (cid.Cid) (struct) + case "UpdateSealed": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.UpdateSealed: %w", err) + } + + t.UpdateSealed = &c + } + + } + // t.UpdateUnsealed (cid.Cid) (struct) + case "UpdateUnsealed": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.UpdateUnsealed: %w", err) + } + + t.UpdateUnsealed = &c + } + + } + // t.ReplicaUpdateProof (storage.ReplicaUpdateProof) (slice) + case 
"ReplicaUpdateProof": + + maj, extra, err = cbg.CborReadHeaderBuf(br, scratch) + if err != nil { + return err + } + + if extra > cbg.ByteArrayMaxLen { + return fmt.Errorf("t.ReplicaUpdateProof: byte array too large (%d)", extra) + } + if maj != cbg.MajByteString { + return fmt.Errorf("expected byte array") + } + + if extra > 0 { + t.ReplicaUpdateProof = make([]uint8, extra) + } + + if _, err := io.ReadFull(br, t.ReplicaUpdateProof[:]); err != nil { + return err + } + // t.ReplicaUpdateMessage (cid.Cid) (struct) + case "ReplicaUpdateMessage": + + { + + b, err := br.ReadByte() + if err != nil { + return err + } + if b != cbg.CborNull[0] { + if err := br.UnreadByte(); err != nil { + return err + } + + c, err := cbg.ReadCid(br) + if err != nil { + return xerrors.Errorf("failed to read cid field t.ReplicaUpdateMessage: %w", err) + } + + t.ReplicaUpdateMessage = &c + } + } // t.FaultReportMsg (cid.Cid) (struct) case "FaultReportMsg": diff --git a/extern/storage-sealing/checks.go b/extern/storage-sealing/checks.go index 74a791fcb..3525c84a7 100644 --- a/extern/storage-sealing/checks.go +++ b/extern/storage-sealing/checks.go @@ -35,6 +35,9 @@ type ErrInvalidProof struct{ error } type ErrNoPrecommit struct{ error } type ErrCommitWaitFailed struct{ error } +type ErrBadRU struct{ error } +type ErrBadPR struct{ error } + func checkPieces(ctx context.Context, maddr address.Address, si SectorInfo, api SealingAPI) error { tok, height, err := api.ChainHead(ctx) if err != nil { @@ -187,3 +190,32 @@ func (m *Sealing) checkCommit(ctx context.Context, si SectorInfo, proof []byte, return nil } + +// check that sector info is good after running a replica update +func checkReplicaUpdate(ctx context.Context, maddr address.Address, si SectorInfo, tok TipSetToken, api SealingAPI) error { + + if err := checkPieces(ctx, maddr, si, api); err != nil { + return err + } + if !si.CCUpdate { + return xerrors.Errorf("replica update on sector not marked for update") + } + + commD, err := 
api.StateComputeDataCommitment(ctx, maddr, si.SectorType, si.dealIDs(), tok) + if err != nil { + return &ErrApi{xerrors.Errorf("calling StateComputeDataCommitment: %w", err)} + } + if si.UpdateUnsealed == nil || !commD.Equals(*si.UpdateUnsealed) { + return &ErrBadRU{xerrors.Errorf("on chain CommD differs from sector: %s != %s", commD, si.CommD)} + } + + if si.UpdateSealed == nil { + return &ErrBadRU{xerrors.Errorf("nil sealed cid")} + } + if si.ReplicaUpdateProof == nil { + return &ErrBadPR{xerrors.Errorf("nil PR2 proof")} + } + + return nil + +} diff --git a/extern/storage-sealing/commit_batch_test.go b/extern/storage-sealing/commit_batch_test.go index e03c34693..3bda6d3fd 100644 --- a/extern/storage-sealing/commit_batch_test.go +++ b/extern/storage-sealing/commit_batch_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -28,6 +29,7 @@ import ( ) func TestCommitBatcher(t *testing.T) { + //stm: @CHAIN_STATE_MINER_PRE_COM_INFO_001, @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 t0123, err := address.NewFromString("t0123") require.NoError(t, err) @@ -147,6 +149,7 @@ func TestCommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001, @CHAIN_STATE_MINER_GET_COLLATERAL_001 expectSend := func(expect []abi.SectorNumber, aboveBalancer, failOnePCI bool) action { return func(t *testing.T, s *mocks.MockCommitBatcherApi, pcb *sealing.CommitBatcher) promise { s.EXPECT().StateMinerInfo(gomock.Any(), gomock.Any(), gomock.Any()).Return(miner.MinerInfo{Owner: t0123, Worker: t0123}, nil) diff --git a/extern/storage-sealing/fsm.go b/extern/storage-sealing/fsm.go index 00e38694d..83874e907 100644 --- a/extern/storage-sealing/fsm.go +++ b/extern/storage-sealing/fsm.go @@ -133,6 +133,44 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto on(SectorFinalizeFailed{}, FinalizeFailed), ), + // Snap deals + SnapDealsWaitDeals: planOne( + on(SectorAddPiece{}, SnapDealsAddPiece), + 
on(SectorStartPacking{}, SnapDealsPacking), + ), + SnapDealsAddPiece: planOne( + on(SectorPieceAdded{}, SnapDealsWaitDeals), + apply(SectorStartPacking{}), + apply(SectorAddPiece{}), + on(SectorAddPieceFailed{}, SnapDealsAddPieceFailed), + ), + SnapDealsPacking: planOne( + on(SectorPacked{}, UpdateReplica), + ), + UpdateReplica: planOne( + on(SectorReplicaUpdate{}, ProveReplicaUpdate), + on(SectorUpdateReplicaFailed{}, ReplicaUpdateFailed), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + ), + ProveReplicaUpdate: planOne( + on(SectorProveReplicaUpdate{}, SubmitReplicaUpdate), + on(SectorProveReplicaUpdateFailed{}, ReplicaUpdateFailed), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + ), + SubmitReplicaUpdate: planOne( + on(SectorReplicaUpdateSubmitted{}, ReplicaUpdateWait), + on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed), + ), + ReplicaUpdateWait: planOne( + on(SectorReplicaUpdateLanded{}, FinalizeReplicaUpdate), + on(SectorSubmitReplicaUpdateFailed{}, ReplicaUpdateFailed), + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + FinalizeReplicaUpdate: planOne( + on(SectorFinalized{}, Proving), + ), // Sealing errors AddPieceFailed: planOne( @@ -188,11 +226,37 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto onReturning(SectorUpdateDealIDs{}), ), + // Snap Deals Errors + SnapDealsAddPieceFailed: planOne( + on(SectorRetryWaitDeals{}, SnapDealsWaitDeals), + apply(SectorStartPacking{}), + apply(SectorAddPiece{}), + ), + SnapDealsDealsExpired: planOne( + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + SnapDealsRecoverDealIDs: planOne( + on(SectorUpdateDealIDs{}, SubmitReplicaUpdate), + on(SectorAbortUpgrade{}, AbortUpgrade), + ), + AbortUpgrade: planOneOrIgnore( + on(SectorRevertUpgradeToProving{}, Proving), + ), + ReplicaUpdateFailed: planOne( + on(SectorRetrySubmitReplicaUpdateWait{}, ReplicaUpdateWait), + 
on(SectorRetrySubmitReplicaUpdate{}, SubmitReplicaUpdate), + on(SectorRetryReplicaUpdate{}, UpdateReplica), + on(SectorRetryProveReplicaUpdate{}, ProveReplicaUpdate), + on(SectorInvalidDealIDs{}, SnapDealsRecoverDealIDs), + on(SectorDealsExpired{}, SnapDealsDealsExpired), + ), + // Post-seal Proving: planOne( on(SectorFaultReported{}, FaultReported), on(SectorFaulty{}, Faulty), + on(SectorStartCCUpdate{}, SnapDealsWaitDeals), ), Terminating: planOne( on(SectorTerminating{}, TerminateWait), @@ -209,7 +273,7 @@ var fsmPlanners = map[SectorState]func(events []statemachine.Event, state *Secto TerminateFailed: planOne( // SectorTerminating (global) ), - Removing: planOne( + Removing: planOneOrIgnore( on(SectorRemoved{}, Removed), on(SectorRemoveFailed{}, RemoveFailed), ), @@ -355,13 +419,6 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta log.Errorw("update sector stats", "error", err) } - // todo: drop this, use Context iface everywhere - wrapCtx := func(f func(Context, SectorInfo) error) func(statemachine.Context, SectorInfo) error { - return func(ctx statemachine.Context, info SectorInfo) error { - return f(&ctx, info) - } - } - switch state.State { // Happy path case Empty: @@ -403,6 +460,24 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta case FinalizeSector: return m.handleFinalizeSector, processed, nil + // Snap deals updates + case SnapDealsWaitDeals: + return m.handleWaitDeals, processed, nil + case SnapDealsAddPiece: + return m.handleAddPiece, processed, nil + case SnapDealsPacking: + return m.handlePacking, processed, nil + case UpdateReplica: + return m.handleReplicaUpdate, processed, nil + case ProveReplicaUpdate: + return m.handleProveReplicaUpdate, processed, nil + case SubmitReplicaUpdate: + return m.handleSubmitReplicaUpdate, processed, nil + case ReplicaUpdateWait: + return m.handleReplicaUpdateWait, processed, nil + case FinalizeReplicaUpdate: + return m.handleFinalizeReplicaUpdate, 
processed, nil + // Handled failure modes case AddPieceFailed: return m.handleAddPieceFailed, processed, nil @@ -426,7 +501,20 @@ func (m *Sealing) plan(events []statemachine.Event, state *SectorInfo) (func(sta case DealsExpired: return m.handleDealsExpired, processed, nil case RecoverDealIDs: - return wrapCtx(m.HandleRecoverDealIDs), processed, nil + return m.HandleRecoverDealIDs, processed, nil + + // Snap Deals failure modes + case SnapDealsAddPieceFailed: + return m.handleAddPieceFailed, processed, nil + + case SnapDealsDealsExpired: + return m.handleDealsExpiredSnapDeals, processed, nil + case SnapDealsRecoverDealIDs: + return m.handleSnapDealsRecoverDealIDs, processed, nil + case ReplicaUpdateFailed: + return m.handleSubmitReplicaUpdateFailed, processed, nil + case AbortUpgrade: + return m.handleAbortUpgrade, processed, nil // Post-seal case Proving: @@ -475,7 +563,7 @@ func (m *Sealing) onUpdateSector(ctx context.Context, state *SectorInfo) error { return xerrors.Errorf("getting config: %w", err) } - shouldUpdateInput := m.stats.updateSector(cfg, m.minerSectorID(state.SectorNumber), state.State) + shouldUpdateInput := m.stats.updateSector(ctx, cfg, m.minerSectorID(state.SectorNumber), state.State) // trigger more input processing when we've dipped below max sealing limits if shouldUpdateInput { @@ -642,3 +730,16 @@ func planOne(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err e return uint64(len(events)), nil } } + +// planOne but ignores unhandled states without erroring, this prevents the need to handle all possible events creating +// error during forced override +func planOneOrIgnore(ts ...func() (mut mutator, next func(*SectorInfo) (more bool, err error))) func(events []statemachine.Event, state *SectorInfo) (uint64, error) { + f := planOne(ts...) 
+ return func(events []statemachine.Event, state *SectorInfo) (uint64, error) { + cnt, err := f(events, state) + if err != nil { + log.Warnf("planOneOrIgnore: ignoring error from planOne: %s", err) + } + return cnt, nil + } +} diff --git a/extern/storage-sealing/fsm_events.go b/extern/storage-sealing/fsm_events.go index 650a81799..395c4b94a 100644 --- a/extern/storage-sealing/fsm_events.go +++ b/extern/storage-sealing/fsm_events.go @@ -295,6 +295,46 @@ type SectorFinalizeFailed struct{ error } func (evt SectorFinalizeFailed) FormatError(xerrors.Printer) (next error) { return evt.error } func (evt SectorFinalizeFailed) apply(*SectorInfo) {} +// Snap deals // CC update path + +type SectorStartCCUpdate struct{} + +func (evt SectorStartCCUpdate) apply(state *SectorInfo) { + state.CCUpdate = true + // Clear filler piece but remember in case of abort + state.CCPieces = state.Pieces + state.Pieces = nil +} + +type SectorReplicaUpdate struct { + Out storage.ReplicaUpdateOut +} + +func (evt SectorReplicaUpdate) apply(state *SectorInfo) { + state.UpdateSealed = &evt.Out.NewSealed + state.UpdateUnsealed = &evt.Out.NewUnsealed +} + +type SectorProveReplicaUpdate struct { + Proof storage.ReplicaUpdateProof +} + +func (evt SectorProveReplicaUpdate) apply(state *SectorInfo) { + state.ReplicaUpdateProof = evt.Proof +} + +type SectorReplicaUpdateSubmitted struct { + Message cid.Cid +} + +func (evt SectorReplicaUpdateSubmitted) apply(state *SectorInfo) { + state.ReplicaUpdateMessage = &evt.Message +} + +type SectorReplicaUpdateLanded struct{} + +func (evt SectorReplicaUpdateLanded) apply(state *SectorInfo) {} + // Failed state recovery type SectorRetrySealPreCommit1 struct{} @@ -351,6 +391,60 @@ func (evt SectorUpdateDealIDs) apply(state *SectorInfo) { } } +// Snap Deals failure and recovery + +type SectorRetryReplicaUpdate struct{} + +func (evt SectorRetryReplicaUpdate) apply(state *SectorInfo) {} + +type SectorRetryProveReplicaUpdate struct{} + +func (evt 
SectorRetryProveReplicaUpdate) apply(state *SectorInfo) {} + +type SectorUpdateReplicaFailed struct{ error } + +func (evt SectorUpdateReplicaFailed) FormatError(xerrors.Printer) (next error) { return evt.error } +func (evt SectorUpdateReplicaFailed) apply(state *SectorInfo) {} + +type SectorProveReplicaUpdateFailed struct{ error } + +func (evt SectorProveReplicaUpdateFailed) FormatError(xerrors.Printer) (next error) { + return evt.error +} +func (evt SectorProveReplicaUpdateFailed) apply(state *SectorInfo) {} + +type SectorAbortUpgrade struct{ error } + +func (evt SectorAbortUpgrade) apply(state *SectorInfo) {} +func (evt SectorAbortUpgrade) FormatError(xerrors.Printer) (next error) { + return evt.error +} + +type SectorRevertUpgradeToProving struct{} + +func (evt SectorRevertUpgradeToProving) apply(state *SectorInfo) { + // cleanup sector state so that it is back in proving + state.CCUpdate = false + state.UpdateSealed = nil + state.UpdateUnsealed = nil + state.ReplicaUpdateProof = nil + state.ReplicaUpdateMessage = nil + state.Pieces = state.CCPieces + state.CCPieces = nil +} + +type SectorRetrySubmitReplicaUpdateWait struct{} + +func (evt SectorRetrySubmitReplicaUpdateWait) apply(state *SectorInfo) {} + +type SectorRetrySubmitReplicaUpdate struct{} + +func (evt SectorRetrySubmitReplicaUpdate) apply(state *SectorInfo) {} + +type SectorSubmitReplicaUpdateFailed struct{} + +func (evt SectorSubmitReplicaUpdateFailed) apply(state *SectorInfo) {} + // Faults type SectorFaulty struct{} diff --git a/extern/storage-sealing/fsm_test.go b/extern/storage-sealing/fsm_test.go index 5ddef0d53..10ee17c6b 100644 --- a/extern/storage-sealing/fsm_test.go +++ b/extern/storage-sealing/fsm_test.go @@ -33,7 +33,8 @@ func TestHappyPath(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, notifee: func(before, after SectorInfo) { notif = append(notif, 
struct{ before, after SectorInfo }{before, after}) @@ -94,7 +95,8 @@ func TestHappyPathFinalizeEarly(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, notifee: func(before, after SectorInfo) { notif = append(notif, struct{ before, after SectorInfo }{before, after}) @@ -161,7 +163,8 @@ func TestCommitFinalizeFailed(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, notifee: func(before, after SectorInfo) { notif = append(notif, struct{ before, after SectorInfo }{before, after}) @@ -199,7 +202,8 @@ func TestSeedRevert(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, }, t: t, @@ -252,7 +256,8 @@ func TestPlanCommittingHandlesSectorCommitFailed(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, }, t: t, @@ -289,7 +294,8 @@ func TestBrokenState(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, notifee: func(before, after SectorInfo) { notif = append(notif, struct{ before, after SectorInfo }{before, after}) @@ -324,7 +330,8 @@ func TestTicketExpired(t *testing.T) { s: &Sealing{ maddr: ma, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, notifee: func(before, after SectorInfo) { notif = append(notif, struct{ before, after SectorInfo }{before, after}) diff --git 
a/extern/storage-sealing/input.go b/extern/storage-sealing/input.go index b69cf8c19..f3259f0cc 100644 --- a/extern/storage-sealing/input.go +++ b/extern/storage-sealing/input.go @@ -59,7 +59,13 @@ func (m *Sealing) handleWaitDeals(ctx statemachine.Context, sector SectorInfo) e return ctx.Send(SectorAddPiece{}) }, + number: sector.SectorNumber, + ccUpdate: sector.CCUpdate, } + } else { + // make sure we're only accounting for pieces which were correctly added + // (note that m.assignedPieces[sid] will always be empty here) + m.openSectors[sid].used = used } go func() { @@ -325,6 +331,17 @@ func (m *Sealing) SectorAddPieceToAny(ctx context.Context, size abi.UnpaddedPiec return api.SectorOffset{Sector: res.sn, Offset: res.offset.Padded()}, res.err } +func (m *Sealing) MatchPendingPiecesToOpenSectors(ctx context.Context) error { + sp, err := m.currentSealProof(ctx) + if err != nil { + return xerrors.Errorf("failed to get current seal proof: %w", err) + } + log.Debug("pieces to sector matching waiting for lock") + m.inputLk.Lock() + defer m.inputLk.Unlock() + return m.updateInput(ctx, sp) +} + // called with m.inputLk func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) error { ssize, err := sp.SectorSize() @@ -352,8 +369,33 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e toAssign[proposalCid] = struct{}{} + memo := make(map[abi.SectorNumber]abi.ChainEpoch) + expF := func(sn abi.SectorNumber) (abi.ChainEpoch, error) { + if exp, ok := memo[sn]; ok { + return exp, nil + } + onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, sn, TipSetToken{}) + if err != nil { + return 0, err + } + memo[sn] = onChainInfo.Expiration + return onChainInfo.Expiration, nil + } + for id, sector := range m.openSectors { avail := abi.PaddedPieceSize(ssize).Unpadded() - sector.used + // check that sector lifetime is long enough to fit deal using latest expiration from on chain + + ok, err := 
sector.dealFitsInLifetime(piece.deal.DealProposal.EndEpoch, expF) + if err != nil { + log.Errorf("failed to check expiration for cc Update sector %d", sector.number) + continue + } + if !ok { + exp, _ := expF(sector.number) + log.Infof("CC update sector %d cannot fit deal, expiration %d before deal end epoch %d", id, exp, piece.deal.DealProposal.EndEpoch) + continue + } if piece.size <= avail { // (note: if we have enough space for the piece, we also have enough space for inter-piece padding) matches = append(matches, match{ @@ -412,6 +454,7 @@ func (m *Sealing) updateInput(ctx context.Context, sp abi.RegisteredSealProof) e } if len(toAssign) > 0 { + log.Errorf("we are trying to create a new sector with open sectors %v", m.openSectors) if err := m.tryCreateDealSector(ctx, sp); err != nil { log.Errorw("Failed to create a new sector for deals", "error", err) } @@ -469,7 +512,7 @@ func (m *Sealing) createSector(ctx context.Context, cfg sealiface.Config, sp abi } // update stats early, fsm planner would do that async - m.stats.updateSector(cfg, m.minerSectorID(sid), UndefinedSectorState) + m.stats.updateSector(ctx, cfg, m.minerSectorID(sid), UndefinedSectorState) return sid, nil } diff --git a/extern/storage-sealing/mocks/api.go b/extern/storage-sealing/mocks/api.go index cc8561dc7..95c222ecd 100644 --- a/extern/storage-sealing/mocks/api.go +++ b/extern/storage-sealing/mocks/api.go @@ -213,6 +213,21 @@ func (mr *MockSealingAPIMockRecorder) StateMarketStorageDealProposal(arg0, arg1, return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMarketStorageDealProposal", reflect.TypeOf((*MockSealingAPI)(nil).StateMarketStorageDealProposal), arg0, arg1, arg2) } +// StateMinerActiveSectors mocks base method. 
+func (m *MockSealingAPI) StateMinerActiveSectors(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StateMinerActiveSectors", arg0, arg1, arg2) + ret0, _ := ret[0].([]*miner.SectorOnChainInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// StateMinerActiveSectors indicates an expected call of StateMinerActiveSectors. +func (mr *MockSealingAPIMockRecorder) StateMinerActiveSectors(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StateMinerActiveSectors", reflect.TypeOf((*MockSealingAPI)(nil).StateMinerActiveSectors), arg0, arg1, arg2) +} + // StateMinerAvailableBalance mocks base method. func (m *MockSealingAPI) StateMinerAvailableBalance(arg0 context.Context, arg1 address.Address, arg2 sealing.TipSetToken) (big.Int, error) { m.ctrl.T.Helper() diff --git a/extern/storage-sealing/precommit_batch_test.go b/extern/storage-sealing/precommit_batch_test.go index f6440996e..a90645a05 100644 --- a/extern/storage-sealing/precommit_batch_test.go +++ b/extern/storage-sealing/precommit_batch_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -38,6 +39,7 @@ var fc = config.MinerFeeConfig{ } func TestPrecommitBatcher(t *testing.T) { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 t0123, err := address.NewFromString("t0123") require.NoError(t, err) @@ -151,6 +153,7 @@ func TestPrecommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 expectSend := func(expect []abi.SectorNumber) action { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) @@ -171,6 +174,7 @@ func TestPrecommitBatcher(t *testing.T) { } } + //stm: @CHAIN_STATE_MINER_INFO_001, @CHAIN_STATE_NETWORK_VERSION_001 expectSendsSingle := 
func(expect []abi.SectorNumber) action { return func(t *testing.T, s *mocks.MockPreCommitBatcherApi, pcb *sealing.PreCommitBatcher) promise { s.EXPECT().ChainHead(gomock.Any()).Return(nil, abi.ChainEpoch(1), nil) diff --git a/extern/storage-sealing/sealing.go b/extern/storage-sealing/sealing.go index 165003bc9..81f6b38e9 100644 --- a/extern/storage-sealing/sealing.go +++ b/extern/storage-sealing/sealing.go @@ -63,6 +63,7 @@ type SealingAPI interface { StateMinerInfo(context.Context, address.Address, TipSetToken) (miner.MinerInfo, error) StateMinerAvailableBalance(context.Context, address.Address, TipSetToken) (big.Int, error) StateMinerSectorAllocated(context.Context, address.Address, abi.SectorNumber, TipSetToken) (bool, error) + StateMinerActiveSectors(context.Context, address.Address, TipSetToken) ([]*miner.SectorOnChainInfo, error) StateMarketStorageDeal(context.Context, abi.DealID, TipSetToken) (*api.MarketDeal, error) StateMarketStorageDealProposal(context.Context, abi.DealID, TipSetToken) (market.DealProposal, error) StateNetworkVersion(ctx context.Context, tok TipSetToken) (network.Version, error) @@ -121,11 +122,24 @@ type Sealing struct { } type openSector struct { - used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors + used abi.UnpaddedPieceSize // change to bitfield/rle when AddPiece gains offset support to better fill sectors + number abi.SectorNumber + ccUpdate bool maybeAccept func(cid.Cid) error // called with inputLk } +func (o *openSector) dealFitsInLifetime(dealEnd abi.ChainEpoch, expF func(sn abi.SectorNumber) (abi.ChainEpoch, error)) (bool, error) { + if !o.ccUpdate { + return true, nil + } + expiration, err := expF(o.number) + if err != nil { + return false, err + } + return expiration >= dealEnd, nil +} + type pendingPiece struct { size abi.UnpaddedPieceSize deal api.PieceDealInfo @@ -166,7 +180,8 @@ func New(mctx context.Context, api SealingAPI, fc config.MinerFeeConfig, events 
getConfig: gc, stats: SectorStats{ - bySector: map[abi.SectorID]statSectorState{}, + bySector: map[abi.SectorID]SectorState{}, + byState: map[SectorState]int64{}, }, } s.startupWait.Add(1) diff --git a/extern/storage-sealing/sector_state.go b/extern/storage-sealing/sector_state.go index b606de5ae..ba6df7ff4 100644 --- a/extern/storage-sealing/sector_state.go +++ b/extern/storage-sealing/sector_state.go @@ -3,50 +3,65 @@ package sealing type SectorState string var ExistSectorStateList = map[SectorState]struct{}{ - Empty: {}, - WaitDeals: {}, - Packing: {}, - AddPiece: {}, - AddPieceFailed: {}, - GetTicket: {}, - PreCommit1: {}, - PreCommit2: {}, - PreCommitting: {}, - PreCommitWait: {}, - SubmitPreCommitBatch: {}, - PreCommitBatchWait: {}, - WaitSeed: {}, - Committing: {}, - CommitFinalize: {}, - CommitFinalizeFailed: {}, - SubmitCommit: {}, - CommitWait: {}, - SubmitCommitAggregate: {}, - CommitAggregateWait: {}, - FinalizeSector: {}, - Proving: {}, - FailedUnrecoverable: {}, - SealPreCommit1Failed: {}, - SealPreCommit2Failed: {}, - PreCommitFailed: {}, - ComputeProofFailed: {}, - CommitFailed: {}, - PackingFailed: {}, - FinalizeFailed: {}, - DealsExpired: {}, - RecoverDealIDs: {}, - Faulty: {}, - FaultReported: {}, - FaultedFinal: {}, - Terminating: {}, - TerminateWait: {}, - TerminateFinality: {}, - TerminateFailed: {}, - Removing: {}, - RemoveFailed: {}, - Removed: {}, + Empty: {}, + WaitDeals: {}, + Packing: {}, + AddPiece: {}, + AddPieceFailed: {}, + GetTicket: {}, + PreCommit1: {}, + PreCommit2: {}, + PreCommitting: {}, + PreCommitWait: {}, + SubmitPreCommitBatch: {}, + PreCommitBatchWait: {}, + WaitSeed: {}, + Committing: {}, + CommitFinalize: {}, + CommitFinalizeFailed: {}, + SubmitCommit: {}, + CommitWait: {}, + SubmitCommitAggregate: {}, + CommitAggregateWait: {}, + FinalizeSector: {}, + Proving: {}, + FailedUnrecoverable: {}, + SealPreCommit1Failed: {}, + SealPreCommit2Failed: {}, + PreCommitFailed: {}, + ComputeProofFailed: {}, + CommitFailed: {}, + 
PackingFailed: {}, + FinalizeFailed: {}, + DealsExpired: {}, + RecoverDealIDs: {}, + Faulty: {}, + FaultReported: {}, + FaultedFinal: {}, + Terminating: {}, + TerminateWait: {}, + TerminateFinality: {}, + TerminateFailed: {}, + Removing: {}, + RemoveFailed: {}, + Removed: {}, + SnapDealsWaitDeals: {}, + SnapDealsAddPiece: {}, + SnapDealsPacking: {}, + UpdateReplica: {}, + ProveReplicaUpdate: {}, + SubmitReplicaUpdate: {}, + ReplicaUpdateWait: {}, + FinalizeReplicaUpdate: {}, + SnapDealsAddPieceFailed: {}, + SnapDealsDealsExpired: {}, + SnapDealsRecoverDealIDs: {}, + ReplicaUpdateFailed: {}, + AbortUpgrade: {}, } +// cmd/lotus-miner/info.go defines CLI colors corresponding to these states +// update files there when adding new states const ( UndefinedSectorState SectorState = "" @@ -79,6 +94,17 @@ const ( FinalizeSector SectorState = "FinalizeSector" Proving SectorState = "Proving" + + // snap deals / cc update + SnapDealsWaitDeals SectorState = "SnapDealsWaitDeals" + SnapDealsAddPiece SectorState = "SnapDealsAddPiece" + SnapDealsPacking SectorState = "SnapDealsPacking" + UpdateReplica SectorState = "UpdateReplica" + ProveReplicaUpdate SectorState = "ProveReplicaUpdate" + SubmitReplicaUpdate SectorState = "SubmitReplicaUpdate" + ReplicaUpdateWait SectorState = "ReplicaUpdateWait" + FinalizeReplicaUpdate SectorState = "FinalizeReplicaUpdate" + // error modes FailedUnrecoverable SectorState = "FailedUnrecoverable" AddPieceFailed SectorState = "AddPieceFailed" @@ -92,6 +118,13 @@ const ( DealsExpired SectorState = "DealsExpired" RecoverDealIDs SectorState = "RecoverDealIDs" + // snap deals error modes + SnapDealsAddPieceFailed SectorState = "SnapDealsAddPieceFailed" + SnapDealsDealsExpired SectorState = "SnapDealsDealsExpired" + SnapDealsRecoverDealIDs SectorState = "SnapDealsRecoverDealIDs" + AbortUpgrade SectorState = "AbortUpgrade" + ReplicaUpdateFailed SectorState = "ReplicaUpdateFailed" + Faulty SectorState = "Faulty" // sector is corrupted or gone for some reason 
FaultReported SectorState = "FaultReported" // sector has been declared as a fault on chain FaultedFinal SectorState = "FaultedFinal" // fault declared on chain @@ -108,11 +141,11 @@ const ( func toStatState(st SectorState, finEarly bool) statSectorState { switch st { - case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed: + case UndefinedSectorState, Empty, WaitDeals, AddPiece, AddPieceFailed, SnapDealsWaitDeals, SnapDealsAddPiece: return sstStaging - case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector: + case Packing, GetTicket, PreCommit1, PreCommit2, PreCommitting, PreCommitWait, SubmitPreCommitBatch, PreCommitBatchWait, WaitSeed, Committing, CommitFinalize, FinalizeSector, SnapDealsPacking, UpdateReplica, ProveReplicaUpdate, FinalizeReplicaUpdate: return sstSealing - case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait: + case SubmitCommit, CommitWait, SubmitCommitAggregate, CommitAggregateWait, SubmitReplicaUpdate, ReplicaUpdateWait: if finEarly { // we use statSectorState for throttling storage use. 
With FinalizeEarly // we can consider sectors in states after CommitFinalize as finalized, so diff --git a/extern/storage-sealing/states_failed.go b/extern/storage-sealing/states_failed.go index 0c88cc384..c32ac4c3a 100644 --- a/extern/storage-sealing/states_failed.go +++ b/extern/storage-sealing/states_failed.go @@ -1,11 +1,13 @@ package sealing import ( + "context" "time" "github.com/hashicorp/go-multierror" "golang.org/x/xerrors" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/lotus/chain/actors/builtin/market" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -181,6 +183,67 @@ func (m *Sealing) handleComputeProofFailed(ctx statemachine.Context, sector Sect return ctx.Send(SectorRetryComputeProof{}) } +func (m *Sealing) handleSubmitReplicaUpdateFailed(ctx statemachine.Context, sector SectorInfo) error { + if sector.ReplicaUpdateMessage != nil { + mw, err := m.Api.StateSearchMsg(ctx.Context(), *sector.ReplicaUpdateMessage) + if err != nil { + // API error + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + } + + if mw == nil { + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + return ctx.Send(SectorRetrySubmitReplicaUpdateWait{}) + case exitcode.SysErrOutOfGas: + return ctx.Send(SectorRetrySubmitReplicaUpdate{}) + default: + // something else went wrong + } + } + + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleCommitting: api error, not proceeding: %+v", err) + return nil + } + + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { + switch err.(type) { + case *ErrApi: + log.Errorf("handleSubmitReplicaUpdateFailed: api error, not proceeding: %+v", err) + return nil + case *ErrBadRU: + log.Errorf("bad replica update: %+v", err) + return ctx.Send(SectorRetryReplicaUpdate{}) + case *ErrBadPR: + log.Errorf("bad PR1: 
+%v", err) + return ctx.Send(SectorRetryProveReplicaUpdate{}) + + case *ErrInvalidDeals: + return ctx.Send(SectorInvalidDealIDs{}) + case *ErrExpiredDeals: + return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)}) + default: + log.Errorf("sanity check error, not proceeding: +%v", err) + return xerrors.Errorf("checkReplica sanity check error: %w", err) + } + } + + if err := failedCooldown(ctx, sector); err != nil { + return err + } + + return ctx.Send(SectorRetrySubmitReplicaUpdate{}) +} + func (m *Sealing) handleCommitFailed(ctx statemachine.Context, sector SectorInfo) error { tok, _, err := m.Api.ChainHead(ctx.Context()) if err != nil { @@ -319,61 +382,40 @@ func (m *Sealing) handleDealsExpired(ctx statemachine.Context, sector SectorInfo return ctx.Send(SectorRemove{}) } -func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { - tok, height, err := m.Api.ChainHead(ctx.Context()) - if err != nil { - return xerrors.Errorf("getting chain head: %w", err) +func (m *Sealing) handleDealsExpiredSnapDeals(ctx statemachine.Context, sector SectorInfo) error { + if !sector.CCUpdate { + // Should be impossible + return xerrors.Errorf("should never reach SnapDealsDealsExpired as a non-CCUpdate sector") } - var toFix []int - paddingPieces := 0 - - for i, p := range sector.Pieces { - // if no deal is associated with the piece, ensure that we added it as - // filler (i.e. 
ensure that it has a zero PieceCID) - if p.DealInfo == nil { - exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) - if !p.Piece.PieceCID.Equals(exp) { - return xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) - } - paddingPieces++ - continue - } - - proposal, err := m.Api.StateMarketStorageDealProposal(ctx.Context(), p.DealInfo.DealID, tok) - if err != nil { - log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) - toFix = append(toFix, i) - continue - } - - if proposal.Provider != m.maddr { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, m.maddr) - toFix = append(toFix, i) - continue - } - - if proposal.PieceCID != p.Piece.PieceCID { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) - toFix = append(toFix, i) - continue - } + return ctx.Send(SectorAbortUpgrade{xerrors.Errorf("one of upgrade deals expired")}) +} - if p.Piece.Size != proposal.PieceSize { - log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize) - toFix = append(toFix, i) - continue - } +func (m *Sealing) handleAbortUpgrade(ctx statemachine.Context, sector SectorInfo) error { + if !sector.CCUpdate { + return xerrors.Errorf("should never reach AbortUpgrade as a non-CCUpdate sector") + } - if height >= proposal.StartEpoch { - // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces - // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) - return xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired 
deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height) - } + // Remove snap deals replica if any + if err := m.sealer.ReleaseReplicaUpgrade(ctx.Context(), m.minerSector(sector.SectorType, sector.SectorNumber)); err != nil { + return xerrors.Errorf("removing CC update files from sector storage") } + return ctx.Send(SectorRevertUpgradeToProving{}) +} +// failWith is a mutator or global mutator +func (m *Sealing) handleRecoverDealIDsOrFailWith(ctx statemachine.Context, sector SectorInfo, failWith interface{}) error { + toFix, paddingPieces, err := recoveryPiecesToFix(ctx.Context(), m.Api, sector, m.maddr) + if err != nil { + return err + } + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + return err + } failed := map[int]error{} updates := map[int]abi.DealID{} + for _, i := range toFix { p := sector.Pieces[i] @@ -381,7 +423,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { // TODO: check if we are in an early enough state try to remove this piece log.Errorf("can't fix sector deals: piece %d (of %d) of sector %d has nil DealInfo.PublishCid (refers to deal %d)", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID) // Not much to do here (and this can only happen for old spacerace sectors) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } var dp *market.DealProposal @@ -416,7 +458,7 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { if len(failed)+paddingPieces == len(sector.Pieces) { log.Errorf("removing sector %d: all deals expired or unrecoverable: %+v", sector.SectorNumber, merr) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } // todo: try to remove bad pieces (hard; see the todo above) @@ -424,9 +466,73 @@ func (m *Sealing) HandleRecoverDealIDs(ctx Context, sector SectorInfo) error { // for now removing sectors is probably better than having them stuck in RecoverDealIDs 
// and expire anyways log.Errorf("removing sector %d: deals expired or unrecoverable: %+v", sector.SectorNumber, merr) - return ctx.Send(SectorRemove{}) + return ctx.Send(failWith) } // Not much to do here, we can't go back in time to commit this sector return ctx.Send(SectorUpdateDealIDs{Updates: updates}) } + +func (m *Sealing) HandleRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { + return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorRemove{}) +} + +func (m *Sealing) handleSnapDealsRecoverDealIDs(ctx statemachine.Context, sector SectorInfo) error { + return m.handleRecoverDealIDsOrFailWith(ctx, sector, SectorAbortUpgrade{}) +} + +func recoveryPiecesToFix(ctx context.Context, api SealingAPI, sector SectorInfo, maddr address.Address) ([]int, int, error) { + tok, height, err := api.ChainHead(ctx) + if err != nil { + return nil, 0, xerrors.Errorf("getting chain head: %w", err) + } + + var toFix []int + paddingPieces := 0 + + for i, p := range sector.Pieces { + // if no deal is associated with the piece, ensure that we added it as + // filler (i.e. 
ensure that it has a zero PieceCID) + if p.DealInfo == nil { + exp := zerocomm.ZeroPieceCommitment(p.Piece.Size.Unpadded()) + if !p.Piece.PieceCID.Equals(exp) { + return nil, 0, xerrors.Errorf("sector %d piece %d had non-zero PieceCID %+v", sector.SectorNumber, i, p.Piece.PieceCID) + } + paddingPieces++ + continue + } + + proposal, err := api.StateMarketStorageDealProposal(ctx, p.DealInfo.DealID, tok) + if err != nil { + log.Warnf("getting deal %d for piece %d: %+v", p.DealInfo.DealID, i, err) + toFix = append(toFix, i) + continue + } + + if proposal.Provider != maddr { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong provider: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.Provider, maddr) + toFix = append(toFix, i) + continue + } + + if proposal.PieceCID != p.Piece.PieceCID { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with wrong PieceCID: %s != %s", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.PieceCID, proposal.PieceCID) + toFix = append(toFix, i) + continue + } + + if p.Piece.Size != proposal.PieceSize { + log.Warnf("piece %d (of %d) of sector %d refers deal %d with different size: %d != %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, p.Piece.Size, proposal.PieceSize) + toFix = append(toFix, i) + continue + } + + if height >= proposal.StartEpoch { + // TODO: check if we are in an early enough state (before precommit), try to remove the offending pieces + // (tricky as we have to 'defragment' the sector while doing that, and update piece references for retrieval) + return nil, 0, xerrors.Errorf("can't fix sector deals: piece %d (of %d) of sector %d refers expired deal %d - should start at %d, head %d", i, len(sector.Pieces), sector.SectorNumber, p.DealInfo.DealID, proposal.StartEpoch, height) + } + } + + return toFix, paddingPieces, nil +} diff --git a/extern/storage-sealing/states_failed_test.go b/extern/storage-sealing/states_failed_test.go 
index 22c245afd..9b28f35b1 100644 --- a/extern/storage-sealing/states_failed_test.go +++ b/extern/storage-sealing/states_failed_test.go @@ -1,3 +1,4 @@ +//stm: #unit package sealing_test import ( @@ -6,6 +7,7 @@ import ( "testing" "github.com/filecoin-project/go-state-types/network" + statemachine "github.com/filecoin-project/go-statemachine" market0 "github.com/filecoin-project/specs-actors/actors/builtin/market" @@ -25,6 +27,7 @@ import ( ) func TestStateRecoverDealIDs(t *testing.T) { + t.Skip("Bring this back when we can correctly mock a state machine context: Issue #7867") mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() @@ -40,13 +43,14 @@ func TestStateRecoverDealIDs(t *testing.T) { sctx := mocks.NewMockContext(mockCtrl) sctx.EXPECT().Context().AnyTimes().Return(ctx) - api.EXPECT().ChainHead(ctx).Times(1).Return(nil, abi.ChainEpoch(10), nil) + api.EXPECT().ChainHead(ctx).Times(2).Return(nil, abi.ChainEpoch(10), nil) var dealId abi.DealID = 12 dealProposal := market.DealProposal{ PieceCID: idCid("newPieceCID"), } + //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001, @CHAIN_STATE_NETWORK_VERSION_001 api.EXPECT().StateMarketStorageDealProposal(ctx, dealId, nil).Return(dealProposal, nil) pc := idCid("publishCID") @@ -70,7 +74,9 @@ func TestStateRecoverDealIDs(t *testing.T) { sctx.EXPECT().Send(sealing.SectorRemove{}).Return(nil) - err := fakeSealing.HandleRecoverDealIDs(sctx, sealing.SectorInfo{ + // TODO sctx should satisfy an interface so it can be useable for mocking. This will fail because we are passing in an empty context now to get this to build. 
+ // https://github.com/filecoin-project/lotus/issues/7867 + err := fakeSealing.HandleRecoverDealIDs(statemachine.Context{}, sealing.SectorInfo{ Pieces: []sealing.Piece{ { DealInfo: &api2.PieceDealInfo{ diff --git a/extern/storage-sealing/states_replica_update.go b/extern/storage-sealing/states_replica_update.go new file mode 100644 index 000000000..43d5467ed --- /dev/null +++ b/extern/storage-sealing/states_replica_update.go @@ -0,0 +1,222 @@ +package sealing + +import ( + "bytes" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + statemachine "github.com/filecoin-project/go-statemachine" + api "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "golang.org/x/xerrors" +) + +func (m *Sealing) handleReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + return handleErrors(ctx, err, sector) + } + out, err := m.sealer.ReplicaUpdate(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), sector.pieceInfos()) + if err != nil { + return ctx.Send(SectorUpdateReplicaFailed{xerrors.Errorf("replica update failed: %w", err)}) + } + return ctx.Send(SectorReplicaUpdate{ + Out: out, + }) +} + +func (m *Sealing) handleProveReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + if sector.UpdateSealed == nil || sector.UpdateUnsealed == nil { + return xerrors.Errorf("invalid sector %d with nil UpdateSealed or UpdateUnsealed output", sector.SectorNumber) + } + if sector.CommR == nil { + return xerrors.Errorf("invalid sector %d with nil CommR", sector.SectorNumber) + } + + vanillaProofs, err := m.sealer.ProveReplicaUpdate1(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed) + if err != nil { + return 
ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (1) failed: %w", err)}) + } + + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + return handleErrors(ctx, err, sector) + } + + proof, err := m.sealer.ProveReplicaUpdate2(sector.sealingCtx(ctx.Context()), m.minerSector(sector.SectorType, sector.SectorNumber), *sector.CommR, *sector.UpdateSealed, *sector.UpdateUnsealed, vanillaProofs) + if err != nil { + return ctx.Send(SectorProveReplicaUpdateFailed{xerrors.Errorf("prove replica update (2) failed: %w", err)}) + + } + return ctx.Send(SectorProveReplicaUpdate{ + Proof: proof, + }) +} + +func (m *Sealing) handleSubmitReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + + tok, _, err := m.Api.ChainHead(ctx.Context()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + + if err := checkPieces(ctx.Context(), m.maddr, sector, m.Api); err != nil { // Sanity check state + return handleErrors(ctx, err, sector) + } + + if err := checkReplicaUpdate(ctx.Context(), m.maddr, sector, tok, m.Api); err != nil { + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + sl, err := m.Api.StateSectorPartition(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + updateProof, err := sector.SectorType.RegisteredUpdateProof() + if err != nil { + log.Errorf("failed to get update proof type from seal proof: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + enc := new(bytes.Buffer) + params := &miner.ProveReplicaUpdatesParams{ + Updates: []miner.ReplicaUpdate{ + { + SectorID: sector.SectorNumber, + Deadline: sl.Deadline, + Partition: sl.Partition, + NewSealedSectorCID: *sector.UpdateSealed, + Deals: sector.dealIDs(), + UpdateProofType: updateProof, + ReplicaProof: sector.ReplicaUpdateProof, + }, + }, + } + 
if err := params.MarshalCBOR(enc); err != nil { + log.Errorf("failed to serialize update replica params: %w", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting config: %w", err) + } + + onChainInfo, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + sp, err := m.currentSealProof(ctx.Context()) + if err != nil { + log.Errorf("sealer failed to return current seal proof not proceeding: %+v", err) + return nil + } + virtualPCI := miner.SectorPreCommitInfo{ + SealProof: sp, + SectorNumber: sector.SectorNumber, + SealedCID: *sector.UpdateSealed, + //SealRandEpoch: 0, + DealIDs: sector.dealIDs(), + Expiration: onChainInfo.Expiration, + //ReplaceCapacity: false, + //ReplaceSectorDeadline: 0, + //ReplaceSectorPartition: 0, + //ReplaceSectorNumber: 0, + } + + collateral, err := m.Api.StateMinerInitialPledgeCollateral(ctx.Context(), m.maddr, virtualPCI, tok) + if err != nil { + return xerrors.Errorf("getting initial pledge collateral: %w", err) + } + + collateral = big.Sub(collateral, onChainInfo.InitialPledge) + if collateral.LessThan(big.Zero()) { + collateral = big.Zero() + } + + collateral, err = collateralSendAmount(ctx.Context(), m.Api, m.maddr, cfg, collateral) + if err != nil { + log.Errorf("collateral send amount failed not proceeding: %+v", err) + return nil + } + + goodFunds := big.Add(collateral, big.Int(m.feeCfg.MaxCommitGasFee)) + + mi, err := m.Api.StateMinerInfo(ctx.Context(), m.maddr, tok) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: api error, not proceeding: %+v", err) + return nil + } + + from, _, err := m.addrSel(ctx.Context(), mi, api.CommitAddr, goodFunds, collateral) + if err != nil { + log.Errorf("no good address to send replica update message from: %+v", err) + return 
ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.ProveReplicaUpdates, big.Zero(), big.Int(m.feeCfg.MaxCommitGasFee), enc.Bytes()) + if err != nil { + log.Errorf("handleSubmitReplicaUpdate: error sending message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + return ctx.Send(SectorReplicaUpdateSubmitted{Message: mcid}) +} + +func (m *Sealing) handleReplicaUpdateWait(ctx statemachine.Context, sector SectorInfo) error { + if sector.ReplicaUpdateMessage == nil { + log.Errorf("handleReplicaUpdateWait: no replica update message cid recorded") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + mw, err := m.Api.StateWaitMsg(ctx.Context(), *sector.ReplicaUpdateMessage) + if err != nil { + log.Errorf("handleReplicaUpdateWait: failed to wait for message: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + switch mw.Receipt.ExitCode { + case exitcode.Ok: + //expected + case exitcode.SysErrInsufficientFunds: + fallthrough + case exitcode.SysErrOutOfGas: + log.Errorf("gas estimator was wrong or out of funds") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + default: + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + si, err := m.Api.StateSectorGetInfo(ctx.Context(), m.maddr, sector.SectorNumber, mw.TipSetTok) + if err != nil { + log.Errorf("api err failed to get sector info: %+v", err) + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + if si == nil { + log.Errorf("api err sector not found") + return ctx.Send(SectorSubmitReplicaUpdateFailed{}) + } + + if !si.SealedCID.Equals(*sector.UpdateSealed) { + log.Errorf("mismatch of expected onchain sealed cid after replica update, expected %s got %s", sector.UpdateSealed, si.SealedCID) + return ctx.Send(SectorAbortUpgrade{}) + } + return ctx.Send(SectorReplicaUpdateLanded{}) +} + +func (m *Sealing) handleFinalizeReplicaUpdate(ctx statemachine.Context, sector SectorInfo) error { + return 
ctx.Send(SectorFinalized{}) +} + +func handleErrors(ctx statemachine.Context, err error, sector SectorInfo) error { + switch err.(type) { + case *ErrApi: + log.Errorf("handleReplicaUpdate: api error, not proceeding: %+v", err) + return nil + case *ErrInvalidDeals: + log.Warnf("invalid deals in sector %d: %v", sector.SectorNumber, err) + return ctx.Send(SectorInvalidDealIDs{}) + case *ErrExpiredDeals: // Probably not much we can do here, maybe re-pack the sector? + return ctx.Send(SectorDealsExpired{xerrors.Errorf("expired dealIDs in sector: %w", err)}) + default: + return xerrors.Errorf("checkPieces sanity check error: %w", err) + } +} diff --git a/extern/storage-sealing/states_sealing.go b/extern/storage-sealing/states_sealing.go index 9dcb779a7..2258250f4 100644 --- a/extern/storage-sealing/states_sealing.go +++ b/extern/storage-sealing/states_sealing.go @@ -280,8 +280,8 @@ func (m *Sealing) handlePreCommit2(ctx statemachine.Context, sector SectorInfo) } // TODO: We should probably invoke this method in most (if not all) state transition failures after handlePreCommitting -func (m *Sealing) remarkForUpgrade(sid abi.SectorNumber) { - err := m.MarkForUpgrade(sid) +func (m *Sealing) remarkForUpgrade(ctx context.Context, sid abi.SectorNumber) { + err := m.MarkForUpgrade(ctx, sid) if err != nil { log.Errorf("error re-marking sector %d as for upgrade: %+v", sid, err) } @@ -424,7 +424,7 @@ func (m *Sealing) handlePreCommitting(ctx statemachine.Context, sector SectorInf mcid, err := m.Api.SendMsg(ctx.Context(), from, m.maddr, miner.Methods.PreCommitSector, deposit, big.Int(m.feeCfg.MaxPreCommitGasFee), enc.Bytes()) if err != nil { if params.ReplaceCapacity { - m.remarkForUpgrade(params.ReplaceSectorNumber) + m.remarkForUpgrade(ctx.Context(), params.ReplaceSectorNumber) } return ctx.Send(SectorChainPreCommitFailed{xerrors.Errorf("pushing message to mpool: %w", err)}) } @@ -595,7 +595,7 @@ func (m *Sealing) handleCommitting(ctx statemachine.Context, sector SectorInfo) } if 
err := m.checkCommit(ctx.Context(), sector, proof, tok); err != nil { - return ctx.Send(SectorComputeProofFailed{xerrors.Errorf("commit check error: %w", err)}) + return ctx.Send(SectorCommitFailed{xerrors.Errorf("commit check error: %w", err)}) } } diff --git a/extern/storage-sealing/stats.go b/extern/storage-sealing/stats.go index 28556866a..050204519 100644 --- a/extern/storage-sealing/stats.go +++ b/extern/storage-sealing/stats.go @@ -1,10 +1,16 @@ package sealing import ( + "context" "sync" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/extern/storage-sealing/sealiface" + "github.com/filecoin-project/lotus/metrics" ) type statSectorState int @@ -20,11 +26,12 @@ const ( type SectorStats struct { lk sync.Mutex - bySector map[abi.SectorID]statSectorState + bySector map[abi.SectorID]SectorState + byState map[SectorState]int64 totals [nsst]uint64 } -func (ss *SectorStats) updateSector(cfg sealiface.Config, id abi.SectorID, st SectorState) (updateInput bool) { +func (ss *SectorStats) updateSector(ctx context.Context, cfg sealiface.Config, id abi.SectorID, st SectorState) (updateInput bool) { ss.lk.Lock() defer ss.lk.Unlock() @@ -34,12 +41,20 @@ func (ss *SectorStats) updateSector(cfg sealiface.Config, id abi.SectorID, st Se // update totals oldst, found := ss.bySector[id] if found { - ss.totals[oldst]-- + ss.totals[toStatState(oldst, cfg.FinalizeEarly)]-- + ss.byState[oldst]-- + + mctx, _ := tag.New(ctx, tag.Upsert(metrics.SectorState, string(oldst))) + stats.Record(mctx, metrics.SectorStates.M(ss.byState[oldst])) } sst := toStatState(st, cfg.FinalizeEarly) - ss.bySector[id] = sst + ss.bySector[id] = st ss.totals[sst]++ + ss.byState[st]++ + + mctx, _ := tag.New(ctx, tag.Upsert(metrics.SectorState, string(st))) + stats.Record(mctx, metrics.SectorStates.M(ss.byState[st])) // check if we may need be able to process more deals sealing := ss.curSealingLocked() diff --git 
a/extern/storage-sealing/types.go b/extern/storage-sealing/types.go index aeb378f29..db53f43d3 100644 --- a/extern/storage-sealing/types.go +++ b/extern/storage-sealing/types.go @@ -73,7 +73,7 @@ type SectorInfo struct { // PreCommit2 CommD *cid.Cid - CommR *cid.Cid + CommR *cid.Cid // SectorKey Proof []byte PreCommitInfo *miner.SectorPreCommitInfo @@ -91,6 +91,14 @@ type SectorInfo struct { CommitMessage *cid.Cid InvalidProofs uint64 // failed proof computations (doesn't validate with proof inputs; can't compute) + // CCUpdate + CCUpdate bool + CCPieces []Piece + UpdateSealed *cid.Cid + UpdateUnsealed *cid.Cid + ReplicaUpdateProof storage.ReplicaUpdateProof + ReplicaUpdateMessage *cid.Cid + // Faults FaultReportMsg *cid.Cid diff --git a/extern/storage-sealing/upgrade_queue.go b/extern/storage-sealing/upgrade_queue.go index 02db41fde..1aacc9c08 100644 --- a/extern/storage-sealing/upgrade_queue.go +++ b/extern/storage-sealing/upgrade_queue.go @@ -4,6 +4,7 @@ import ( "context" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + market7 "github.com/filecoin-project/specs-actors/v7/actors/builtin/market" "golang.org/x/xerrors" @@ -18,7 +19,8 @@ func (m *Sealing) IsMarkedForUpgrade(id abi.SectorNumber) bool { return found } -func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { +func (m *Sealing) MarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { + m.upgradeLk.Lock() defer m.upgradeLk.Unlock() @@ -27,6 +29,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { return xerrors.Errorf("sector %d already marked for upgrade", id) } + si, err := m.GetSectorInfo(id) + if err != nil { + return xerrors.Errorf("getting sector info: %w", err) + } + if si.State != Proving { + return xerrors.Errorf("can't mark sectors not in the 'Proving' state for upgrade") + } + if len(si.Pieces) != 1 { + return xerrors.Errorf("not a committed-capacity sector, expected 1 piece") + } + if si.Pieces[0].DealInfo != nil { + return xerrors.Errorf("not a 
committed-capacity sector, has deals") + } + + m.toUpgrade[id] = struct{}{} + + return nil +} + +func (m *Sealing) MarkForSnapUpgrade(ctx context.Context, id abi.SectorNumber) error { + cfg, err := m.getConfig() + if err != nil { + return xerrors.Errorf("getting storage config: %w", err) + } + + curStaging := m.stats.curStaging() + if cfg.MaxWaitDealsSectors > 0 && curStaging >= cfg.MaxWaitDealsSectors { + return xerrors.Errorf("already waiting for deals in %d >= %d (cfg.MaxWaitDealsSectors) sectors, no free resources to wait for deals in another", + curStaging, cfg.MaxWaitDealsSectors) + } + si, err := m.GetSectorInfo(id) if err != nil { return xerrors.Errorf("getting sector info: %w", err) @@ -44,11 +77,37 @@ func (m *Sealing) MarkForUpgrade(id abi.SectorNumber) error { return xerrors.Errorf("not a committed-capacity sector, has deals") } - // TODO: more checks to match actor constraints + tok, head, err := m.Api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("couldnt get chain head: %w", err) + } + onChainInfo, err := m.Api.StateSectorGetInfo(ctx, m.maddr, id, tok) + if err != nil { + return xerrors.Errorf("failed to read sector on chain info: %w", err) + } - m.toUpgrade[id] = struct{}{} + active, err := m.Api.StateMinerActiveSectors(ctx, m.maddr, tok) + if err != nil { + return xerrors.Errorf("failed to check active sectors: %w", err) + } + // Ensure the upgraded sector is active + var found bool + for _, si := range active { + if si.SectorNumber == id { + found = true + break + } + } + if !found { + return xerrors.Errorf("cannot mark inactive sector for upgrade") + } - return nil + if onChainInfo.Expiration-head < market7.DealMinDuration { + return xerrors.Errorf("pointless to upgrade sector %d, expiration %d is less than a min deal duration away from current epoch."+ + "Upgrade expiration before marking for upgrade", id, onChainInfo.Expiration) + } + + return m.sectors.Send(uint64(id), SectorStartCCUpdate{}) } func (m *Sealing) tryUpgradeSector(ctx 
context.Context, params *miner.SectorPreCommitInfo) big.Int { diff --git a/gateway/node.go b/gateway/node.go index 56f95a31b..a0c120d39 100644 --- a/gateway/node.go +++ b/gateway/node.go @@ -33,6 +33,8 @@ const ( // (to make it easy to mock for tests) type TargetAPI interface { Version(context.Context) (api.APIVersion, error) + ChainGetParentMessages(context.Context, cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(context.Context, cid.Cid) ([]*types.MessageReceipt, error) ChainGetBlockMessages(context.Context, cid.Cid) (*api.BlockMessages, error) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, error) @@ -44,6 +46,7 @@ type TargetAPI interface { ChainNotify(context.Context) (<-chan []*api.HeadChange, error) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetGenesis(context.Context) (*types.TipSet, error) GasEstimateMessageGas(ctx context.Context, msg *types.Message, spec *api.MessageSendSpec, tsk types.TipSetKey) (*types.Message, error) MpoolPushUntrusted(ctx context.Context, sm *types.SignedMessage) (cid.Cid, error) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) @@ -144,6 +147,14 @@ func (gw *Node) Version(ctx context.Context) (api.APIVersion, error) { return gw.target.Version(ctx) } +func (gw *Node) ChainGetParentMessages(ctx context.Context, c cid.Cid) ([]api.Message, error) { + return gw.target.ChainGetParentMessages(ctx, c) +} + +func (gw *Node) ChainGetParentReceipts(ctx context.Context, c cid.Cid) ([]*types.MessageReceipt, error) { + return gw.target.ChainGetParentReceipts(ctx, c) +} + func (gw *Node) ChainGetBlockMessages(ctx context.Context, c cid.Cid) (*api.BlockMessages, error) { return gw.target.ChainGetBlockMessages(ctx, c) } @@ -231,6 +242,10 @@ func (gw *Node) ChainGetPath(ctx context.Context, 
from, to types.TipSetKey) ([]* return gw.target.ChainGetPath(ctx, from, to) } +func (gw *Node) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { + return gw.target.ChainGetGenesis(ctx) +} + func (gw *Node) ChainReadObj(ctx context.Context, c cid.Cid) ([]byte, error) { return gw.target.ChainReadObj(ctx, c) } diff --git a/gen/api/proxygen.go b/gen/api/proxygen.go index 3e0766c31..df39132ff 100644 --- a/gen/api/proxygen.go +++ b/gen/api/proxygen.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" "text/template" - "unicode" "golang.org/x/xerrors" ) @@ -71,9 +70,6 @@ func typeName(e ast.Expr, pkg string) (string, error) { return t.X.(*ast.Ident).Name + "." + t.Sel.Name, nil case *ast.Ident: pstr := t.Name - if !unicode.IsLower(rune(pstr[0])) && pkg != "api" { - pstr = "api." + pstr // todo src pkg name - } return pstr, nil case *ast.ArrayType: subt, err := typeName(t.Elt, pkg) diff --git a/gen/inline-gen/main.go b/gen/inline-gen/main.go new file mode 100644 index 000000000..d97134cdd --- /dev/null +++ b/gen/inline-gen/main.go @@ -0,0 +1,123 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "strings" + "text/template" +) + +const ( + stateGlobal = iota + stateTemplate + stateGen +) + +func main() { + db, err := ioutil.ReadFile(os.Args[2]) + if err != nil { + panic(err) + } + var data map[string]interface{} + if err := json.Unmarshal(db, &data); err != nil { + panic(err) + } + + err = filepath.WalkDir(os.Args[1], func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if d.IsDir() { + return nil + } + if filepath.Ext(path) != ".go" { + return nil + } + fb, err := ioutil.ReadFile(path) + if err != nil { + return err + } + + lines := strings.Split(string(fb), "\n") + + outLines := make([]string, 0, len(lines)) + var templateLines []string + + state := stateGlobal + + rewrite := false + + for i, line := range lines { + ln := i + 1 + switch state { + case 
stateGlobal: + outLines = append(outLines, line) + if strings.TrimSpace(line) == `/* inline-gen template` { + state = stateTemplate + fmt.Printf("template section start %s:%d\n", path, ln) + } + case stateTemplate: + outLines = append(outLines, line) // output all template lines + + if strings.TrimSpace(line) == `/* inline-gen start */` { + state = stateGen + fmt.Printf("generated section start %s:%d\n", path, ln) + continue + } + templateLines = append(templateLines, line) + case stateGen: + if strings.TrimSpace(line) != `/* inline-gen end */` { + continue + } + fmt.Printf("generated section end %s:%d\n", path, ln) + + state = stateGlobal + rewrite = true + + tpl, err := template.New("").Funcs(template.FuncMap{ + "import": func(v float64) string { + if v == 0 { + return "/" + } + return fmt.Sprintf("/v%d/", int(v)) + }, + "add": func(a, b float64) float64 { + return a + b + }, + }).Parse(strings.Join(templateLines, "\n")) + if err != nil { + fmt.Printf("%s:%d: parsing template: %s\n", path, ln, err) + os.Exit(1) + } + + var b bytes.Buffer + err = tpl.Execute(&b, data) + if err != nil { + fmt.Printf("%s:%d: executing template: %s\n", path, ln, err) + os.Exit(1) + } + + outLines = append(outLines, strings.Split(b.String(), "\n")...) 
+ outLines = append(outLines, line) + templateLines = nil + } + } + + if rewrite { + fmt.Printf("write %s\n", path) + if err := ioutil.WriteFile(path, []byte(strings.Join(outLines, "\n")), 0664); err != nil { + return err + } + } + + return nil + }) + if err != nil { + panic(err) + } +} diff --git a/gen/inlinegen-data.json b/gen/inlinegen-data.json new file mode 100644 index 000000000..ef97db651 --- /dev/null +++ b/gen/inlinegen-data.json @@ -0,0 +1,7 @@ +{ + "actorVersions": [0, 2, 3, 4, 5, 6, 7], + "latestActorsVersion": 7, + + "networkVersions": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], + "latestNetworkVersion": 15 +} diff --git a/go.mod b/go.mod index 5487c88d7..3108a53c3 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/filecoin-project/lotus go 1.16 require ( - contrib.go.opencensus.io/exporter/jaeger v0.2.1 contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/BurntSushi/toml v0.4.1 github.com/GeertJohan/go.rice v1.0.2 @@ -16,49 +15,52 @@ require ( github.com/btcsuite/btcutil v1.0.3-0.20210929233259-9cdf59f60c51 github.com/buger/goterm v1.0.3 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e - github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 + github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 github.com/coreos/go-systemd/v22 v22.3.2 - github.com/cronokirby/safenum v0.29.0 // indirect + github.com/cronokirby/safenum v0.29.0 github.com/detailyang/go-fallocate v0.0.0-20180908115635-432fa640bd2e - github.com/dgraph-io/badger/v2 v2.2007.2 + github.com/dgraph-io/badger/v2 v2.2007.3 + github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/docker/go-units v0.4.0 - github.com/drand/drand v1.2.1 - github.com/drand/kyber v1.1.4 + github.com/drand/drand v1.3.0 + github.com/drand/kyber v1.1.7 github.com/dustin/go-humanize v1.0.0 github.com/elastic/go-sysinfo v1.7.0 github.com/elastic/gosigar v0.14.1 github.com/etclabscore/go-openrpc-reflect v0.0.36 github.com/fatih/color v1.13.0 - 
github.com/filecoin-project/dagstore v0.4.3 + github.com/filecoin-project/dagstore v0.4.4 github.com/filecoin-project/filecoin-ffi v0.30.4-0.20200910194244-f640612a1a1f github.com/filecoin-project/go-address v0.0.6 github.com/filecoin-project/go-bitfield v0.2.4 github.com/filecoin-project/go-cbor-util v0.0.1 - github.com/filecoin-project/go-commp-utils v0.1.2 + github.com/filecoin-project/go-commp-utils v0.1.3 github.com/filecoin-project/go-crypto v0.0.1 - github.com/filecoin-project/go-data-transfer v1.11.1 + github.com/filecoin-project/go-data-transfer v1.14.0 github.com/filecoin-project/go-fil-commcid v0.1.0 github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 - github.com/filecoin-project/go-fil-markets v1.13.1 + github.com/filecoin-project/go-fil-markets v1.19.0 github.com/filecoin-project/go-jsonrpc v0.1.5 github.com/filecoin-project/go-padreader v0.0.1 - github.com/filecoin-project/go-paramfetch v0.0.2 - github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 + github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 + github.com/filecoin-project/go-state-types v0.1.3 github.com/filecoin-project/go-statemachine v1.0.1 - github.com/filecoin-project/go-statestore v0.1.1 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b + github.com/filecoin-project/go-statestore v0.2.0 + github.com/filecoin-project/go-storedcounter v0.1.0 github.com/filecoin-project/specs-actors v0.9.14 - github.com/filecoin-project/specs-actors/v2 v2.3.5 + github.com/filecoin-project/specs-actors/v2 v2.3.6 github.com/filecoin-project/specs-actors/v3 v3.1.1 github.com/filecoin-project/specs-actors/v4 v4.0.1 github.com/filecoin-project/specs-actors/v5 v5.0.4 - github.com/filecoin-project/specs-actors/v6 v6.0.0-20211001193936-c3afe7fa3c5c - github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 + github.com/filecoin-project/specs-actors/v6 v6.0.1 + github.com/filecoin-project/specs-actors/v7 
v7.0.0-rc1 + github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 github.com/filecoin-project/test-vectors/schema v0.0.5 - github.com/gammazero/keymutex v0.0.0-20211002043844-c7ebad3e5479 - github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 + github.com/gammazero/keymutex v0.0.2 + github.com/gbrlsnchs/jwt/v3 v3.0.1 github.com/gdamore/tcell/v2 v2.2.0 github.com/go-kit/kit v0.12.0 + github.com/golang/glog v1.0.0 // indirect github.com/golang/mock v1.6.0 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.7.4 @@ -70,81 +72,82 @@ require ( github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/ipfs/bbloom v0.0.4 - github.com/ipfs/go-bitswap v0.3.4 + github.com/ipfs/go-bitswap v0.5.1 github.com/ipfs/go-block-format v0.0.3 - github.com/ipfs/go-blockservice v0.1.7 + github.com/ipfs/go-blockservice v0.2.1 github.com/ipfs/go-cid v0.1.0 github.com/ipfs/go-cidutil v0.0.2 - github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e - github.com/ipfs/go-ds-leveldb v0.4.2 - github.com/ipfs/go-ds-measure v0.1.0 - github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ds-badger2 v0.1.2 + github.com/ipfs/go-ds-leveldb v0.5.0 + github.com/ipfs/go-ds-measure v0.2.0 github.com/ipfs/go-fs-lock v0.0.6 - github.com/ipfs/go-graphsync v0.10.1 - github.com/ipfs/go-ipfs-blockstore v1.0.4 + github.com/ipfs/go-graphsync v0.12.0 + github.com/ipfs/go-ipfs-blockstore v1.1.2 github.com/ipfs/go-ipfs-blocksutil v0.0.1 github.com/ipfs/go-ipfs-chunker v0.0.5 - github.com/ipfs/go-ipfs-ds-help v1.0.0 - github.com/ipfs/go-ipfs-exchange-interface v0.0.1 - github.com/ipfs/go-ipfs-exchange-offline v0.0.1 + github.com/ipfs/go-ipfs-ds-help v1.1.0 + github.com/ipfs/go-ipfs-exchange-interface v0.1.0 + github.com/ipfs/go-ipfs-exchange-offline v0.1.1 github.com/ipfs/go-ipfs-files v0.0.9 
github.com/ipfs/go-ipfs-http-client v0.0.6 - github.com/ipfs/go-ipfs-routing v0.1.0 + github.com/ipfs/go-ipfs-routing v0.2.1 github.com/ipfs/go-ipfs-util v0.0.2 - github.com/ipfs/go-ipld-cbor v0.0.5 + github.com/ipfs/go-ipld-cbor v0.0.6 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.4.1 + github.com/ipfs/go-ipld-legacy v0.1.1 // indirect + github.com/ipfs/go-log/v2 v2.5.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-metrics-interface v0.0.1 github.com/ipfs/go-metrics-prometheus v0.0.2 github.com/ipfs/go-path v0.0.7 - github.com/ipfs/go-unixfs v0.2.6 + github.com/ipfs/go-unixfs v0.3.1 github.com/ipfs/interface-go-ipfs-core v0.4.0 - github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 - github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 + github.com/ipld/go-car v0.3.3 + github.com/ipld/go-car/v2 v2.1.1 github.com/ipld/go-codec-dagpb v1.3.0 - github.com/ipld/go-ipld-prime v0.12.3 - github.com/ipld/go-ipld-selector-text-lite v0.0.0 + github.com/ipld/go-ipld-prime v0.14.4 + github.com/ipld/go-ipld-selector-text-lite v0.0.1 + github.com/jonboulle/clockwork v0.2.2 // indirect github.com/kelseyhightower/envconfig v1.4.0 github.com/libp2p/go-buffer-pool v0.0.2 github.com/libp2p/go-eventbus v0.2.1 - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-connmgr v0.2.4 - github.com/libp2p/go-libp2p-core v0.9.0 - github.com/libp2p/go-libp2p-discovery v0.5.1 - github.com/libp2p/go-libp2p-kad-dht v0.13.0 - github.com/libp2p/go-libp2p-mplex v0.4.1 - github.com/libp2p/go-libp2p-noise v0.2.2 - github.com/libp2p/go-libp2p-peerstore v0.2.9 - github.com/libp2p/go-libp2p-pubsub v0.5.4 - github.com/libp2p/go-libp2p-quic-transport v0.11.2 + github.com/libp2p/go-libp2p v0.18.0-rc3 + github.com/libp2p/go-libp2p-connmgr v0.3.1 // indirect + github.com/libp2p/go-libp2p-core v0.14.0 + github.com/libp2p/go-libp2p-discovery v0.6.0 + github.com/libp2p/go-libp2p-kad-dht v0.15.0 + 
github.com/libp2p/go-libp2p-noise v0.3.0 + github.com/libp2p/go-libp2p-peerstore v0.6.0 + github.com/libp2p/go-libp2p-pubsub v0.6.1 + github.com/libp2p/go-libp2p-quic-transport v0.16.0 github.com/libp2p/go-libp2p-record v0.1.3 + github.com/libp2p/go-libp2p-resource-manager v0.1.3 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 - github.com/libp2p/go-libp2p-swarm v0.5.3 - github.com/libp2p/go-libp2p-tls v0.2.0 - github.com/libp2p/go-libp2p-yamux v0.5.4 + github.com/libp2p/go-libp2p-swarm v0.10.1 + github.com/libp2p/go-libp2p-tls v0.3.1 + github.com/libp2p/go-libp2p-yamux v0.8.1 github.com/libp2p/go-maddr-filter v0.1.0 github.com/mattn/go-isatty v0.0.14 github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 - github.com/minio/minio-go/v7 v7.0.16 // indirect + github.com/minio/minio-go/v7 v7.0.16 github.com/mitchellh/go-homedir v1.1.0 github.com/multiformats/go-base32 v0.0.4 - github.com/multiformats/go-multiaddr v0.4.1 + github.com/multiformats/go-multiaddr v0.5.0 github.com/multiformats/go-multiaddr-dns v0.3.1 github.com/multiformats/go-multibase v0.0.3 - github.com/multiformats/go-multihash v0.0.16 + github.com/multiformats/go-multihash v0.1.0 github.com/multiformats/go-varint v0.0.6 github.com/open-rpc/meta-schema v0.0.0-20201029221707-1b72ef2ea333 github.com/opentracing/opentracing-go v1.2.0 github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e github.com/prometheus/client_golang v1.11.0 github.com/raulk/clock v1.1.0 - github.com/raulk/go-watchdog v1.0.1 + github.com/raulk/go-watchdog v1.2.0 github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 github.com/stretchr/testify v1.7.0 github.com/syndtr/goleveldb v1.0.0 - github.com/tidwall/gjson v1.9.3 // indirect + github.com/uber/jaeger-client-go v2.25.0+incompatible // indirect github.com/urfave/cli/v2 v2.2.0 github.com/whyrusleeping/bencher v0.0.0-20190829221104-bb6607aa8bba github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 @@ -153,21 +156,34 @@ require ( 
github.com/whyrusleeping/pubsub v0.0.0-20190708150250-92bcb0691325 github.com/xorcare/golden v0.6.1-0.20191112154924-b87f686d7542 go.opencensus.io v0.23.0 + go.opentelemetry.io/otel v1.3.0 + go.opentelemetry.io/otel/bridge/opencensus v0.25.0 + go.opentelemetry.io/otel/exporters/jaeger v1.2.0 + go.opentelemetry.io/otel/sdk v1.2.0 go.uber.org/dig v1.10.0 // indirect go.uber.org/fx v1.9.0 go.uber.org/multierr v1.7.0 go.uber.org/zap v1.19.1 - golang.org/x/net v0.0.0-20210917221730-978cfadd31cf + golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b // indirect + golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 + golang.org/x/sys v0.0.0-20211209171907-798191bca915 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac - golang.org/x/tools v0.1.5 + golang.org/x/tools v0.1.7 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 gopkg.in/cheggaaa/pb.v1 v1.0.28 gotest.tools v2.2.0+incompatible + lukechampine.com/blake3 v1.1.7 // indirect ) -replace github.com/libp2p/go-libp2p-yamux => github.com/libp2p/go-libp2p-yamux v0.5.1 +// FIXME: Replacing with a fork to upgrade go-ipld-cbor and support to network v15 and v7 while keeping new actors +// in the eudico repo. +// In this fork we also include a SendWithSerializedParams required to forward messages with already serialized +// params from an actor. +replace github.com/filecoin-project/specs-actors/v7 => github.com/adlrocha/specs-actors/v7 v7.0.0-rc1.0.20220215102846-08bb2fde502a + +// FIXME: Replacing with a fork that includes support to hierarchical consensus addresses. 
+replace github.com/filecoin-project/go-address => github.com/adlrocha/go-address v0.0.7-0.20220201161333-9140b209222d replace github.com/filecoin-project/filecoin-ffi => ./extern/filecoin-ffi diff --git a/go.sum b/go.sum index 63d5700e2..e02ce6b48 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,6 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= -contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= @@ -82,14 +80,14 @@ github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9 github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20211117140501-65990deeb804 h1:zQrDkZ4qvHYKvqCJ2ljZ8VVSwHa1YC6nT8FFNQIahf0= -github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20211117140501-65990deeb804/go.mod h1:KOfSxMrl13ge7DX30LdWQyw4zrVzyZc9uykL77Q/TXc= -github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20211202131736-ca8cb1c7e1a1 h1:txHoW8OAMuzzEJqKy/krlsq45hMtySGKxW6W5LIlveM= 
-github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20211202131736-ca8cb1c7e1a1/go.mod h1:KOfSxMrl13ge7DX30LdWQyw4zrVzyZc9uykL77Q/TXc= github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20220119135030-8f412195b286 h1:ZxenR9Lvj/0m86G4oo/3mFKLBXdM+UpnpxzwWQybPq0= github.com/Zondax/multi-party-sig v0.6.0-alpha-2021-09-21.0.20220119135030-8f412195b286/go.mod h1:KOfSxMrl13ge7DX30LdWQyw4zrVzyZc9uykL77Q/TXc= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= +github.com/adlrocha/go-address v0.0.7-0.20220201161333-9140b209222d h1:49cRLnRjRE3YO4NsdFhkVhz2Y/Zv5a/V7a9iFZW76/E= +github.com/adlrocha/go-address v0.0.7-0.20220201161333-9140b209222d/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/adlrocha/specs-actors/v7 v7.0.0-rc1.0.20220215102846-08bb2fde502a h1:P3KHHAOD20pEI2MC8SYTRqrfSrBq6BSink8ROQDhjGE= +github.com/adlrocha/specs-actors/v7 v7.0.0-rc1.0.20220215102846-08bb2fde502a/go.mod h1:QQisUhgXrjKFA1u8d3k4MDL0A04sDUQrUTJUsLRJDBk= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= @@ -102,6 +100,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a 
h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc= +github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -123,11 +123,11 @@ github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVj github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -167,8 +167,6 @@ github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRt github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 
v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -182,6 +180,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= @@ -192,15 +191,6 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4 
h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07 h1:Cb2pZUCFXlLA8i7My+wrN51D41GeuhYOKa1dJeZt6NY= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3 h1:2+dpIJzYMSbLi0587YXpi8tOJT52qCOI/1I0UNThc/I= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327 h1:7grrpcfCtbZLsjtB0DgMuzs1umsJmpzaHMZ6cO6iAWw= @@ -215,7 +205,6 @@ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= 
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= @@ -242,7 +231,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= -github.com/davidlazar/go-crypto v0.0.0-20190912175916-7055855a373f/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60= @@ -258,13 +246,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod 
h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= @@ -272,12 +259,13 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 
h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -316,18 +304,16 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= -github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= -github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= -github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= 
github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= @@ -335,26 +321,24 @@ github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQj github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-commp-utils v0.1.2 h1:SKLRuGdx/6WlolaWKaUzzUYWGGePuARyO4guxOPxvt4= -github.com/filecoin-project/go-commp-utils v0.1.2/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= github.com/filecoin-project/go-crypto v0.0.1 
h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.11.1 h1:fiw2FHDVSDrt427cGp7+Ax3TTZk0e6HvF9Odcl2etBM= -github.com/filecoin-project/go-data-transfer v1.11.1/go.mod h1:2MitLI0ebCkLlPKM7NRggP/t9d+gCcREUKkCKqWRCwU= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-data-transfer v1.14.0 h1:4pnfJk8FYtqcdAg+QRGzaz57seUC/Tz+HJgPuGB7zdg= +github.com/filecoin-project/go-data-transfer v1.14.0/go.mod h1:wNJKhaLLYBJDM3VFvgvYi4iUjPa69pz/1Q5Q4HzX2wE= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= +github.com/filecoin-project/go-ds-versioning v0.1.1 h1:JiyBqaQlwC+UM0WhcBtVEeT3XrX59mQhT8U3p7nu86o= +github.com/filecoin-project/go-ds-versioning v0.1.1/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets 
v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.13.1 h1:KjarxgKp/RN4iYXT2pMcMq6veIa1guGJMoVtnwru4BQ= -github.com/filecoin-project/go-fil-markets v1.13.1/go.mod h1:58OjtsWtDt3xlN1QLmgDQxtfCDtDS4RIyHepIUbqXhM= +github.com/filecoin-project/go-fil-markets v1.19.0 h1:kap2q2wTM6tfkVO5gMA5DD9GUeTvkDhMfhjCtEwMDM8= +github.com/filecoin-project/go-fil-markets v1.19.0/go.mod h1:qsb3apmo4RSJYCEq40QxVdU7UZospN6nFJLOBHuaIbc= github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -364,39 +348,35 @@ github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AG github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= github.com/filecoin-project/go-padreader v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= -github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= 
+github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 h1:UmKkt13NrtulubqfNXhG7SQ7Pjza8BeKdNBxngqAo64= -github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.3 h1:rzIJyQo5HO2ptc8Jcu8P0qTutnI7NWwTle54eAHoNO0= +github.com/filecoin-project/go-state-types v0.1.3/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= 
github.com/filecoin-project/go-statemachine v1.0.1/go.mod h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 
v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= +github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= @@ -406,14 +386,13 @@ github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIP github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= -github.com/filecoin-project/specs-actors/v6 v6.0.0-20211001193936-c3afe7fa3c5c h1:29m9oz0AP3TglBFC9Sii9M3skIAbhZhZr+2FyomSTTo= -github.com/filecoin-project/specs-actors/v6 v6.0.0-20211001193936-c3afe7fa3c5c/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= +github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod 
h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/test-vectors/schema v0.0.5 h1:w3zHQhzM4pYxJDl21avXjOKBLF8egrvwUwjpT8TquDg= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -422,24 +401,22 @@ github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiD github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fxamacker/cbor/v2 v2.3.0 h1:aM45YGMctNakddNNAezPxDUpv38j44Abh+hifNuqXik= github.com/fxamacker/cbor/v2 v2.3.0/go.mod 
h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= -github.com/gammazero/keymutex v0.0.0-20211002043844-c7ebad3e5479 h1:lCqfYOAqlMRixZ/t6MB7Khu1YSTrEE4GEIsiF0qNSdY= -github.com/gammazero/keymutex v0.0.0-20211002043844-c7ebad3e5479/go.mod h1:qtzWCCLMisQUmVa4dvqHVgwfh4BP2YB7JxNDGXnsKrs= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gammazero/keymutex v0.0.2 h1:cmpLBJHdEwn+WlR5Z/o9/BN92znSZTp5AKPQDpu1QcI= +github.com/gammazero/keymutex v0.0.2/go.mod h1:qtzWCCLMisQUmVa4dvqHVgwfh4BP2YB7JxNDGXnsKrs= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= -github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= @@ -461,6 +438,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod 
h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0 h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -506,6 +488,8 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -543,7 +527,6 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy 
v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -611,7 +594,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -688,23 +670,21 @@ github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= +github.com/ipfs/go-bitfield v1.0.0 h1:y/XHm2GEmD9wKngheWNNCNL0pzrWXZwCdQGv1ikXknQ= +github.com/ipfs/go-bitfield v1.0.0/go.mod h1:N/UiujQy+K+ceU1EF5EkVd1TNqevLrCQMIcAEPrdtus= github.com/ipfs/go-bitswap v0.0.9/go.mod 
h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= -github.com/ipfs/go-blockservice v0.1.7 h1:yVe9te0M7ow8i+PPkx03YFSpxqzXx594d6h+34D6qMg= -github.com/ipfs/go-blockservice v0.1.7/go.mod h1:GmS+BAt4hrwBKkzE11AFDQUrnvqjwFatGS2MY7wOjEM= +github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= 
github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -714,7 +694,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -723,15 +702,14 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 
h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -739,41 +717,31 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod 
h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459 h1:W3YMLEvOXqdW+sYMiguhWP6txJwQvIQqhvpU8yAMGQs= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= 
-github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.10.0/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-graphsync v0.10.1 h1:m6nNwiRFE2FVBTCxHWVTRApjH0snIjFy7fkDbOlMa/I= -github.com/ipfs/go-graphsync v0.10.1/go.mod h1:cKIshzTaa5rCZjryH5xmSKZVGX9uk1wvwGvz2WEha5Y= -github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-graphsync v0.12.0 h1:QCsVHVzb9FTkcm3NEa8GjXnUeGit1L9s08HcSVQ4m/g= +github.com/ipfs/go-graphsync v0.12.0/go.mod h1:nASYWYETgsnMbQ3+DirNImOHQ8TY0a5AhAqyOY55tUg= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v0.1.6/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= 
github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -788,12 +756,14 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= -github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.8/go.mod 
h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= @@ -807,23 +777,26 @@ github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod 
h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-legacy v0.1.0 h1:wxkkc4k8cnvIGIjPO0waJCe7SHEyFgl+yQdafdjGrpA= github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -843,15 +816,16 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= +github.com/ipfs/go-log/v2 v2.5.0 h1:+MhAooFd9XZNvR0i9FriKW6HB0ql7HNXUuflWtc0dd4= +github.com/ipfs/go-log/v2 v2.5.0/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod 
h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.4.1 h1:CEEQZnwRkszN06oezuasHwDD823Xcr4p4zluUN9vXqs= -github.com/ipfs/go-merkledag v0.4.1/go.mod h1:56biPaS6e+IS0eXkEt6A8tG+BUQaEIFqDqJuFfQDBoE= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2 h1:9i2iljLg12S78OhC6UAiXi176xvQGiZaGVF1CUVdE+s= @@ -860,14 +834,16 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod 
h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= -github.com/ipfs/go-unixfs v0.2.6 h1:gq3U3T2vh8x6tXhfo3uSO3n+2z4yW0tYtNgVP/3sIyA= -github.com/ipfs/go-unixfs v0.2.6/go.mod h1:GTTzQvaZsTZARdNkkdjDKFFnBhmO3e5mIM1PkH/x4p0= +github.com/ipfs/go-unixfs v0.3.1 h1:LrfED0OGfG98ZEegO4/xiprx2O+yS+krCMQSp7zLVv8= +github.com/ipfs/go-unixfs v0.3.1/go.mod h1:h4qfQYzghiIc8ZNFKiLMFWOTzrWIAtzYQ59W/pCFf1o= +github.com/ipfs/go-unixfsnode v1.2.0 h1:tHHBJftsJyHGa8bS62PpkYNqHy/Sug3c/vxxC8NaGQY= +github.com/ipfs/go-unixfsnode v1.2.0/go.mod h1:mQEgLjxkV/1mohkC4p7taRRBYPBeXu97SA3YaerT2q0= github.com/ipfs/go-verifcid v0.0.1 h1:m2HI7zIuR5TFyQ1b79Da5N9dnnCP1vcu2QqawmWlK2E= github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= github.com/ipfs/interface-go-ipfs-core v0.4.0 h1:+mUiamyHIwedqP8ZgbCIwpy40oX7QcXUbo4CZOeJVJg= @@ -877,34 +853,29 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823 h1:8JMSJ0k71fU9lIUrpVwEdoX4KoxiTEX8cZG97v/hTDw= -github.com/ipld/go-car v0.3.2-0.20211001225732-32d0d9933823/go.mod h1:jSlTph+i/q1jLFoiKKeN69KGG0fXpwrcD0izu5C1Tpo= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 
v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime 
v0.12.3-0.20210930132912-0b3aef3ca569/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= -github.com/ipld/go-ipld-prime v0.12.3 h1:furVobw7UBLQZwlEwfE26tYORy3PAK8VYSgZOSr3JMQ= -github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.4 h1:bqhmume8+nbNsX4/+J6eohktfZHAI8GKrF3rQ0xgOyc= +github.com/ipld/go-ipld-prime v0.14.4/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= -github.com/ipld/go-ipld-selector-text-lite v0.0.0 h1:MLU1YUAgd3Z+RfVCXUbvxH1RQjEe+larJ9jmlW1aMgA= -github.com/ipld/go-ipld-selector-text-lite v0.0.0/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 
v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -912,7 +883,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -934,8 +904,9 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -983,7 +954,6 @@ github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -995,8 +965,9 @@ github.com/koron/go-ssdp v0.0.2/go.mod h1:XoLfkAiA2KeZsYh4DbHxD7h3nR2AZNqVQOa+LJ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -1004,7 +975,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/libp2p/go-addr-util 
v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= -github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI= github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= @@ -1015,8 +985,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1030,53 +1001,51 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod 
h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= -github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= +github.com/libp2p/go-libp2p v0.18.0-rc1/go.mod h1:RgYlH7IIWHXREimC92bw5Lg1V2R5XmSzuLHb5fTnr+8= +github.com/libp2p/go-libp2p v0.18.0-rc3 h1:tI+dAFDgOCeHRF6FgvXpqbrVz+ZFabX/pXO2BUdHu4o= +github.com/libp2p/go-libp2p v0.18.0-rc3/go.mod h1:WYL+Xw1iuwi6rdfzw5VIEpD+HqzYucHZ6fcUuumbI3M= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod 
h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod 
h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.3/go.mod h1:Xqh2TjSy8DD5iV2cCOMzdynd6h8OTBGoV1AWbWor3qM= github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.2.2/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCyJp1Eo4A1xYdpjfs4= -github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= +github.com/libp2p/go-libp2p-circuit v0.6.0 h1:rw/HlhmUB3OktS/Ygz6+2XABOmHKzZpPUuMNUMosj8w= +github.com/libp2p/go-libp2p-circuit v0.6.0/go.mod h1:kB8hY+zCpMeScyvFrKrGicRdid6vNXbunKE4rXATZ0M= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= 
+github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= +github.com/libp2p/go-libp2p-connmgr v0.3.1 h1:alEy2fpGKFu+7ZhQF4GF0dvKLyVHeLtIfS/KziwoiZw= +github.com/libp2p/go-libp2p-connmgr v0.3.1/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1106,8 +1075,13 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.14.0 h1:0kYSgiK/D7Eo28GTuRXo5YHsWwAisVpFCqCVPUd/vJs= +github.com/libp2p/go-libp2p-core v0.14.0/go.mod h1:tLasfcVdTXnixsLB0QYaT1syJOhsbrhG7q6pGrHtBg8= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod 
h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1116,10 +1090,9 @@ github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2T github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1127,8 +1100,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 
h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1143,21 +1116,22 @@ github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiY github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= -github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= +github.com/libp2p/go-libp2p-mplex v0.5.0 h1:vt3k4E4HSND9XH4Z8rUpacPJFSAgLOv6HDvG8W9Ks9E= +github.com/libp2p/go-libp2p-mplex v0.5.0/go.mod h1:eLImPJLkj3iG5t5lq68w3Vm5NAQ5BcKwrrb2VmOYb3M= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod 
h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1169,32 +1143,37 @@ github.com/libp2p/go-libp2p-peerstore v0.1.4/go.mod h1:+4BDbDiiKf4PzpANZDAT+knVd github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.3/go.mod h1:K8ljLdFn590GMttg/luh4caB/3g0vKuY01psze0upRw= -github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= 
-github.com/libp2p/go-libp2p-peerstore v0.2.9 h1:tVa7siDymmzOl3b3+SxPYpQUCnicmK13y6Re1PqWK+g= -github.com/libp2p/go-libp2p-peerstore v0.2.9/go.mod h1:zhBaLzxiWpNGQ3+uI17G/OIjmOD8GxKyFuHbrZbgs0w= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= -github.com/libp2p/go-libp2p-pubsub v0.5.4 h1:rHl9/Xok4zX3zgi0pg0XnUj9Xj2OeXO8oTu85q2+YA8= -github.com/libp2p/go-libp2p-pubsub v0.5.4/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= +github.com/libp2p/go-libp2p-pubsub v0.6.1 h1:wycbV+f4rreCoVY61Do6g/BUk0RIrbNRcYVbn+QkjGk= +github.com/libp2p/go-libp2p-pubsub v0.6.1/go.mod h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= -github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= 
github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.16.0 h1:aVg9/jr+R2esov5sH7wkXrmYmqJiUjtLMLYX3L9KYdY= +github.com/libp2p/go-libp2p-quic-transport v0.16.0/go.mod h1:1BXjVMzr+w7EkPfiHkKnwsWjPjtfaNT0q8RS3tGDvEQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk= github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0= github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4= +github.com/libp2p/go-libp2p-resource-manager v0.1.0/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= +github.com/libp2p/go-libp2p-resource-manager v0.1.3 h1:Umf0tW6WNXSb6Uoma0YT56azB5iikL/aeGAP7s7+f5o= +github.com/libp2p/go-libp2p-resource-manager v0.1.3/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y= github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= github.com/libp2p/go-libp2p-routing v0.1.0/go.mod h1:zfLhI1RI8RLEzmEaaPwzonRvXeeSHddONWkcTcB54nE= github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY= @@ -1209,14 +1188,15 @@ github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evl github.com/libp2p/go-libp2p-swarm v0.2.1/go.mod 
h1:x07b4zkMFo2EvgPV2bMTlNmdQc8i+74Jjio7xGvsTgU= github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h/GGZes8Wku/M5Y= -github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= +github.com/libp2p/go-libp2p-swarm v0.10.0/go.mod h1:71ceMcV6Rg/0rIQ97rsZWMzto1l9LnNquef+efcRbmA= +github.com/libp2p/go-libp2p-swarm v0.10.1 h1:lXW3pgGt+BVmkzcFX61erX7l6Lt+WAamNhwa2Kf3eJM= +github.com/libp2p/go-libp2p-swarm v0.10.1/go.mod h1:Pdkq0QU5a+qu+oyqIV3bknMsnzk9lnNyKvB9acJ5aZs= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1226,25 +1206,46 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod 
h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.7.0 h1:9bfyhNINizxuLrKsenzGaZalXRXIaAEmx1BP/PzF1gM= +github.com/libp2p/go-libp2p-testing v0.7.0/go.mod h1:OLbdn9DbgdMwv00v+tlp1l3oe2Cl+FAjoWIA2pa0X6E= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= -github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod 
h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.0/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.1 h1:MSMe+tUfxpC9GArTz7a4G5zQKQgGh00Vio87d3j3xIg= +github.com/libp2p/go-libp2p-transport-upgrader v0.7.1/go.mod h1:GIR2aTRp1J5yjVlkUoFqMkdobfob6RnAwYg/RZPhrzg= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= -github.com/libp2p/go-libp2p-yamux v0.5.1 h1:sX4WQPHMhRxJE5UZTfjEuBvlQWXB5Bo3A2JK9ZJ9EM0= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= +github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= +github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= +github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= +github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= +github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= +github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod 
h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= +github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= +github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= +github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= +github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= +github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= +github.com/libp2p/go-libp2p-yamux v0.8.0/go.mod h1:yTkPgN2ib8FHyU1ZcVD7aelzyAqXXwEPbyx+aSKm9h8= +github.com/libp2p/go-libp2p-yamux v0.8.1 h1:pi7zUeZ4Z9TpbUMntvSvoP3dFD4SEw/VPybxBcOZGzg= +github.com/libp2p/go-libp2p-yamux v0.8.1/go.mod h1:rUozF8Jah2dL9LLGyBaBeTQeARdwhefMCTQVQt6QobE= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1256,22 +1257,26 @@ github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6 github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= +github.com/libp2p/go-mplex v0.4.0 h1:Ukkez9/4EOX5rTw4sHefNJp10dksftAA05ZgyjplUbM= +github.com/libp2p/go-mplex v0.4.0/go.mod 
h1:y26Lx+wNVtMYMaPu300Cbot5LkEZ4tJaNYeHeT9dh6E= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6 h1:ruPJStbYyXVYGQ81uzEDzuvbYRLKRrLvTYd33yomC38= github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= +github.com/libp2p/go-netroute v0.2.0 h1:0FpsbsvuSnAhXFnCY0VLFbJOzaK0VnP0r1QT/o4nWRE= +github.com/libp2p/go-netroute v0.2.0/go.mod h1:Vio7LTzZ+6hoT4CMZi5/6CpY3Snzh2vgZhWgxMNwlQI= github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= 
github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= @@ -1279,32 +1284,35 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= 
github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= github.com/libp2p/go-stream-muxer-multistream v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0 h1:TqnSHPJEIqDEO7h1wZZ0p3DXdvDSiLHQidKKUGZtiOY= github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= +github.com/libp2p/go-stream-muxer-multistream v0.4.0 h1:HsM/9OdtqnIzjVXcxTXjmqKrj3gJ8kacaOJwJS1ipaY= +github.com/libp2p/go-stream-muxer-multistream v0.4.0/go.mod h1:nb+dGViZleRP4XcyHuZSVrJCBl55nRBOMmiSL/dyziw= github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= +github.com/libp2p/go-tcp-transport v0.5.0 h1:3ZPW8HAuyRAuFzyabE0hSrCXKKSWzROnZZX7DtcIatY= +github.com/libp2p/go-tcp-transport v0.5.0/go.mod h1:UPPL0DIjQqiWRwVAb+CEQlaAG0rp/mCqJfIhFcLHc4Y= 
github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1312,28 +1320,42 @@ github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw github.com/libp2p/go-ws-transport v0.1.2/go.mod h1:dsh2Ld8F+XNmzpkaAijmg5Is+e9l6/1tK/6VFOdN69Y= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.3.1/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-ws-transport v0.5.0 h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo= github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= +github.com/libp2p/go-ws-transport v0.6.0 h1:326XBL6Q+5CQ2KtjXz32+eGu02W/Kz2+Fm4SpXdr0q4= +github.com/libp2p/go-ws-transport v0.6.0/go.mod h1:dXqtI9e2JV9FtF1NOtWVZSKXh5zXvnuwPXfj8GPBbYU= +github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6 h1:O5qcBXRcfqecvQ/My9NqDNHB3/5t58yuJYqthcKhhgE= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux/v2 v2.0.0 h1:vSGhAy5u6iHBq11ZDcyHH4Blcf9xlBhT4WQDoOE90LU= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux v1.3.0/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= +github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= +github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= +github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/go-yamux/v3 v3.0.1/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= +github.com/libp2p/go-yamux/v3 v3.0.2 h1:LW0q5+A1Wy0npEsPJP9wmare2NH4ohNluN5EWVwv2mE= +github.com/libp2p/go-yamux/v3 v3.0.2/go.mod h1:s2LsDhHbh+RfCsQoICSYt58U2f8ijtPANFD8BmE74Bo= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= -github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go 
v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.25.0 h1:K+X9Gvd7JXsOHtU0N2icZ2Nw3rx82uBej3mP4CLgibc= +github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1342,19 +1364,19 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/marten-seemann/qpack v0.1.0/go.mod h1:LFt1NU/Ptjip0C2CPkhimBz5CGE3WGDAUWqna+CNTrI= github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZbBb/j3cvzHhk= github.com/marten-seemann/qtls 
v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1 h1:EnzzN9fPUkUck/1CuY1FlzBaIYMoiBsdwTNmNGkwUUM= +github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -1394,7 +1416,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod 
h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1460,8 +1481,9 @@ github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4 github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= -github.com/multiformats/go-multiaddr v0.4.1 h1:Pq37uLx3hsyNlTDir7FZyU8+cFCTqd5y1KiM2IzOutI= github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= +github.com/multiformats/go-multiaddr v0.5.0 h1:i/JuOoVg4szYQ4YEzDGtb2h0o8M7CG/Yq6cGlcjWZpM= +github.com/multiformats/go-multiaddr v0.5.0/go.mod h1:3KAxNkUqLTJ20AAwN4XVX4kZar+bR+gh4zgbfr3SNug= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1481,14 +1503,13 @@ github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysj github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= 
github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.0 h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c h1:VyANTtZ0wsx0IAZnCZhfMmAmfUyzJq/5JQi2hHOtKS0= +github.com/multiformats/go-multicodec v0.3.1-0.20211210143421-a526f306ed2c/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1497,13 +1518,12 @@ github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpK github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= -github.com/multiformats/go-multihash v0.0.16 h1:D2qsyy1WVculJbGv69pWmQ36ehxFoA5NiIUr1OEs6qI= -github.com/multiformats/go-multihash v0.0.16/go.mod h1:zhfEIgVnB/rPMfxgFw15ZmGoNaKyNUIE4IWHG/kC+Ag= +github.com/multiformats/go-multihash v0.1.0 
h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1544,7 +1564,6 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1555,7 +1574,6 @@ github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= @@ -1586,6 +1604,8 @@ github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= @@ -1618,7 +1638,6 @@ github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= 
github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1655,7 +1674,6 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= @@ -1665,8 +1683,8 @@ github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= @@ -1675,6 +1693,8 @@ github.com/rivo/uniseg v0.1.0/go.mod 
h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= @@ -1682,7 +1702,6 @@ github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.21.0/go.mod h1:ZPhntP/xmq1nnND05hhpAh2QMhSsA4UN3MGZ6O2J3hM= github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1765,13 +1784,13 @@ github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5J github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25 h1:7z3LSn867ex6VSaahyKadf4WtSsJIgne6A1WLOAGM8A= github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= 
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -1779,15 +1798,12 @@ github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFd github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/texttheater/golang-levenshtein v0.0.0-20180516184445-d188e65d659e/go.mod h1:XDKHRm5ThF8YJjx001LtgelzsoaEcvnA7lVWz9EeX3g= +github.com/tidwall/gjson v1.6.0 h1:9VEQWz6LLMUsUl6PueE49ir4Ka6CzLymOAZDxpFsTDc= github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/gjson v1.9.3 h1:hqzS9wAHMO+KVBBkLxYdkEeeFHuqr95GfClRLKlgK0E= -github.com/tidwall/gjson v1.9.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= -github.com/tidwall/match v1.1.1/go.mod 
h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= -github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -1831,14 +1847,12 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200715143311-227fab5a2377/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200723185710-6a3894a6352b/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200806213330-63aa96ca5488/go.mod 
h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= -github.com/whyrusleeping/cbor-gen v0.0.0-20200810223238-211df3b9e24c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -1882,6 +1896,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.0 h1:1SGx3IvKWFUU/xl+/7kjdcjjMcvVSm+3dMo/N42afC8= @@ -1917,13 +1932,34 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod 
h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0 h1:18Ww8TpCEGes12HZJzB2nEbUglvMLzPxqgZypsrKiNc= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0 h1:C/5Egj3MJBXRJi22cSl07suqPqtZLnLFmH//OxETUEc= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0 h1:w/7RXe16WdPylaIXDgcYM6t/q0K5lXgSdZOEbIEyliE= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0 h1:7cXOnCADUsR3+EOqxPaSKwhEuNu0gz/56dRN1hpIdKw= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0 h1:6UjAFmVB5Fza3K5qUJpYWGrk8QMPIqlSnya5FI46VBY= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0 h1:J+Ta+4IAA5W9AdWhGQLfciEpavBqqSkBzTDeYvJLFNU= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= 
+go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1983,14 +2019,13 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2007,8 +2042,9 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2024,7 +2060,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod 
h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2055,7 +2090,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2073,7 +2107,6 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -2090,7 +2123,6 @@ golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -2102,10 +2134,8 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ 
-2121,9 +2151,11 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210917221730-978cfadd31cf h1:R150MpwJIv1MpS0N/pc+NhTM8ajzvlmxlY5OYsrevXQ= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2210,9 +2242,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2238,6 +2268,7 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2245,10 +2276,12 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210820121016-41cdb8703e55/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210917161153-d61c044b1678 h1:J27LZFQBFoihqXoegpscI10HpjZ7B5WQLLKL2FZXQKw= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2292,6 +2325,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2328,16 +2362,15 @@ golang.org/x/tools v0.0.0-20200711155855-7342f9734a7d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2366,7 +2399,6 @@ google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0 
h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2408,7 +2440,6 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2507,6 +2538,9 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod 
h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= diff --git a/itests/api_test.go b/itests/api_test.go index c380a6ed8..ad39f8879 100644 --- a/itests/api_test.go +++ b/itests/api_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -19,6 +20,12 @@ import ( ) func TestAPI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_STATE_MINER_INFO_001 t.Run("direct", func(t *testing.T) { runAPITest(t) }) @@ -116,11 +123,13 @@ func (ts *apiSuite) testSearchMsg(t *testing.T) { sm, err := full.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := full.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, res.Receipt.ExitCode, "message not successful") + //stm: @CHAIN_STATE_SEARCH_MSG_001 searchRes, err := full.StateSearchMsg(ctx, types.EmptyTSK, sm.Cid(), lapi.LookbackNoLimit, true) require.NoError(t, err) require.NotNil(t, searchRes) diff --git a/itests/ccupgrade_test.go b/itests/ccupgrade_test.go index c5b380835..51e70dd5b 100644 --- a/itests/ccupgrade_test.go +++ b/itests/ccupgrade_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -6,14 +7,27 @@ import ( "testing" "time" + "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/itests/kit" + 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestCCUpgrade(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_STATE_MINER_GET_INFO_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + + //stm: @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() for _, height := range []abi.ChainEpoch{ @@ -28,37 +42,44 @@ func TestCCUpgrade(t *testing.T) { } } -func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { +func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) *kit.TestFullNode { ctx := context.Background() - blockTime := 5 * time.Millisecond + blockTime := 1 * time.Millisecond - client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.LatestActorsAt(upgradeHeight)) - ens.InterconnectAll().BeginMining(blockTime) + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMiningMustPost(blockTime) maddr, err := miner.ActorAddress(ctx) if err != nil { t.Fatal(err) } - CC := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) - Upgraded := CC + 1 + CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + fmt.Printf("CCUpgrade: %d\n", CCUpgrade) + // wait for deadline 0 to pass so that committing starts after post on preseals + // this gives max time for post to complete minimizing chances of timeout + // waitForDeadline(ctx, t, 1, client, maddr) miner.PledgeSectors(ctx, 1, 0, nil) - sl, err := miner.SectorsList(ctx) require.NoError(t, err) require.Len(t, sl, 1, 
"expected 1 sector") - require.Equal(t, CC, sl[0], "unexpected sector number") - + require.Equal(t, CCUpgrade, sl[0], "unexpected sector number") { - si, err := client.StateSectorGetInfo(ctx, maddr, CC, types.EmptyTSK) + si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK) require.NoError(t, err) require.Less(t, 50000, int(si.Expiration)) } + waitForSectorActive(ctx, t, CCUpgrade, client, maddr) - err = miner.SectorMarkForUpgrade(ctx, sl[0]) + //stm: @SECTOR_CC_UPGRADE_001 + err = miner.SectorMarkForUpgrade(ctx, sl[0], true) require.NoError(t, err) + sl, err = miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 1, "expected 1 sector") + dh := kit.NewDealHarness(t, client, miner, miner) deal, res, inPath := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{ Rseed: 6, @@ -67,37 +88,112 @@ func runTestCCUpgrade(t *testing.T, upgradeHeight abi.ChainEpoch) { outPath := dh.PerformRetrieval(context.Background(), deal, res.Root, false) kit.AssertFilesEqual(t, inPath, outPath) - // Validate upgrade + status, err := miner.SectorsStatus(ctx, CCUpgrade, true) + require.NoError(t, err) + assert.Equal(t, 1, len(status.Deals)) + return client +} - { - exp, err := client.StateSectorExpiration(ctx, maddr, CC, types.EmptyTSK) - if err != nil { - require.Contains(t, err.Error(), "failed to find sector 3") // already cleaned up - } else { - require.NoError(t, err) - require.NotNil(t, exp) - require.Greater(t, 50000, int(exp.OnTime)) +func waitForDeadline(ctx context.Context, t *testing.T, waitIdx uint64, node *kit.TestFullNode, maddr address.Address) { + for { + ts, err := node.ChainHead(ctx) + require.NoError(t, err) + dl, err := node.StateMinerProvingDeadline(ctx, maddr, ts.Key()) + require.NoError(t, err) + if dl.Index == waitIdx { + return } } - { - exp, err := client.StateSectorExpiration(ctx, maddr, Upgraded, types.EmptyTSK) +} + +func waitForSectorActive(ctx context.Context, t *testing.T, sn abi.SectorNumber, node *kit.TestFullNode, maddr 
address.Address) { + for { + active, err := node.StateMinerActiveSectors(ctx, maddr, types.EmptyTSK) require.NoError(t, err) - require.Less(t, 50000, int(exp.OnTime)) - } + for _, si := range active { + if si.SectorNumber == sn { + fmt.Printf("ACTIVE\n") + return + } + } - dlInfo, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) - require.NoError(t, err) + time.Sleep(time.Second) + } +} - // Sector should expire. +func waitForSectorStartUpgrade(ctx context.Context, t *testing.T, sn abi.SectorNumber, miner *kit.TestMiner) { for { - // Wait for the sector to expire. - status, err := miner.SectorsStatus(ctx, CC, true) + si, err := miner.StorageMiner.SectorsStatus(ctx, sn, false) require.NoError(t, err) - if status.OnTime == 0 && status.Early == 0 { - break + if si.State != api.SectorState("Proving") { + t.Logf("Done proving sector in state: %s", si.State) + return } - t.Log("waiting for sector to expire") - // wait one deadline per loop. - time.Sleep(time.Duration(dlInfo.WPoStChallengeWindow) * blockTime) + } } + +func TestCCUpgradeAndPoSt(t *testing.T) { + kit.QuietMiningLogs() + t.Run("upgrade and then post", func(t *testing.T) { + ctx := context.Background() + n := runTestCCUpgrade(t, 100) + ts, err := n.ChainHead(ctx) + require.NoError(t, err) + start := ts.Height() + // wait for a full proving period + t.Log("waiting for chain") + + n.WaitTillChain(ctx, func(ts *types.TipSet) bool { + if ts.Height() > start+abi.ChainEpoch(2880) { + return true + } + return false + }) + }) +} + +func TestTooManyMarkedForUpgrade(t *testing.T) { + kit.QuietMiningLogs() + + ctx := context.Background() + blockTime := 1 * time.Millisecond + + client, miner, ens := kit.EnsembleMinimal(t, kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMiningMustPost(blockTime) + + maddr, err := miner.ActorAddress(ctx) + if err != nil { + t.Fatal(err) + } + + CCUpgrade := abi.SectorNumber(kit.DefaultPresealsPerBootstrapMiner + 1) + waitForDeadline(ctx, t, 1, 
client, maddr) + miner.PledgeSectors(ctx, 3, 0, nil) + + sl, err := miner.SectorsList(ctx) + require.NoError(t, err) + require.Len(t, sl, 3, "expected 3 sectors") + + { + si, err := client.StateSectorGetInfo(ctx, maddr, CCUpgrade, types.EmptyTSK) + require.NoError(t, err) + require.Less(t, 50000, int(si.Expiration)) + } + + waitForSectorActive(ctx, t, CCUpgrade, client, maddr) + waitForSectorActive(ctx, t, CCUpgrade+1, client, maddr) + waitForSectorActive(ctx, t, CCUpgrade+2, client, maddr) + + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade, true) + require.NoError(t, err) + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+1, true) + require.NoError(t, err) + + waitForSectorStartUpgrade(ctx, t, CCUpgrade, miner) + waitForSectorStartUpgrade(ctx, t, CCUpgrade+1, miner) + + err = miner.SectorMarkForUpgrade(ctx, CCUpgrade+2, true) + require.Error(t, err) + assert.Contains(t, err.Error(), "no free resources to wait for deals") +} diff --git a/itests/cli_test.go b/itests/cli_test.go index 0bd1ec3b4..ac7e4d488 100644 --- a/itests/cli_test.go +++ b/itests/cli_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -11,6 +12,11 @@ import ( // TestClient does a basic test to exercise the client CLI commands. 
func TestClient(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() diff --git a/itests/deadlines_test.go b/itests/deadlines_test.go index c698f1154..f0abdb556 100644 --- a/itests/deadlines_test.go +++ b/itests/deadlines_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -52,6 +53,13 @@ import ( // * asserts that miner B loses power // * asserts that miner D loses power, is inactive func TestDeadlineToggling(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @MINER_SECTOR_LIST_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -108,6 +116,7 @@ func TestDeadlineToggling(t *testing.T) { { minerC.PledgeSectors(ctx, sectorsC, 0, nil) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddrC, types.EmptyTSK) require.NoError(t, err) @@ -127,6 +136,7 @@ func TestDeadlineToggling(t *testing.T) { expectedPower := types.NewInt(uint64(ssz) * sectorsC) + //stm: 
@CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddrC, types.EmptyTSK) require.NoError(t, err) @@ -147,12 +157,14 @@ func TestDeadlineToggling(t *testing.T) { } checkMiner := func(ma address.Address, power abi.StoragePower, active, activeIfCron bool, tsk types.TipSetKey) { + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, ma, tsk) require.NoError(t, err) // make sure it has the expected power. require.Equal(t, p.MinerPower.RawBytePower, power) + //stm: @CHAIN_STATE_GET_ACTOR_001 mact, err := client.StateGetActor(ctx, ma, tsk) require.NoError(t, err) @@ -187,6 +199,7 @@ func TestDeadlineToggling(t *testing.T) { checkMiner(maddrB, types.NewInt(0), true, true, uts.Key()) } + //stm: @CHAIN_STATE_NETWORK_VERSION_001 nv, err := client.StateNetworkVersion(ctx, types.EmptyTSK) require.NoError(t, err) require.GreaterOrEqual(t, nv, network.Version12) @@ -246,6 +259,7 @@ func TestDeadlineToggling(t *testing.T) { }, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 r, err := client.StateWaitMsg(ctx, m.Cid(), 2, api.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) @@ -298,6 +312,7 @@ func TestDeadlineToggling(t *testing.T) { sectorbit := bitfield.New() sectorbit.Set(uint64(sectorNum)) + //stm: @CHAIN_STATE_SECTOR_PARTITION_001 loca, err := client.StateSectorPartition(ctx, maddrD, sectorNum, types.EmptyTSK) require.NoError(t, err) @@ -329,6 +344,7 @@ func TestDeadlineToggling(t *testing.T) { t.Log("sent termination message:", smsg.Cid()) + //stm: @CHAIN_STATE_WAIT_MSG_001 r, err := client.StateWaitMsg(ctx, smsg.Cid(), 2, api.LookbackNoLimit, true) require.NoError(t, err) require.Equal(t, exitcode.Ok, r.Receipt.ExitCode) diff --git a/itests/deals_512mb_test.go b/itests/deals_512mb_test.go index 766d83835..967e33da4 100644 --- a/itests/deals_512mb_test.go +++ b/itests/deals_512mb_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,13 @@ import ( 
) func TestStorageDealMissingBlock(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() // enable 512MiB proofs so we can conduct larger transfers. diff --git a/itests/deals_concurrent_test.go b/itests/deals_concurrent_test.go index c0458e8d1..18d8da02a 100644 --- a/itests/deals_concurrent_test.go +++ b/itests/deals_concurrent_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -71,6 +72,12 @@ func TestDealWithMarketAndMinerNode(t *testing.T) { } func TestDealCyclesConcurrent(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 if testing.Short() { t.Skip("skipping test in short mode") } @@ -139,7 +146,7 @@ func TestSimultanenousTransferLimit(t *testing.T) { ) runTest := func(t *testing.T) { client, miner, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.ConstructorOpts( - node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), 
modules.StagingGraphsync(graphsyncThrottle, graphsyncThrottle))), + node.ApplyIf(node.IsType(repo.StorageMiner), node.Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(graphsyncThrottle, 0, graphsyncThrottle))), node.Override(new(dtypes.Graphsync), modules.Graphsync(graphsyncThrottle, graphsyncThrottle)), )) ens.InterconnectAll().BeginMining(250 * time.Millisecond) diff --git a/itests/deals_max_staging_deals_test.go b/itests/deals_max_staging_deals_test.go index 895a07954..6a4234e02 100644 --- a/itests/deals_max_staging_deals_test.go +++ b/itests/deals_max_staging_deals_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,13 @@ import ( ) func TestMaxStagingDeals(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 ctx := context.Background() // enable 512MiB proofs so we can conduct larger transfers. 
diff --git a/itests/deals_offline_test.go b/itests/deals_offline_test.go index 003f12b11..bb2549026 100644 --- a/itests/deals_offline_test.go +++ b/itests/deals_offline_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -16,7 +17,13 @@ import ( ) func TestOfflineDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_DATA_CALCULATE_COMMP_001, @CLIENT_DATA_GENERATE_CAR_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001, @CLIENT_DATA_GET_DEAL_PIECE_CID_001 runTest := func(t *testing.T, fastRet bool, upscale abi.PaddedPieceSize) { ctx := context.Background() client, miner, ens := kit.EnsembleMinimal(t, kit.WithAllSubsystems()) // no mock proofs @@ -60,6 +67,7 @@ func TestOfflineDealFlow(t *testing.T) { proposalCid := dh.StartDeal(ctx, dp) + //stm: @CLIENT_STORAGE_DEALS_GET_001 // Wait for the deal to reach StorageDealCheckForAcceptance on the client cd, err := client.ClientGetDealInfo(ctx, *proposalCid) require.NoError(t, err) diff --git a/itests/deals_padding_test.go b/itests/deals_padding_test.go index cd15d30d7..c79b6a7db 100644 --- a/itests/deals_padding_test.go +++ b/itests/deals_padding_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,7 +15,13 @@ import ( ) func TestDealPadding(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, 
@CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_DATA_GET_DEAL_PIECE_CID_001 kit.QuietMiningLogs() var blockTime = 250 * time.Millisecond @@ -58,6 +65,7 @@ func TestDealPadding(t *testing.T) { // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) + //stm: @CLIENT_STORAGE_DEALS_GET_001 di, err := client.ClientGetDealInfo(ctx, *proposalCid) require.NoError(t, err) require.True(t, di.PieceCID.Equals(pcid)) diff --git a/itests/deals_partial_retrieval_dm-level_test.go b/itests/deals_partial_retrieval_dm-level_test.go new file mode 100644 index 000000000..fd289a0ac --- /dev/null +++ b/itests/deals_partial_retrieval_dm-level_test.go @@ -0,0 +1,252 @@ +package itests + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "testing" + "time" + + "github.com/filecoin-project/go-fil-markets/storagemarket" + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + api0 "github.com/filecoin-project/lotus/api/v0api" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/itests/kit" + blocks "github.com/ipfs/go-block-format" + "github.com/ipfs/go-cid" + "github.com/ipld/go-car" + textselector "github.com/ipld/go-ipld-selector-text-lite" + "github.com/stretchr/testify/require" +) + +// please talk to @ribasushi or @mikeal before modifying these test: there are +// downstream dependencies on ADL-less operation +var ( + adlFixtureCar = "fixtures/adl_test.car" + adlFixtureRoot, _ = cid.Parse("bafybeiaigxwanoxyeuzyiknhrg6io6kobfbm37ozcips6qdwumub2gaomy") + adlFixtureCommp, _ = 
cid.Parse("baga6ea4seaqjnmnrv4qsfz2rnda54mvo5al22dwpguhn2pmep63gl7bbqqqraai") + adlFixturePieceSize = abi.PaddedPieceSize(1024) + dmSelector = api.Selector("Links/0/Hash") + dmTextSelector = textselector.Expression(dmSelector) + dmExpectedResult = "NO ADL" + dmExpectedCarBlockCount = 4 + dmDagSpec = []api.DagSpec{{DataSelector: &dmSelector, ExportMerkleProof: true}} +) + +func TestDMLevelPartialRetrieval(t *testing.T) { + + ctx := context.Background() + + policy.SetPreCommitChallengeDelay(2) + kit.QuietMiningLogs() + client, miner, ens := kit.EnsembleMinimal(t, kit.ThroughRPC(), kit.MockProofs()) + dh := kit.NewDealHarness(t, client, miner, miner) + ens.InterconnectAll().BeginMining(50 * time.Millisecond) + + _, err := client.ClientImport(ctx, api.FileRef{Path: adlFixtureCar, IsCAR: true}) + require.NoError(t, err) + + caddr, err := client.WalletDefaultAddress(ctx) + require.NoError(t, err) + + // + // test retrieval from local car 1st + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + + // + // ensure V0 continues functioning as expected + require.NoError(t, tesV0RetrievalAsCar( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + require.NoError(t, testV0RetrievalAsFile( + ctx, client, api0.RetrievalOrder{ + FromLocalCAR: adlFixtureCar, + Root: adlFixtureRoot, + DatamodelPathSelector: &dmTextSelector, + }, t.TempDir(), + )) + + // + // now perform a storage/retrieval deal as well, and retest + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + Root: adlFixtureRoot, + PieceCid: &adlFixtureCommp, + PieceSize: adlFixturePieceSize.Unpadded(), + } + proposalCid := 
dh.StartDeal(ctx, dp) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + dh.WaitDealSealed(ctx, proposalCid, false, false, nil) + + offers, err := client.ClientFindData(ctx, adlFixtureRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder := offers[0].Order(caddr) + retOrder.DataSelector = &dmSelector + + rr, err := client.ClientRetrieve(ctx, retOrder) + require.NoError(t, err) + + err = client.ClientRetrieveWait(ctx, rr.DealID) + require.NoError(t, err) + + require.NoError(t, testDMExportAsCar( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + require.NoError(t, testDMExportAsFile( + ctx, client, api.ExportRef{ + DealID: rr.DealID, + Root: adlFixtureRoot, + DAGs: dmDagSpec, + }, t.TempDir(), + )) + +} + +func testDMExportAsFile(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + fileDest := api.FileRef{ + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, fileDest) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func testV0RetrievalAsFile(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + 
Path: out.Name(), + }) + if err != nil { + return err + } + return validateDMUnixFile(out) +} +func validateDMUnixFile(r io.Reader) error { + data, err := io.ReadAll(r) + if err != nil { + return err + } + if string(data) != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} + +func testDMExportAsCar(ctx context.Context, client *kit.TestFullNode, expDirective api.ExportRef, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + carDest := api.FileRef{ + IsCAR: true, + Path: out.Name(), + } + err = client.ClientExport(ctx, expDirective, carDest) + if err != nil { + return err + } + + return validateDMCar(out) +} +func tesV0RetrievalAsCar(ctx context.Context, client *kit.TestFullNode, retOrder api0.RetrievalOrder, tempDir string) error { + out, err := ioutil.TempFile(tempDir, "exp-test") + if err != nil { + return err + } + defer out.Close() //nolint:errcheck + + cv0 := &api0.WrapperV1Full{client.FullNode} //nolint:govet + err = cv0.ClientRetrieve(ctx, retOrder, &api.FileRef{ + Path: out.Name(), + IsCAR: true, + }) + if err != nil { + return err + } + + return validateDMCar(out) +} +func validateDMCar(r io.Reader) error { + cr, err := car.NewCarReader(r) + if err != nil { + return err + } + + if len(cr.Header.Roots) != 1 { + return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) + } else if cr.Header.Roots[0].String() != adlFixtureRoot.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", adlFixtureRoot.String(), cr.Header.Roots[0].String()) + } + + blks := make([]blocks.Block, 0) + for { + b, err := cr.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + blks = append(blks, b) + } + + if len(blks) != dmExpectedCarBlockCount { + return fmt.Errorf("expected a car file with %d blocks, got one with %d instead", 
dmExpectedCarBlockCount, len(blks)) + } + + data := fmt.Sprintf("%s%s", blks[2].RawData(), blks[3].RawData()) + if data != dmExpectedResult { + return fmt.Errorf("retrieved data mismatch: expected '%s' got '%s'", dmExpectedResult, data) + } + + return nil +} diff --git a/itests/deals_partial_retrieval_test.go b/itests/deals_partial_retrieval_test.go index ffc8c5e2c..abc5cf411 100644 --- a/itests/deals_partial_retrieval_test.go +++ b/itests/deals_partial_retrieval_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,6 +10,8 @@ import ( "testing" "time" + "golang.org/x/xerrors" + "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/big" @@ -18,7 +21,6 @@ import ( blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" "github.com/ipld/go-car" - textselector "github.com/ipld/go-ipld-selector-text-lite" "github.com/stretchr/testify/require" ) @@ -28,15 +30,22 @@ var ( sourceCar = "../build/genesis/mainnet.car" carRoot, _ = cid.Parse("bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2") carCommp, _ = cid.Parse("baga6ea4seaqmrivgzei3fmx5qxtppwankmtou6zvigyjaveu3z2zzwhysgzuina") + selectedCid, _ = cid.Parse("bafkqaetgnfwc6mjpon2g64tbm5sxa33xmvza") carPieceSize = abi.PaddedPieceSize(2097152) - textSelector = textselector.Expression("8/1/8/1/0/1/0") - textSelectorNonLink = textselector.Expression("8/1/8/1/0/1") - textSelectorNonexistent = textselector.Expression("42") + textSelector = api.Selector("8/1/8/1/0/1/0") + textSelectorNonLink = api.Selector("8/1/8/1/0/1") + textSelectorNonexistent = api.Selector("42") expectedResult = "fil/1/storagepower" ) func TestPartialRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, 
@CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_RETRIEVAL_RETRIEVE_001 ctx := context.Background() policy.SetPreCommitChallengeDelay(2) @@ -53,74 +62,79 @@ func TestPartialRetrieval(t *testing.T) { require.NoError(t, err) // first test retrieval from local car, then do an actual deal - for _, fullCycle := range []bool{false, true} { - - var retOrder api.RetrievalOrder - - if !fullCycle { - - retOrder.FromLocalCAR = sourceCar - retOrder.Root = carRoot - - } else { - - dp := dh.DefaultStartDealParams() - dp.Data = &storagemarket.DataRef{ - // FIXME: figure out how to do this with an online partial transfer - TransferType: storagemarket.TTManual, - Root: carRoot, - PieceCid: &carCommp, - PieceSize: carPieceSize.Unpadded(), + for _, exportMerkleProof := range []bool{false, true} { + for _, fullCycle := range []bool{false, true} { + + var retOrder api.RetrievalOrder + var eref api.ExportRef + + if !fullCycle { + eref.FromLocalCAR = sourceCar + } else { + dp := dh.DefaultStartDealParams() + dp.Data = &storagemarket.DataRef{ + // FIXME: figure out how to do this with an online partial transfer + TransferType: storagemarket.TTManual, + Root: carRoot, + PieceCid: &carCommp, + PieceSize: carPieceSize.Unpadded(), + } + proposalCid := dh.StartDeal(ctx, dp) + + // Wait for the deal to reach StorageDealCheckForAcceptance on the client + cd, err := client.ClientGetDealInfo(ctx, *proposalCid) + require.NoError(t, err) + require.Eventually(t, func() bool { + cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) + return cd.State == storagemarket.StorageDealCheckForAcceptance + }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) + + err = 
miner.DealsImportData(ctx, *proposalCid, sourceCar) + require.NoError(t, err) + + // Wait for the deal to be published, we should be able to start retrieval right away + dh.WaitDealPublished(ctx, proposalCid) + + offers, err := client.ClientFindData(ctx, carRoot, nil) + require.NoError(t, err) + require.NotEmpty(t, offers, "no offers") + + retOrder = offers[0].Order(caddr) } - proposalCid := dh.StartDeal(ctx, dp) - // Wait for the deal to reach StorageDealCheckForAcceptance on the client - cd, err := client.ClientGetDealInfo(ctx, *proposalCid) - require.NoError(t, err) - require.Eventually(t, func() bool { - cd, _ := client.ClientGetDealInfo(ctx, *proposalCid) - return cd.State == storagemarket.StorageDealCheckForAcceptance - }, 30*time.Second, 1*time.Second, "actual deal status is %s", storagemarket.DealStates[cd.State]) - - err = miner.DealsImportData(ctx, *proposalCid, sourceCar) - require.NoError(t, err) - - // Wait for the deal to be published, we should be able to start retrieval right away - dh.WaitDealPublished(ctx, proposalCid) - - offers, err := client.ClientFindData(ctx, carRoot, nil) - require.NoError(t, err) - require.NotEmpty(t, offers, "no offers") - - retOrder = offers[0].Order(caddr) - } - - retOrder.DatamodelPathSelector = &textSelector - - // test retrieval of either data or constructing a partial selective-car - for _, retrieveAsCar := range []bool{false, true} { - outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") - require.NoError(t, err) - defer outFile.Close() //nolint:errcheck - - require.NoError(t, testGenesisRetrieval( - ctx, - client, - retOrder, - &api.FileRef{ - Path: outFile.Name(), - IsCAR: retrieveAsCar, - }, - outFile, - )) - - // UGH if I do not sleep here, I get things like: - /* - retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 
1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: - github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve - /home/circleci/project/node/impl/client/client.go:774 - */ - time.Sleep(time.Second) + retOrder.DataSelector = &textSelector + eref.DAGs = append(eref.DAGs, api.DagSpec{ + DataSelector: &textSelector, + ExportMerkleProof: exportMerkleProof, + }) + eref.Root = carRoot + + // test retrieval of either data or constructing a partial selective-car + for _, retrieveAsCar := range []bool{false, true} { + outFile, err := ioutil.TempFile(t.TempDir(), "ret-file") + require.NoError(t, err) + defer outFile.Close() //nolint:errcheck + + require.NoError(t, testGenesisRetrieval( + ctx, + client, + retOrder, + eref, + &api.FileRef{ + Path: outFile.Name(), + IsCAR: retrieveAsCar, + }, + outFile, + )) + + // UGH if I do not sleep here, I get things like: + /* + retrieval failed: Retrieve failed: there is an active retrieval deal with peer 12D3KooWK9fB9a3HZ4PQLVmEQ6pweMMn5CAyKtumB71CPTnuBDi6 for payload CID bafy2bzacecnamqgqmifpluoeldx7zzglxcljo6oja4vrmtj7432rphldpdmm2 (retrieval deal ID 1631259332180384709, state DealStatusFinalizingBlockstore) - existing deal must be cancelled before starting a new retrieval deal: + github.com/filecoin-project/lotus/node/impl/client.(*API).ClientRetrieve + /home/circleci/project/node/impl/client/client.go:774 + */ + time.Sleep(time.Second) + } } } @@ -131,14 +145,18 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonexistent, + Root: carRoot, + DataSelector: &textSelectorNonexistent, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: []api.DagSpec{{DataSelector: &textSelectorNonexistent}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: path selection '%s' does not match a node within %s", 
textSelectorNonexistent, carRoot), + fmt.Sprintf("parsing dag spec: path selection does not match a node within %s", carRoot), ) // ensure non-boundary retrievals fail @@ -148,18 +166,22 @@ func TestPartialRetrieval(t *testing.T) { ctx, client, api.RetrievalOrder{ - FromLocalCAR: sourceCar, - Root: carRoot, - DatamodelPathSelector: &textSelectorNonLink, + Root: carRoot, + DataSelector: &textSelectorNonLink, + }, + api.ExportRef{ + Root: carRoot, + FromLocalCAR: sourceCar, + DAGs: []api.DagSpec{{DataSelector: &textSelectorNonLink}}, }, &api.FileRef{}, nil, ), - fmt.Sprintf("retrieval failed: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), + fmt.Sprintf("parsing dag spec: error while locating partial retrieval sub-root: unsupported selection path '%s' does not correspond to a block boundary (a.k.a. CID link)", textSelectorNonLink), ) } -func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, retRef *api.FileRef, outFile *os.File) error { +func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrder api.RetrievalOrder, eref api.ExportRef, retRef *api.FileRef, outFile *os.File) error { if retOrder.Total.Nil() { retOrder.Total = big.Zero() @@ -168,7 +190,19 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde retOrder.UnsealPrice = big.Zero() } - err := client.ClientRetrieve(ctx, retOrder, retRef) + if eref.FromLocalCAR == "" { + rr, err := client.ClientRetrieve(ctx, retOrder) + if err != nil { + return err + } + eref.DealID = rr.DealID + + if err := client.ClientRetrieveWait(ctx, rr.DealID); err != nil { + return xerrors.Errorf("retrieval wait: %w", err) + } + } + + err := client.ClientExport(ctx, eref, *retRef) if err != nil { return err } @@ -190,8 +224,10 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde if 
len(cr.Header.Roots) != 1 { return fmt.Errorf("expected a single root in result car, got %d", len(cr.Header.Roots)) - } else if cr.Header.Roots[0].String() != carRoot.String() { + } else if eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != carRoot.String() { return fmt.Errorf("expected root cid '%s', got '%s'", carRoot.String(), cr.Header.Roots[0].String()) + } else if !eref.DAGs[0].ExportMerkleProof && cr.Header.Roots[0].String() != selectedCid.String() { + return fmt.Errorf("expected root cid '%s', got '%s'", selectedCid.String(), cr.Header.Roots[0].String()) } blks := make([]blocks.Block, 0) @@ -206,11 +242,11 @@ func testGenesisRetrieval(ctx context.Context, client *kit.TestFullNode, retOrde blks = append(blks, b) } - if len(blks) != 3 { - return fmt.Errorf("expected a car file with 3 blocks, got one with %d instead", len(blks)) + if (eref.DAGs[0].ExportMerkleProof && len(blks) != 3) || (!eref.DAGs[0].ExportMerkleProof && len(blks) != 1) { + return fmt.Errorf("expected a car file with 3/1 blocks, got one with %d instead", len(blks)) } - data = blks[2].RawData() + data = blks[len(blks)-1].RawData() } if string(data) != expectedResult { diff --git a/itests/deals_power_test.go b/itests/deals_power_test.go index 0c29ad060..27b196109 100644 --- a/itests/deals_power_test.go +++ b/itests/deals_power_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,6 +10,12 @@ import ( ) func TestFirstDealEnablesMining(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, 
@CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 // test making a deal with a fresh miner, and see if it starts to mine. if testing.Short() { t.Skip("skipping test in short mode") diff --git a/itests/deals_pricing_test.go b/itests/deals_pricing_test.go index eb28af0bd..b1f1d7e5d 100644 --- a/itests/deals_pricing_test.go +++ b/itests/deals_pricing_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -12,6 +13,12 @@ import ( ) func TestQuotePriceForUnsealedRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 var ( ctx = context.Background() blocktime = 50 * time.Millisecond @@ -43,10 +50,12 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) { _, res2, _ := dh.MakeOnlineDeal(ctx, kit.MakeFullDealParams{Rseed: 6}) require.Equal(t, res1.Root, res2.Root) + //stm: @CLIENT_STORAGE_DEALS_GET_001 // Retrieval dealInfo, err := client.ClientGetDealInfo(ctx, *deal1) require.NoError(t, err) + //stm: @CLIENT_RETRIEVAL_FIND_001 // fetch quote -> zero for unsealed price since unsealed file already exists. 
offers, err := client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -56,11 +65,13 @@ func TestQuotePriceForUnsealedRetrieval(t *testing.T) { require.Equal(t, dealInfo.Size*uint64(ppb), offers[0].MinPrice.Uint64()) // remove ONLY one unsealed file + //stm: @STORAGE_LIST_001, @MINER_SECTOR_LIST_001 ss, err := miner.StorageList(context.Background()) require.NoError(t, err) _, err = miner.SectorsList(ctx) require.NoError(t, err) + //stm: @STORAGE_DROP_SECTOR_001, @STORAGE_LIST_001 iLoop: for storeID, sd := range ss { for _, sector := range sd { @@ -70,6 +81,7 @@ iLoop: } } + //stm: @CLIENT_RETRIEVAL_FIND_001 // get retrieval quote -> zero for unsealed price as unsealed file exists. offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -89,6 +101,7 @@ iLoop: } } + //stm: @CLIENT_RETRIEVAL_FIND_001 // fetch quote -> non-zero for unseal price as we no more unsealed files. offers, err = client.ClientFindData(ctx, res1.Root, &dealInfo.PieceCID) require.NoError(t, err) @@ -100,6 +113,10 @@ iLoop: } func TestZeroPricePerByteRetrieval(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/itests/deals_publish_test.go b/itests/deals_publish_test.go index 85a358f06..8d707c235 100644 --- a/itests/deals_publish_test.go +++ b/itests/deals_publish_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestPublishDealsBatching(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: 
@CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 var ( ctx = context.Background() publishPeriod = 10 * time.Second @@ -103,6 +110,7 @@ func TestPublishDealsBatching(t *testing.T) { } // Expect a single PublishStorageDeals message that includes the first two deals + //stm: @CHAIN_STATE_LIST_MESSAGES_001 msgCids, err := client.StateListMessages(ctx, &api.MessageMatch{To: market.Address}, types.EmptyTSK, 1) require.NoError(t, err) count := 0 diff --git a/itests/deals_retry_deal_no_funds_test.go b/itests/deals_retry_deal_no_funds_test.go new file mode 100644 index 000000000..a14a0d085 --- /dev/null +++ b/itests/deals_retry_deal_no_funds_test.go @@ -0,0 +1,268 @@ +//stm: #integration +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/actors/policy" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/wallet" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/filecoin-project/lotus/markets/storageadapter" + "github.com/filecoin-project/lotus/node" + "github.com/filecoin-project/lotus/node/config" + "github.com/filecoin-project/lotus/node/modules" + "github.com/filecoin-project/lotus/storage" + "github.com/stretchr/testify/require" +) + +var ( + publishPeriod = 1 * time.Second + maxDealsPerMsg = uint64(2) // Set max deals per publish deals message to 2 + + blockTime = 3 * time.Millisecond +) + +func TestDealsRetryLackOfFunds(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: 
@CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 + ctx := context.Background() + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + kit.QuietMiningLogs() + + // Allow 8MB sectors + eightMBSectorsOpt := kit.SectorSize(8 << 20) + + publishStorageDealKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publishStorageDealKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + ) + + publishStorageAccountFunds := types.NewInt(1020000000000) + minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt) + + kit.QuietMiningLogs() + + ens. + Start(). + InterconnectAll(). 
+ BeginMining(blockTime) + + _, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publishStorageDealKey.Address) + + dh := kit.NewDealHarness(t, clientFullNode, miner, miner) + + res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file. + list, err := clientFullNode.ClientListImports(ctx) + require.NoError(t, err) + require.Len(t, list, 1) + require.Equal(t, res.Root, *list[0].Root) + + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.FastRetrieval = true + dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. + deal := dh.StartDeal(ctx, dp) + + propcid := *deal + + go func() { + time.Sleep(3 * time.Second) + + kit.SendFunds(ctx, t, minerFullNode, publishStorageDealKey.Address, types.FromFil(1)) + + err := miner.MarketRetryPublishDeal(ctx, propcid) + if err != nil { + panic(err) + } + }() + + dh.WaitDealSealed(ctx, deal, false, false, nil) +} + +func TestDealsRetryLackOfFunds_blockInPublishDeal(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 + ctx := context.Background() + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + kit.QuietMiningLogs() + + // Allow 8MB sectors + eightMBSectorsOpt := kit.SectorSize(8 << 20) + + publishStorageDealKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + 
node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publishStorageDealKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + ) + + publishStorageAccountFunds := types.NewInt(1020000000000) + minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt) + + kit.QuietMiningLogs() + + ens. + Start(). + InterconnectAll(). + BeginMining(blockTime) + + _, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publishStorageDealKey.Address) + + dh := kit.NewDealHarness(t, clientFullNode, miner, miner) + + res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file. + list, err := clientFullNode.ClientListImports(ctx) + require.NoError(t, err) + require.Len(t, list, 1) + require.Equal(t, res.Root, *list[0].Root) + + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.FastRetrieval = true + dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. 
+ deal := dh.StartDeal(ctx, dp) + + dealSealed := make(chan struct{}) + go func() { + dh.WaitDealSealedQuiet(ctx, deal, false, false, nil) + dealSealed <- struct{}{} + }() + + select { + case <-dealSealed: + t.Fatal("deal shouldn't have sealed") + case <-time.After(time.Second * 15): + } +} + +func TestDealsRetryLackOfFunds_belowLimit(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 + ctx := context.Background() + oldDelay := policy.GetPreCommitChallengeDelay() + policy.SetPreCommitChallengeDelay(5) + + t.Cleanup(func() { + policy.SetPreCommitChallengeDelay(oldDelay) + }) + + policy.SetSupportedProofTypes(abi.RegisteredSealProof_StackedDrg8MiBV1) + kit.QuietMiningLogs() + + // Allow 8MB sectors + eightMBSectorsOpt := kit.SectorSize(8 << 20) + + publishStorageDealKey, err := wallet.GenerateKey(types.KTSecp256k1) + require.NoError(t, err) + + opts := node.Options( + node.Override(new(*storageadapter.DealPublisher), + storageadapter.NewDealPublisher(nil, storageadapter.PublishMsgConfig{ + Period: publishPeriod, + MaxDealsPerMsg: maxDealsPerMsg, + }), + ), + node.Override(new(*storage.AddressSelector), modules.AddressSelector(&config.MinerAddressConfig{ + DealPublishControl: []string{ + publishStorageDealKey.Address.String(), + }, + DisableOwnerFallback: true, + DisableWorkerFallback: true, + })), + ) + + publishStorageAccountFunds := types.NewInt(1) + minerFullNode, clientFullNode, miner, ens := kit.EnsembleTwoOne(t, kit.Account(publishStorageDealKey, publishStorageAccountFunds), kit.ConstructorOpts(opts), kit.MockProofs(), eightMBSectorsOpt) + + kit.QuietMiningLogs() 
+ + ens. + Start(). + InterconnectAll(). + BeginMining(blockTime) + + _, err = minerFullNode.WalletImport(ctx, &publishStorageDealKey.KeyInfo) + require.NoError(t, err) + + miner.SetControlAddresses(publishStorageDealKey.Address) + + dh := kit.NewDealHarness(t, clientFullNode, miner, miner) + + res, _ := clientFullNode.CreateImportFile(ctx, 0, 4<<20) // 4MiB file. + list, err := clientFullNode.ClientListImports(ctx) + require.NoError(t, err) + require.Len(t, list, 1) + require.Equal(t, res.Root, *list[0].Root) + + dp := dh.DefaultStartDealParams() + dp.Data.Root = res.Root + dp.FastRetrieval = true + dp.EpochPrice = abi.NewTokenAmount(62500000) // minimum asking price. + deal := dh.StartDeal(ctx, dp) + + err = dh.ExpectDealFailure(ctx, deal, "actor balance less than needed") + if err != nil { + t.Fatal(err) + } +} diff --git a/itests/deals_test.go b/itests/deals_test.go index 4ad97e969..fb8e6e4f3 100644 --- a/itests/deals_test.go +++ b/itests/deals_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -9,6 +10,12 @@ import ( ) func TestDealsWithSealingAndRPC(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 if testing.Short() { t.Skip("skipping test in short mode") } diff --git a/itests/fixtures/adl_test.car b/itests/fixtures/adl_test.car new file mode 100644 index 000000000..d00ca0915 Binary files /dev/null and b/itests/fixtures/adl_test.car differ diff --git a/itests/gateway_test.go b/itests/gateway_test.go index f9e4a0fb6..d5bc9c0eb 100644 --- 
a/itests/gateway_test.go +++ b/itests/gateway_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -38,6 +39,12 @@ const ( // TestGatewayWalletMsig tests that API calls to wallet and msig can be made on a lite // node that is connected through a gateway to a full API node func TestGatewayWalletMsig(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -116,6 +123,7 @@ func TestGatewayWalletMsig(t *testing.T) { addProposal, err := doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -127,6 +135,7 @@ func TestGatewayWalletMsig(t *testing.T) { // Get available balance of msig: should be greater than zero and less // than initial amount msig := execReturn.IDAddress + //stm: @CHAIN_STATE_MINER_AVAILABLE_BALANCE_001 msigBalance, err := lite.MsigGetAvailableBalance(ctx, msig, types.EmptyTSK) require.NoError(t, err) require.Greater(t, msigBalance.Int64(), int64(0)) @@ -139,6 +148,7 @@ func TestGatewayWalletMsig(t *testing.T) { addProposal, err = doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = lite.StateWaitMsg(ctx, addProposal, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -156,6 +166,7 @@ func TestGatewayWalletMsig(t *testing.T) { approval1, err 
:= doSend(proto) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = lite.StateWaitMsg(ctx, approval1, 1, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -169,6 +180,10 @@ func TestGatewayWalletMsig(t *testing.T) { // TestGatewayMsigCLI tests that msig CLI calls can be made // on a lite node that is connected through a gateway to a full API node func TestGatewayMsigCLI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -180,6 +195,10 @@ func TestGatewayMsigCLI(t *testing.T) { } func TestGatewayDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * time.Millisecond @@ -202,6 +221,10 @@ func TestGatewayDealFlow(t *testing.T) { } func TestGatewayCLIDealFlow(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 kit.QuietMiningLogs() blocktime := 5 * 
time.Millisecond diff --git a/itests/get_messages_in_ts_test.go b/itests/get_messages_in_ts_test.go index 61219a316..b5ef0387e 100644 --- a/itests/get_messages_in_ts_test.go +++ b/itests/get_messages_in_ts_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -16,6 +17,12 @@ import ( ) func TestChainGetMessagesInTs(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 ctx := context.Background() kit.QuietMiningLogs() @@ -84,6 +91,7 @@ func TestChainGetMessagesInTs(t *testing.T) { } for _, sm := range sms { + //stm: @CHAIN_STATE_WAIT_MSG_001 msgLookup, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) require.NoError(t, err) diff --git a/itests/kit/blockminer.go b/itests/kit/blockminer.go index 2c9bd47c6..91ddc2e26 100644 --- a/itests/kit/blockminer.go +++ b/itests/kit/blockminer.go @@ -1,13 +1,18 @@ package kit import ( + "bytes" "context" "sync" "sync/atomic" "testing" "time" + "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/api" + aminer "github.com/filecoin-project/lotus/chain/actors/builtin/miner" + "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/miner" "github.com/stretchr/testify/require" ) @@ -30,6 +35,176 @@ func NewBlockMiner(t *testing.T, miner *TestMiner) *BlockMiner { } } +type partitionTracker struct { + partitions []api.Partition + posted bitfield.BitField +} + +func newPartitionTracker(ctx context.Context, dlIdx 
uint64, bm *BlockMiner) *partitionTracker { + dlines, err := bm.miner.FullNode.StateMinerDeadlines(ctx, bm.miner.ActorAddr, types.EmptyTSK) + require.NoError(bm.t, err) + dl := dlines[dlIdx] + + parts, err := bm.miner.FullNode.StateMinerPartitions(ctx, bm.miner.ActorAddr, dlIdx, types.EmptyTSK) + require.NoError(bm.t, err) + return &partitionTracker{ + partitions: parts, + posted: dl.PostSubmissions, + } +} + +func (p *partitionTracker) count(t *testing.T) uint64 { + pCnt, err := p.posted.Count() + require.NoError(t, err) + return pCnt +} + +func (p *partitionTracker) done(t *testing.T) bool { + return uint64(len(p.partitions)) == p.count(t) +} + +func (p *partitionTracker) recordIfPost(t *testing.T, bm *BlockMiner, smsg *types.SignedMessage) (ret bool) { + defer func() { + ret = p.done(t) + }() + msg := smsg.Message + if !(msg.To == bm.miner.ActorAddr) { + return + } + if msg.Method != aminer.Methods.SubmitWindowedPoSt { + return + } + params := aminer.SubmitWindowedPoStParams{} + require.NoError(t, params.UnmarshalCBOR(bytes.NewReader(msg.Params))) + for _, part := range params.Partitions { + p.posted.Set(part.Index) + } + return +} + +// Like MineBlocks but refuses to mine until the window post scheduler has wdpost messages in the mempool +// and everything shuts down if a post fails. It also enforces that every block mined succeeds +func (bm *BlockMiner) MineBlocksMustPost(ctx context.Context, blocktime time.Duration) { + + time.Sleep(3 * time.Second) + + // wrap context in a cancellable context. 
+ ctx, bm.cancel = context.WithCancel(ctx) + bm.wg.Add(1) + go func() { + defer bm.wg.Done() + + activeDeadlines := make(map[int]struct{}) + _ = activeDeadlines + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + wait := make(chan bool) + chg, err := bm.miner.FullNode.ChainNotify(ctx) + require.NoError(bm.t, err) + // read current out + curr := <-chg + require.Equal(bm.t, ts.Height(), curr[0].Val.Height()) + for { + select { + case <-time.After(blocktime): + case <-ctx.Done(): + return + } + nulls := atomic.SwapInt64(&bm.nextNulls, 0) + require.Equal(bm.t, int64(0), nulls, "Injecting > 0 null blocks while `MustPost` mining is currently unsupported") + + // Wake up and figure out if we are at the end of an active deadline + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + tsk := ts.Key() + + dlinfo, err := bm.miner.FullNode.StateMinerProvingDeadline(ctx, bm.miner.ActorAddr, tsk) + require.NoError(bm.t, err) + if ts.Height()+1 == dlinfo.Last() { // Last epoch in dline, we need to check that miner has posted + + tracker := newPartitionTracker(ctx, dlinfo.Index, bm) + if !tracker.done(bm.t) { // need to wait for post + bm.t.Logf("expect %d partitions proved but only see %d", len(tracker.partitions), tracker.count(bm.t)) + poolEvts, err := bm.miner.FullNode.MpoolSub(ctx) + require.NoError(bm.t, err) + + // First check pending messages we'll mine this epoch + msgs, err := bm.miner.FullNode.MpoolPending(ctx, types.EmptyTSK) + require.NoError(bm.t, err) + for _, msg := range msgs { + tracker.recordIfPost(bm.t, bm, msg) + } + + // post not yet in mpool, wait for it + if !tracker.done(bm.t) { + bm.t.Logf("post missing from mpool, block mining suspended until it arrives") + POOL: + for { + bm.t.Logf("mpool event wait loop at block height %d, ts: %s", ts.Height(), ts.Key()) + select { + case <-ctx.Done(): + return + case evt := <-poolEvts: + bm.t.Logf("pool event: %d", evt.Type) + if evt.Type == api.MpoolAdd { + 
bm.t.Logf("incoming message %v", evt.Message) + if tracker.recordIfPost(bm.t, bm, evt.Message) { + break POOL + } + } + } + } + bm.t.Logf("done waiting on mpool") + } + } + } + + var target abi.ChainEpoch + reportSuccessFn := func(success bool, epoch abi.ChainEpoch, err error) { + require.NoError(bm.t, err) + target = epoch + wait <- success + } + + var success bool + for i := int64(0); !success; i++ { + err = bm.miner.MineOne(ctx, miner.MineReq{ + InjectNulls: abi.ChainEpoch(nulls + i), + Done: reportSuccessFn, + }) + success = <-wait + } + + // Wait until it shows up on the given full nodes ChainHead + // TODO this replicates a flaky condition from MineUntil, + // it would be better to use api to wait for sync, + // but currently this is a bit difficult + // and flaky failure is easy to debug and retry + nloops := 200 + for i := 0; i < nloops; i++ { + ts, err := bm.miner.FullNode.ChainHead(ctx) + require.NoError(bm.t, err) + + if ts.Height() == target { + break + } + + require.NotEqual(bm.t, i, nloops-1, "block never managed to sync to node") + time.Sleep(time.Millisecond * 10) + } + + switch { + case err == nil: // wrap around + case ctx.Err() != nil: // context fired. 
+ return + default: // log error + bm.t.Error(err) + } + } + }() + +} + func (bm *BlockMiner) MineBlocks(ctx context.Context, blocktime time.Duration) { time.Sleep(time.Second) diff --git a/itests/kit/client.go b/itests/kit/client.go index c9f8946ec..4c20e37c1 100644 --- a/itests/kit/client.go +++ b/itests/kit/client.go @@ -101,9 +101,14 @@ func RunClientTest(t *testing.T, cmds []*lcli.Command, clientNode *TestFullNode) time.Sleep(time.Second) } + // client retrieval-ask --size=1 + out = clientCLI.RunCmd("client", "retrieval-ask", "--size=1", minerAddr.String(), dataCid.String()) + require.Regexp(t, regexp.MustCompile("Ask:"), out) + fmt.Println("retrieval ask:\n", out) + // Retrieve the first file from the Miner // client retrieve - tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-Client") + tmpdir, err := ioutil.TempDir(os.TempDir(), "test-cli-client") require.NoError(t, err) path := filepath.Join(tmpdir, "outfile.dat") out = clientCLI.RunCmd("client", "retrieve", dataCid.String(), path) diff --git a/itests/kit/deals.go b/itests/kit/deals.go index 1b1daa5e4..f8de14d62 100644 --- a/itests/kit/deals.go +++ b/itests/kit/deals.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" @@ -103,8 +104,9 @@ func (dh *DealHarness) MakeOnlineDeal(ctx context.Context, params MakeFullDealPa // TODO: this sleep is only necessary because deals don't immediately get logged in the dealstore, we should fix this time.Sleep(time.Second) + fmt.Printf("WAIT DEAL SEALEDS START\n") dh.WaitDealSealed(ctx, deal, false, false, nil) - + fmt.Printf("WAIT DEAL SEALEDS END\n") return deal, res, path } @@ -175,6 +177,42 @@ loop: cb() } } + fmt.Printf("WAIT DEAL SEALED LOOP BROKEN\n") +} + +// WaitDealSealedQuiet waits until the deal is sealed, without logging anything. 
+func (dh *DealHarness) WaitDealSealedQuiet(ctx context.Context, deal *cid.Cid, noseal, noSealStart bool, cb func()) { +loop: + for { + di, err := dh.client.ClientGetDealInfo(ctx, *deal) + require.NoError(dh.t, err) + + switch di.State { + case storagemarket.StorageDealAwaitingPreCommit, storagemarket.StorageDealSealing: + if noseal { + return + } + if !noSealStart { + dh.StartSealingWaiting(ctx) + } + case storagemarket.StorageDealProposalRejected: + dh.t.Fatal("deal rejected") + case storagemarket.StorageDealFailing: + dh.t.Fatal("deal failed") + case storagemarket.StorageDealError: + dh.t.Fatal("deal errored", di.Message) + case storagemarket.StorageDealActive: + break loop + } + + _, err = dh.market.MarketListIncompleteDeals(ctx) + require.NoError(dh.t, err) + + time.Sleep(time.Second / 2) + if cb != nil { + cb() + } + } } func (dh *DealHarness) ExpectDealFailure(ctx context.Context, deal *cid.Cid, errs string) error { @@ -254,12 +292,11 @@ func (dh *DealHarness) WaitDealPublished(ctx context.Context, deal *cid.Cid) { func (dh *DealHarness) StartSealingWaiting(ctx context.Context) { snums, err := dh.main.SectorsList(ctx) require.NoError(dh.t, err) - for _, snum := range snums { si, err := dh.main.SectorsStatus(ctx, snum, false) require.NoError(dh.t, err) - dh.t.Logf("Sector state: %s", si.State) + dh.t.Logf("Sector state <%d>-[%d]:, %s", snum, si.SealProof, si.State) if si.State == api.SectorState(sealing.WaitDeals) { require.NoError(dh.t, dh.main.SectorStartSealing(ctx, snum)) } @@ -285,17 +322,45 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root caddr, err := dh.client.WalletDefaultAddress(ctx) require.NoError(dh.t, err) - ref := &api.FileRef{ - Path: carFile.Name(), - IsCAR: carExport, - } - - updates, err := dh.client.ClientRetrieveWithEvents(ctx, offers[0].Order(caddr), ref) + updatesCtx, cancel := context.WithCancel(ctx) + updates, err := dh.client.ClientGetRetrievalUpdates(updatesCtx) require.NoError(dh.t, err) - for 
update := range updates { - require.Emptyf(dh.t, update.Err, "retrieval failed: %s", update.Err) + retrievalRes, err := dh.client.ClientRetrieve(ctx, offers[0].Order(caddr)) + require.NoError(dh.t, err) +consumeEvents: + for { + var evt api.RetrievalInfo + select { + case <-updatesCtx.Done(): + dh.t.Fatal("Retrieval Timed Out") + case evt = <-updates: + if evt.ID != retrievalRes.DealID { + continue + } + } + switch evt.Status { + case retrievalmarket.DealStatusCompleted: + break consumeEvents + case retrievalmarket.DealStatusRejected: + dh.t.Fatalf("Retrieval Proposal Rejected: %s", evt.Message) + case + retrievalmarket.DealStatusDealNotFound, + retrievalmarket.DealStatusErrored: + dh.t.Fatalf("Retrieval Error: %s", evt.Message) + } } + cancel() + + require.NoError(dh.t, dh.client.ClientExport(ctx, + api.ExportRef{ + Root: root, + DealID: retrievalRes.DealID, + }, + api.FileRef{ + Path: carFile.Name(), + IsCAR: carExport, + })) ret := carFile.Name() if carExport { @@ -309,7 +374,7 @@ func (dh *DealHarness) PerformRetrieval(ctx context.Context, deal *cid.Cid, root func (dh *DealHarness) ExtractFileFromCAR(ctx context.Context, file *os.File) (out *os.File) { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), file) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), file) require.NoError(dh.t, err) b, err := bserv.GetBlock(ctx, ch.Roots[0]) diff --git a/itests/kit/ensemble.go b/itests/kit/ensemble.go index 90a614645..0227ee81e 100644 --- a/itests/kit/ensemble.go +++ b/itests/kit/ensemble.go @@ -277,7 +277,7 @@ func (n *Ensemble) Start() *Ensemble { // We haven't been bootstrapped yet, we need to generate genesis and // create the networking backbone. 
gtempl = n.generateGenesis() - n.mn = mocknet.New(ctx) + n.mn = mocknet.New() } // --------------------- @@ -487,7 +487,7 @@ func (n *Ensemble) Start() *Ensemble { ds, err := lr.Datastore(context.TODO(), "/metadata") require.NoError(n.t, err) - err = ds.Put(datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) + err = ds.Put(ctx, datastore.NewKey("miner-address"), m.ActorAddr.Bytes()) require.NoError(n.t, err) nic := storedcounter.New(ds, datastore.NewKey(modules.StorageCounterDSPrefix)) @@ -675,6 +675,43 @@ func (n *Ensemble) Connect(from api.Net, to ...api.Net) *Ensemble { return n } +func (n *Ensemble) BeginMiningMustPost(blocktime time.Duration, miners ...*TestMiner) []*BlockMiner { + ctx := context.Background() + + // wait one second to make sure that nodes are connected and have handshaken. + // TODO make this deterministic by listening to identify events on the + // libp2p eventbus instead (or something else). + time.Sleep(1 * time.Second) + + var bms []*BlockMiner + if len(miners) == 0 { + // no miners have been provided explicitly, instantiate block miners + // for all active miners that aren't still mining. + for _, m := range n.active.miners { + if _, ok := n.active.bms[m]; ok { + continue // skip, already have a block miner + } + miners = append(miners, m) + } + } + + if len(miners) > 1 { + n.t.Fatalf("Only one active miner for MustPost, but have %d", len(miners)) + } + + for _, m := range miners { + bm := NewBlockMiner(n.t, m) + bm.MineBlocksMustPost(ctx, blocktime) + n.t.Cleanup(bm.Stop) + + bms = append(bms, bm) + + n.active.bms[m] = bm + } + + return bms +} + // BeginMining kicks off mining for the specified miners. If nil or 0-length, // it will kick off mining for all enrolled and active miners. It also adds a // cleanup function to stop all mining operations on test teardown. 
diff --git a/itests/kit/ensemble_opts_nv.go b/itests/kit/ensemble_opts_nv.go index a03e63f4a..45ed51443 100644 --- a/itests/kit/ensemble_opts_nv.go +++ b/itests/kit/ensemble_opts_nv.go @@ -38,14 +38,25 @@ func SDRUpgradeAt(calico, persian abi.ChainEpoch) EnsembleOpt { } func LatestActorsAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { + /* inline-gen template + return UpgradeSchedule(stmgr.Upgrade{ + Network: network.Version{{add .latestNetworkVersion -1}}, + Height: -1, + }, stmgr.Upgrade{ + Network: network.Version{{.latestNetworkVersion}}, + Height: upgradeHeight, + Migration: filcns.UpgradeActorsV{{.latestActorsVersion}}, + }) + /* inline-gen start */ return UpgradeSchedule(stmgr.Upgrade{ - Network: network.Version12, + Network: network.Version14, Height: -1, }, stmgr.Upgrade{ - Network: network.Version13, + Network: network.Version15, Height: upgradeHeight, - Migration: filcns.UpgradeActorsV5, + Migration: filcns.UpgradeActorsV7, }) + /* inline-gen end */ } func TurboUpgradeAt(upgradeHeight abi.ChainEpoch) EnsembleOpt { diff --git a/itests/kit/files.go b/itests/kit/files.go index 9babac941..c78352afe 100644 --- a/itests/kit/files.go +++ b/itests/kit/files.go @@ -83,7 +83,7 @@ func CreateRandomCARv1(t *testing.T, rseed, size int) (carV1FilePath string, ori require.NoError(t, car.WriteCar(ctx, dagSvc, []cid.Cid{root}, tmp)) _, err = tmp.Seek(0, io.SeekStart) require.NoError(t, err) - hd, _, err := car.ReadHeader(bufio.NewReader(tmp)) + hd, err := car.ReadHeader(bufio.NewReader(tmp)) require.NoError(t, err) require.EqualValues(t, 1, hd.Version) require.Len(t, hd.Roots, 1) diff --git a/itests/multisig_test.go b/itests/multisig_test.go index 9a15e8c0e..09d9254a3 100644 --- a/itests/multisig_test.go +++ b/itests/multisig_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -10,6 +11,12 @@ import ( // TestMultisig does a basic test to exercise the multisig CLI commands func TestMultisig(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, 
@CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blockTime := 5 * time.Millisecond diff --git a/itests/nonce_test.go b/itests/nonce_test.go index b50fcbe26..e0c247ed6 100644 --- a/itests/nonce_test.go +++ b/itests/nonce_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -13,6 +14,12 @@ import ( ) func TestNonceIncremental(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 ctx := context.Background() kit.QuietMiningLogs() @@ -51,6 +58,7 @@ func TestNonceIncremental(t *testing.T) { } for _, sm := range sms { + //stm: @CHAIN_STATE_WAIT_MSG_001 _, err := client.StateWaitMsg(ctx, sm.Cid(), 3, api.LookbackNoLimit, true) require.NoError(t, err) } diff --git a/itests/paych_api_test.go b/itests/paych_api_test.go index 49c23545b..a07c499f9 100644 --- a/itests/paych_api_test.go +++ b/itests/paych_api_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -27,6 +28,12 @@ import ( ) func TestPaymentChannelsAPI(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, 
@CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() ctx := context.Background() @@ -107,6 +114,7 @@ func TestPaymentChannelsAPI(t *testing.T) { require.NoError(t, err) preds := state.NewStatePredicates(paymentCreator) finished := make(chan struct{}) + //stm: @CHAIN_STATE_GET_ACTOR_001 err = ev.StateChanged(func(ctx context.Context, ts *types.TipSet) (done bool, more bool, err error) { act, err := paymentCreator.StateGetActor(ctx, channel, ts.Key()) if err != nil { @@ -182,6 +190,7 @@ func TestPaymentChannelsAPI(t *testing.T) { collectMsg, err := paymentReceiver.PaychCollect(ctx, channel) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = paymentReceiver.StateWaitMsg(ctx, collectMsg, 3, api.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode, "unable to collect on payment channel") diff --git a/itests/paych_cli_test.go b/itests/paych_cli_test.go index a4ad1920b..c3f9deeba 100644 --- a/itests/paych_cli_test.go +++ b/itests/paych_cli_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -30,6 +31,12 @@ import ( // TestPaymentChannelsBasic does a basic test to exercise the payment channel CLI // commands func TestPaymentChannelsBasic(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + 
//stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -87,6 +94,10 @@ type voucherSpec struct { // TestPaymentChannelStatus tests the payment channel status CLI command func TestPaymentChannelStatus(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -167,6 +178,12 @@ func TestPaymentChannelStatus(t *testing.T) { // TestPaymentChannelVouchers does a basic test to exercise some payment // channel voucher commands func TestPaymentChannelVouchers(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() @@ -299,6 +316,12 @@ func TestPaymentChannelVouchers(t *testing.T) { // TestPaymentChannelVoucherCreateShortfall verifies that if a voucher amount // is greater than what's left in the channel, voucher create fails func TestPaymentChannelVoucherCreateShortfall(t 
*testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 _ = os.Setenv("BELLMAN_NO_GPU", "1") kit.QuietMiningLogs() diff --git a/itests/sdr_upgrade_test.go b/itests/sdr_upgrade_test.go index f4cefd67c..c1198dd0c 100644 --- a/itests/sdr_upgrade_test.go +++ b/itests/sdr_upgrade_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -17,6 +18,15 @@ import ( ) func TestSDRUpgrade(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @CHAIN_STATE_NETWORK_VERSION_001 + + //stm: @MINER_SECTOR_LIST_001 kit.QuietMiningLogs() // oldDelay := policy.GetPreCommitChallengeDelay() diff --git a/itests/sector_finalize_early_test.go b/itests/sector_finalize_early_test.go index fa5cc9dd3..233bc8fcb 100644 --- a/itests/sector_finalize_early_test.go +++ b/itests/sector_finalize_early_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -18,6 +19,13 @@ import ( ) func TestDealsWithFinalizeEarly(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, 
@CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: @STORAGE_INFO_001 if testing.Short() { t.Skip("skipping test in short mode") } @@ -42,6 +50,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) { miner.AddStorage(ctx, t, 1000000000, true, false) miner.AddStorage(ctx, t, 1000000000, false, true) + //stm: @STORAGE_LIST_001 sl, err := miner.StorageList(ctx) require.NoError(t, err) for si, d := range sl { @@ -55,6 +64,7 @@ func TestDealsWithFinalizeEarly(t *testing.T) { dh.RunConcurrentDeals(kit.RunConcurrentDealsOpts{N: 1}) }) + //stm: @STORAGE_LIST_001 sl, err = miner.StorageList(ctx) require.NoError(t, err) for si, d := range sl { diff --git a/itests/sector_miner_collateral_test.go b/itests/sector_miner_collateral_test.go index de3da21f6..af67b132b 100644 --- a/itests/sector_miner_collateral_test.go +++ b/itests/sector_miner_collateral_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -21,6 +22,13 @@ import ( ) func TestMinerBalanceCollateral(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 + //stm: 
@MINER_SECTOR_LIST_001 kit.QuietMiningLogs() blockTime := 5 * time.Millisecond diff --git a/itests/sector_pledge_test.go b/itests/sector_pledge_test.go index a32eb958f..a6aa1a7c8 100644 --- a/itests/sector_pledge_test.go +++ b/itests/sector_pledge_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -22,6 +23,12 @@ import ( ) func TestPledgeSectors(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() blockTime := 50 * time.Millisecond @@ -54,6 +61,7 @@ func TestPledgeSectors(t *testing.T) { } func TestPledgeBatching(t *testing.T) { + //stm: @SECTOR_PRE_COMMIT_FLUSH_001, @SECTOR_COMMIT_FLUSH_001 blockTime := 50 * time.Millisecond runTest := func(t *testing.T, nSectors int) { @@ -110,6 +118,12 @@ func TestPledgeBatching(t *testing.T) { } func TestPledgeMaxBatching(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blockTime := 50 * time.Millisecond runTest := func(t *testing.T) { @@ -173,6 +187,7 @@ func TestPledgeMaxBatching(t *testing.T) { } // 
Ensure that max aggregate message has propagated to the other node by checking current state + //stm: @CHAIN_STATE_MINER_SECTORS_001 sectorInfosAfter, err := full.StateMinerSectors(ctx, miner.ActorAddr, nil, types.EmptyTSK) require.NoError(t, err) assert.Equal(t, miner5.MaxAggregatedSectors+kit.DefaultPresealsPerBootstrapMiner, len(sectorInfosAfter)) @@ -182,6 +197,12 @@ func TestPledgeMaxBatching(t *testing.T) { } func TestPledgeBeforeNv13(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blocktime := 50 * time.Millisecond runTest := func(t *testing.T, nSectors int) { diff --git a/itests/sector_terminate_test.go b/itests/sector_terminate_test.go index 2a3143a0a..536e51538 100644 --- a/itests/sector_terminate_test.go +++ b/itests/sector_terminate_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,6 +15,12 @@ import ( ) func TestTerminate(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -33,6 +40,7 @@ func 
TestTerminate(t *testing.T) { ssz, err := miner.ActorSectorSize(ctx, maddr) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) require.Equal(t, p.MinerPower, p.TotalPower) @@ -45,6 +53,7 @@ func TestTerminate(t *testing.T) { t.Log("wait for power") { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 // Wait until proven. di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -58,6 +67,7 @@ func TestTerminate(t *testing.T) { nSectors++ + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) require.Equal(t, p.MinerPower, p.TotalPower) @@ -67,6 +77,7 @@ func TestTerminate(t *testing.T) { toTerminate := abi.SectorNumber(3) + //stm: @SECTOR_TERMINATE_001 err = miner.SectorTerminate(ctx, toTerminate) require.NoError(t, err) @@ -79,6 +90,7 @@ loop: t.Log("state: ", si.State, msgTriggerred) switch sealing.SectorState(si.State) { + //stm: @SECTOR_TERMINATE_PENDING_001 case sealing.Terminating: if !msgTriggerred { { @@ -111,6 +123,7 @@ loop: // need to wait for message to be mined and applied. 
time.Sleep(5 * time.Second) + //stm: @CHAIN_STATE_MINER_POWER_001 // check power decreased p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -119,6 +132,7 @@ loop: // check in terminated set { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 1, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -133,6 +147,7 @@ loop: require.Equal(t, uint64(0), bflen(parts[0].LiveSectors)) } + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -141,6 +156,7 @@ loop: ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) diff --git a/itests/self_sent_txn_test.go b/itests/self_sent_txn_test.go new file mode 100644 index 000000000..b5ec2c0dc --- /dev/null +++ b/itests/self_sent_txn_test.go @@ -0,0 +1,101 @@ +package itests + +import ( + "context" + "testing" + "time" + + "github.com/filecoin-project/go-state-types/network" + + "github.com/filecoin-project/go-state-types/big" + "github.com/filecoin-project/go-state-types/exitcode" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/itests/kit" + "github.com/stretchr/testify/require" +) + +// these tests check that the versioned code in vm.transfer is functioning correctly across versions! +// we reordered the checks to make sure that a transaction with too much money in it sent to yourself will fail instead of succeeding as a noop +// more info in this PR! 
https://github.com/filecoin-project/lotus/pull/7637 +func TestSelfSentTxnV15(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client15, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version15)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client15.WalletBalance(ctx, client15.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client15.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client15.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client15.DefaultKey.Address, + To: client15.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client15.WalletSignMessage(ctx, client15.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client15.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client15.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.SysErrInsufficientFunds, mLookup.Receipt.ExitCode) +} + +func TestSelfSentTxnV14(t *testing.T) { + ctx := context.Background() + + kit.QuietMiningLogs() + + client14, _, ens := kit.EnsembleMinimal(t, kit.MockProofs(), kit.GenesisNetworkVersion(network.Version14)) + ens.InterconnectAll().BeginMining(10 * time.Millisecond) + + bal, err := client14.WalletBalance(ctx, client14.DefaultKey.Address) + require.NoError(t, err) + + // send self half of account balance + msgHalfBal := &types.Message{ + From: 
client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Div(bal, big.NewInt(2)), + } + smHalfBal, err := client14.MpoolPushMessage(ctx, msgHalfBal, nil) + require.NoError(t, err) + mLookup, err := client14.StateWaitMsg(ctx, smHalfBal.Cid(), 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) + + msgOverBal := &types.Message{ + From: client14.DefaultKey.Address, + To: client14.DefaultKey.Address, + Value: big.Mul(big.NewInt(2), bal), + GasLimit: 10000000000, + GasPremium: big.NewInt(10000000000), + GasFeeCap: big.NewInt(100000000000), + Nonce: 1, + } + smOverBal, err := client14.WalletSignMessage(ctx, client14.DefaultKey.Address, msgOverBal) + require.NoError(t, err) + smcid, err := client14.MpoolPush(ctx, smOverBal) + require.NoError(t, err) + mLookup, err = client14.StateWaitMsg(ctx, smcid, 3, api.LookbackNoLimit, true) + require.NoError(t, err) + require.Equal(t, exitcode.Ok, mLookup.Receipt.ExitCode) +} diff --git a/itests/tape_test.go b/itests/tape_test.go index c6728b834..79f8961e4 100644 --- a/itests/tape_test.go +++ b/itests/tape_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -14,6 +15,12 @@ import ( ) func TestTapeFix(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.QuietMiningLogs() var blocktime = 2 * time.Millisecond diff --git a/itests/verifreg_test.go b/itests/verifreg_test.go index 80a21b0a0..9efefc7b9 100644 --- a/itests/verifreg_test.go +++ 
b/itests/verifreg_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestVerifiedClientTopUp(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 blockTime := 100 * time.Millisecond test := func(nv network.Version, shouldWork bool) func(*testing.T) { @@ -51,6 +58,7 @@ func TestVerifiedClientTopUp(t *testing.T) { defer cancel() // get VRH + //stm: @CHAIN_STATE_VERIFIED_REGISTRY_ROOT_KEY_001 vrh, err := api.StateVerifiedRegistryRootKey(ctx, types.TipSetKey{}) fmt.Println(vrh.String()) require.NoError(t, err) @@ -81,6 +89,7 @@ func TestVerifiedClientTopUp(t *testing.T) { sm, err := api.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err, "AddVerifier failed") + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err := api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) @@ -102,11 +111,13 @@ func TestVerifiedClientTopUp(t *testing.T) { sm, err = api.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 res, err = api.StateWaitMsg(ctx, sm.Cid(), 1, lapi.LookbackNoLimit, true) require.NoError(t, err) require.EqualValues(t, 0, res.Receipt.ExitCode) // check datacap balance + //stm: @CHAIN_STATE_VERIFIED_CLIENT_STATUS_001 dcap, err := api.StateVerifiedClientStatus(ctx, verifiedClientAddr, types.EmptyTSK) require.NoError(t, err) diff --git a/itests/wdpost_dispute_test.go b/itests/wdpost_dispute_test.go index 
aa892aca7..fe723a814 100644 --- a/itests/wdpost_dispute_test.go +++ b/itests/wdpost_dispute_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -20,6 +21,12 @@ import ( ) func TestWindowPostDispute(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -61,6 +68,7 @@ func TestWindowPostDispute(t *testing.T) { evilMinerAddr, err := evilMiner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -72,6 +80,7 @@ func TestWindowPostDispute(t *testing.T) { ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -81,9 +90,11 @@ func TestWindowPostDispute(t *testing.T) { // make sure it has gained power. require.Equal(t, p.MinerPower.RawBytePower, types.NewInt(uint64(ssz))) + //stm: @MINER_SECTOR_LIST_001 evilSectors, err := evilMiner.SectorsList(ctx) require.NoError(t, err) evilSectorNo := evilSectors[0] // only one. + //stm: @CHAIN_STATE_SECTOR_PARTITION_001 evilSectorLoc, err := client.StateSectorPartition(ctx, evilMinerAddr, evilSectorNo, types.EmptyTSK) require.NoError(t, err) @@ -96,6 +107,7 @@ func TestWindowPostDispute(t *testing.T) { // Wait until we need to prove our sector. 
for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index == evilSectorLoc.Deadline && di.CurrentEpoch-di.PeriodStart > 1 { @@ -109,6 +121,7 @@ func TestWindowPostDispute(t *testing.T) { // Wait until after the proving period. for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index != evilSectorLoc.Deadline { @@ -119,6 +132,7 @@ func TestWindowPostDispute(t *testing.T) { t.Log("accepted evil proof") + //stm: @CHAIN_STATE_MINER_POWER_001 // Make sure the evil node didn't lose any power. p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -145,11 +159,13 @@ func TestWindowPostDispute(t *testing.T) { require.NoError(t, err) t.Log("waiting dispute") + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) require.NoError(t, err) require.Zero(t, rec.Receipt.ExitCode, "dispute not accepted: %s", rec.Receipt.ExitCode.Error()) } + //stm: @CHAIN_STATE_MINER_POWER_001 // Objection SUSTAINED! // Make sure the evil node lost power. p, err = client.StateMinerPower(ctx, evilMinerAddr, types.EmptyTSK) @@ -162,6 +178,7 @@ func TestWindowPostDispute(t *testing.T) { // First, recover the sector. 
{ + //stm: @CHAIN_STATE_MINER_INFO_001 minerInfo, err := client.StateMinerInfo(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) @@ -186,6 +203,7 @@ func TestWindowPostDispute(t *testing.T) { sm, err := client.MpoolPushMessage(ctx, msg, nil) require.NoError(t, err) + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) require.NoError(t, err) require.Zero(t, rec.Receipt.ExitCode, "recovery not accepted: %s", rec.Receipt.ExitCode.Error()) @@ -193,6 +211,7 @@ func TestWindowPostDispute(t *testing.T) { // Then wait for the deadline. for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, evilMinerAddr, types.EmptyTSK) require.NoError(t, err) if di.Index == evilSectorLoc.Deadline { @@ -210,6 +229,11 @@ func TestWindowPostDispute(t *testing.T) { } func TestWindowPostDisputeFails(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + //stm: @CHAIN_STATE_MINER_GET_DEADLINES_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -232,6 +256,7 @@ func TestWindowPostDisputeFails(t *testing.T) { miner.PledgeSectors(ctx, 10, 0, nil) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -246,6 +271,7 @@ func TestWindowPostDisputeFails(t *testing.T) { require.NoError(t, err) expectedPower := types.NewInt(uint64(ssz) * (kit.DefaultPresealsPerBootstrapMiner + 10)) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -271,6 +297,7 @@ 
waitForProof: } for { + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) // wait until the deadline finishes. @@ -314,11 +341,13 @@ func submitBadProof( return err } + //stm: @CHAIN_STATE_MINER_INFO_001 minerInfo, err := client.StateMinerInfo(ctx, maddr, head.Key()) if err != nil { return err } + //stm: @CHAIN_STATE_GET_RANDOMNESS_FROM_TICKETS_001 commEpoch := di.Open commRand, err := client.StateGetRandomnessFromTickets( ctx, crypto.DomainSeparationTag_PoStChainCommit, @@ -355,6 +384,7 @@ func submitBadProof( return err } + //stm: @CHAIN_STATE_WAIT_MSG_001 rec, err := client.StateWaitMsg(ctx, sm.Cid(), build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { return err diff --git a/itests/wdpost_test.go b/itests/wdpost_test.go index d87059bb4..bbeedb8d8 100644 --- a/itests/wdpost_test.go +++ b/itests/wdpost_test.go @@ -1,3 +1,4 @@ +//stm: #integration package itests import ( @@ -23,6 +24,12 @@ import ( ) func TestWindowedPost(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -58,6 +65,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err := client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -71,6 +79,7 @@ func testWindowPostUpgrade(t 
*testing.T, blocktime time.Duration, nSectors int, ts := client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err := client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -84,6 +93,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, // Drop 2 sectors from deadline 2 partition 0 (full partition / deadline) { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 2, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -109,6 +119,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, // Drop 1 sectors from deadline 3 partition 0 { + //stm: @CHAIN_STATE_MINER_GET_PARTITIONS_001 parts, err := client.StateMinerPartitions(ctx, maddr, 3, types.EmptyTSK) require.NoError(t, err) require.Greater(t, len(parts), 0) @@ -137,6 +148,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, require.NoError(t, err) } + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -147,6 +159,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -160,6 +173,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, err = miner.StorageMiner.(*impl.StorageMinerAPI).IStorageMgr.(*mock.SectorMgr).MarkFailed(s, false) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -169,6 +183,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime 
time.Duration, nSectors int, ts = client.WaitTillChain(ctx, kit.HeightAtLeast(waitUntil)) t.Logf("Now head.Height = %d", ts.Height()) + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -183,6 +198,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, { // Wait until proven. + //stm: @CHAIN_STATE_MINER_CALCULATE_DEADLINE_001 di, err = client.StateMinerProvingDeadline(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -193,6 +209,7 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, t.Logf("Now head.Height = %d", ts.Height()) } + //stm: @CHAIN_STATE_MINER_POWER_001 p, err = client.StateMinerPower(ctx, maddr, types.EmptyTSK) require.NoError(t, err) @@ -203,6 +220,12 @@ func testWindowPostUpgrade(t *testing.T, blocktime time.Duration, nSectors int, } func TestWindowPostBaseFeeNoBurn(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -225,10 +248,12 @@ func TestWindowPostBaseFeeNoBurn(t *testing.T) { maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_INFO_001 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) require.NoError(t, err) miner.PledgeSectors(ctx, nSectors, 0, nil) + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) en := wact.Nonce @@ -237,6 +262,7 @@ func 
TestWindowPostBaseFeeNoBurn(t *testing.T) { waitForProof: for { + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) if wact.Nonce > en { @@ -246,9 +272,11 @@ waitForProof: build.Clock.Sleep(blocktime) } + //stm: @CHAIN_STATE_LIST_MESSAGES_001 slm, err := client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) require.NoError(t, err) + //stm: @CHAIN_STATE_REPLAY_001 pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) require.NoError(t, err) @@ -256,6 +284,12 @@ waitForProof: } func TestWindowPostBaseFeeBurn(t *testing.T) { + //stm: @CHAIN_SYNCER_LOAD_GENESIS_001, @CHAIN_SYNCER_FETCH_TIPSET_001, + //stm: @CHAIN_SYNCER_START_001, @CHAIN_SYNCER_SYNC_001, @BLOCKCHAIN_BEACON_VALIDATE_BLOCK_VALUES_01 + //stm: @CHAIN_SYNCER_COLLECT_CHAIN_001, @CHAIN_SYNCER_COLLECT_HEADERS_001, @CHAIN_SYNCER_VALIDATE_TIPSET_001 + //stm: @CHAIN_SYNCER_NEW_PEER_HEAD_001, @CHAIN_SYNCER_VALIDATE_MESSAGE_META_001, @CHAIN_SYNCER_STOP_001 + + //stm: @CHAIN_INCOMING_HANDLE_INCOMING_BLOCKS_001, @CHAIN_INCOMING_VALIDATE_BLOCK_PUBSUB_001, @CHAIN_INCOMING_VALIDATE_MESSAGE_PUBSUB_001 kit.Expensive(t) kit.QuietMiningLogs() @@ -271,10 +305,12 @@ func TestWindowPostBaseFeeBurn(t *testing.T) { maddr, err := miner.ActorAddress(ctx) require.NoError(t, err) + //stm: @CHAIN_STATE_MINER_INFO_001 mi, err := client.StateMinerInfo(ctx, maddr, types.EmptyTSK) require.NoError(t, err) miner.PledgeSectors(ctx, 10, 0, nil) + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) en := wact.Nonce @@ -283,6 +319,7 @@ func TestWindowPostBaseFeeBurn(t *testing.T) { waitForProof: for { + //stm: @CHAIN_STATE_GET_ACTOR_001 wact, err := client.StateGetActor(ctx, mi.Worker, types.EmptyTSK) require.NoError(t, err) if wact.Nonce > en { @@ -292,9 +329,11 @@ waitForProof: build.Clock.Sleep(blocktime) } + //stm: @CHAIN_STATE_LIST_MESSAGES_001 slm, err := 
client.StateListMessages(ctx, &api.MessageMatch{To: maddr}, types.EmptyTSK, 0) require.NoError(t, err) + //stm: @CHAIN_STATE_REPLAY_001 pmr, err := client.StateReplay(ctx, types.EmptyTSK, slm[0]) require.NoError(t, err) diff --git a/lib/backupds/backupds_test.go b/lib/backupds/backupds_test.go index f7bc36e22..c681491e3 100644 --- a/lib/backupds/backupds_test.go +++ b/lib/backupds/backupds_test.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -17,14 +18,14 @@ const valSize = 512 << 10 func putVals(t *testing.T, ds datastore.Datastore, start, end int) { for i := start; i < end; i++ { - err := ds.Put(datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) + err := ds.Put(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize)))) require.NoError(t, err) } } func checkVals(t *testing.T, ds datastore.Datastore, start, end int, exist bool) { for i := start; i < end; i++ { - v, err := ds.Get(datastore.NewKey(fmt.Sprintf("%d", i))) + v, err := ds.Get(context.TODO(), datastore.NewKey(fmt.Sprintf("%d", i))) if exist { require.NoError(t, err) expect := []byte(fmt.Sprintf("%d-%s", i, strings.Repeat("~", valSize))) @@ -44,7 +45,7 @@ func TestNoLogRestore(t *testing.T) { require.NoError(t, err) var bup bytes.Buffer - require.NoError(t, bds.Backup(&bup)) + require.NoError(t, bds.Backup(context.TODO(), &bup)) putVals(t, ds1, 10, 20) diff --git a/lib/backupds/datastore.go b/lib/backupds/datastore.go index 350988a50..f0ece10ba 100644 --- a/lib/backupds/datastore.go +++ b/lib/backupds/datastore.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "crypto/sha256" "io" "sync" @@ -52,7 +53,7 @@ func Wrap(child datastore.Batching, logdir string) (*Datastore, error) { // Writes a datastore dump into the provided writer as // [array(*) of [key, value] tuples, checksum] -func (d *Datastore) Backup(out io.Writer) error { +func (d *Datastore) 
Backup(ctx context.Context, out io.Writer) error { scratch := make([]byte, 9) if err := cbg.WriteMajorTypeHeaderBuf(scratch, out, cbg.MajArray, 2); err != nil { @@ -75,7 +76,7 @@ func (d *Datastore) Backup(out io.Writer) error { log.Info("Starting datastore backup") defer log.Info("Datastore backup done") - qr, err := d.child.Query(query.Query{}) + qr, err := d.child.Query(ctx, query.Query{}) if err != nil { return xerrors.Errorf("query: %w", err) } @@ -132,23 +133,23 @@ func (d *Datastore) Backup(out io.Writer) error { // proxy -func (d *Datastore) Get(key datastore.Key) (value []byte, err error) { - return d.child.Get(key) +func (d *Datastore) Get(ctx context.Context, key datastore.Key) (value []byte, err error) { + return d.child.Get(ctx, key) } -func (d *Datastore) Has(key datastore.Key) (exists bool, err error) { - return d.child.Has(key) +func (d *Datastore) Has(ctx context.Context, key datastore.Key) (exists bool, err error) { + return d.child.Has(ctx, key) } -func (d *Datastore) GetSize(key datastore.Key) (size int, err error) { - return d.child.GetSize(key) +func (d *Datastore) GetSize(ctx context.Context, key datastore.Key) (size int, err error) { + return d.child.GetSize(ctx, key) } -func (d *Datastore) Query(q query.Query) (query.Results, error) { - return d.child.Query(q) +func (d *Datastore) Query(ctx context.Context, q query.Query) (query.Results, error) { + return d.child.Query(ctx, q) } -func (d *Datastore) Put(key datastore.Key, value []byte) error { +func (d *Datastore) Put(ctx context.Context, key datastore.Key, value []byte) error { d.backupLk.RLock() defer d.backupLk.RUnlock() @@ -160,21 +161,21 @@ func (d *Datastore) Put(key datastore.Key, value []byte) error { } } - return d.child.Put(key, value) + return d.child.Put(ctx, key, value) } -func (d *Datastore) Delete(key datastore.Key) error { +func (d *Datastore) Delete(ctx context.Context, key datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Delete(key) + 
return d.child.Delete(ctx, key) } -func (d *Datastore) Sync(prefix datastore.Key) error { +func (d *Datastore) Sync(ctx context.Context, prefix datastore.Key) error { d.backupLk.RLock() defer d.backupLk.RUnlock() - return d.child.Sync(prefix) + return d.child.Sync(ctx, prefix) } func (d *Datastore) CloseLog() error { @@ -196,8 +197,8 @@ func (d *Datastore) Close() error { ) } -func (d *Datastore) Batch() (datastore.Batch, error) { - b, err := d.child.Batch() +func (d *Datastore) Batch(ctx context.Context) (datastore.Batch, error) { + b, err := d.child.Batch(ctx) if err != nil { return nil, err } @@ -215,7 +216,7 @@ type bbatch struct { rlk sync.Locker } -func (b *bbatch) Put(key datastore.Key, value []byte) error { +func (b *bbatch) Put(ctx context.Context, key datastore.Key, value []byte) error { if b.d.log != nil { b.d.log <- Entry{ Key: []byte(key.String()), @@ -224,18 +225,18 @@ func (b *bbatch) Put(key datastore.Key, value []byte) error { } } - return b.b.Put(key, value) + return b.b.Put(ctx, key, value) } -func (b *bbatch) Delete(key datastore.Key) error { - return b.b.Delete(key) +func (b *bbatch) Delete(ctx context.Context, key datastore.Key) error { + return b.b.Delete(ctx, key) } -func (b *bbatch) Commit() error { +func (b *bbatch) Commit(ctx context.Context) error { b.rlk.Lock() defer b.rlk.Unlock() - return b.b.Commit() + return b.b.Commit(ctx) } var _ datastore.Batch = &bbatch{} diff --git a/lib/backupds/log.go b/lib/backupds/log.go index b76dfbfe6..b89f410f0 100644 --- a/lib/backupds/log.go +++ b/lib/backupds/log.go @@ -1,6 +1,7 @@ package backupds import ( + "context" "fmt" "io" "io/ioutil" @@ -100,6 +101,7 @@ type logfile struct { var compactThresh = 2 func (d *Datastore) createLog(logdir string) (*logfile, string, error) { + ctx := context.TODO() p := filepath.Join(logdir, strconv.FormatInt(time.Now().Unix(), 10)+".log.cbor") log.Infow("creating log", "file", p) @@ -108,7 +110,7 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, 
error) { return nil, "", err } - if err := d.Backup(f); err != nil { + if err := d.Backup(ctx, f); err != nil { return nil, "", xerrors.Errorf("writing log base: %w", err) } if err := f.Sync(); err != nil { @@ -122,8 +124,9 @@ func (d *Datastore) createLog(logdir string) (*logfile, string, error) { } func (d *Datastore) openLog(p string) (*logfile, string, error) { + ctx := context.TODO() log.Infow("opening log", "file", p) - lh, err := d.child.Get(loghead) + lh, err := d.child.Get(ctx, loghead) if err != nil { return nil, "", xerrors.Errorf("checking log head (logfile '%s'): %w", p, err) } @@ -212,6 +215,7 @@ func (d *Datastore) openLog(p string) (*logfile, string, error) { } func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { + ctx := context.TODO() lval := []byte(fmt.Sprintf("%s;%s;%d", logname, uuid.New(), time.Now().Unix())) err := l.writeEntry(&Entry{ @@ -223,7 +227,7 @@ func (l *logfile) writeLogHead(logname string, ds datastore.Batching) error { return xerrors.Errorf("writing loghead to the log: %w", err) } - if err := ds.Put(loghead, lval); err != nil { + if err := ds.Put(ctx, loghead, lval); err != nil { return xerrors.Errorf("writing loghead to the datastore: %w", err) } diff --git a/lib/backupds/read.go b/lib/backupds/read.go index a44442af1..af4f30888 100644 --- a/lib/backupds/read.go +++ b/lib/backupds/read.go @@ -2,6 +2,7 @@ package backupds import ( "bytes" + "context" "crypto/sha256" "io" "os" @@ -117,13 +118,13 @@ func ReadBackup(r io.Reader, cb func(key datastore.Key, value []byte, log bool) } func RestoreInto(r io.Reader, dest datastore.Batching) error { - batch, err := dest.Batch() + batch, err := dest.Batch(context.TODO()) if err != nil { return xerrors.Errorf("creating batch: %w", err) } _, err = ReadBackup(r, func(key datastore.Key, value []byte, _ bool) error { - if err := batch.Put(key, value); err != nil { + if err := batch.Put(context.TODO(), key, value); err != nil { return xerrors.Errorf("put key: %w", err) } 
@@ -133,7 +134,7 @@ func RestoreInto(r io.Reader, dest datastore.Batching) error { return xerrors.Errorf("reading backup: %w", err) } - if err := batch.Commit(); err != nil { + if err := batch.Commit(context.TODO()); err != nil { return xerrors.Errorf("committing batch: %w", err) } diff --git a/lib/tracing/setup.go b/lib/tracing/setup.go index b8c0399ad..d90099f79 100644 --- a/lib/tracing/setup.go +++ b/lib/tracing/setup.go @@ -4,9 +4,16 @@ import ( "os" "strings" - "contrib.go.opencensus.io/exporter/jaeger" + octrace "go.opencensus.io/trace" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/bridge/opencensus" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.uber.org/zap" + logging "github.com/ipfs/go-log/v2" - "go.opencensus.io/trace" ) var log = logging.Logger("tracing") @@ -14,7 +21,6 @@ var log = logging.Logger("tracing") const ( // environment variable names envCollectorEndpoint = "LOTUS_JAEGER_COLLECTOR_ENDPOINT" - envAgentEndpoint = "LOTUS_JAEGER_AGENT_ENDPOINT" envAgentHost = "LOTUS_JAEGER_AGENT_HOST" envAgentPort = "LOTUS_JAEGER_AGENT_PORT" envJaegerUser = "LOTUS_JAEGER_USERNAME" @@ -26,54 +32,61 @@ const ( // The agent endpoint is a thrift/udp protocol and should be given // as a string like "hostname:port". The agent can also be configured // with separate host and port variables. -func jaegerOptsFromEnv(opts *jaeger.Options) bool { +func jaegerOptsFromEnv() jaeger.EndpointOption { var e string var ok bool - if e, ok = os.LookupEnv(envJaegerUser); ok { - if p, ok := os.LookupEnv(envJaegerCred); ok { - opts.Username = e - opts.Password = p - } else { - log.Warn("jaeger username supplied with no password. 
authentication will not be used.") - } - } + if e, ok = os.LookupEnv(envCollectorEndpoint); ok { - opts.CollectorEndpoint = e + options := []jaeger.CollectorEndpointOption{jaeger.WithEndpoint(e)} + if u, ok := os.LookupEnv(envJaegerUser); ok { + if p, ok := os.LookupEnv(envJaegerCred); ok { + options = append(options, jaeger.WithUsername(u)) + options = append(options, jaeger.WithPassword(p)) + } else { + log.Warn("jaeger username supplied with no password. authentication will not be used.") + } + } log.Infof("jaeger tracess will send to collector %s", e) - return true - } - if e, ok = os.LookupEnv(envAgentEndpoint); ok { - log.Infof("jaeger traces will be sent to agent %s", e) - opts.AgentEndpoint = e - return true + return jaeger.WithCollectorEndpoint(options...) } + if e, ok = os.LookupEnv(envAgentHost); ok { + options := []jaeger.AgentEndpointOption{jaeger.WithAgentHost(e), jaeger.WithLogger(zap.NewStdLog(log.Desugar()))} + var ep string if p, ok := os.LookupEnv(envAgentPort); ok { - opts.AgentEndpoint = strings.Join([]string{e, p}, ":") + options = append(options, jaeger.WithAgentPort(p)) + ep = strings.Join([]string{e, p}, ":") } else { - opts.AgentEndpoint = strings.Join([]string{e, "6831"}, ":") + ep = strings.Join([]string{e, "6831"}, ":") } - log.Infof("jaeger traces will be sent to agent %s", opts.AgentEndpoint) - return true + log.Infof("jaeger traces will be sent to agent %s", ep) + return jaeger.WithAgentEndpoint(options...) 
} - return false + return nil } -func SetupJaegerTracing(serviceName string) *jaeger.Exporter { - opts := jaeger.Options{} - if !jaegerOptsFromEnv(&opts) { +func SetupJaegerTracing(serviceName string) *tracesdk.TracerProvider { + jaegerEndpoint := jaegerOptsFromEnv() + if jaegerEndpoint == nil { return nil } - opts.ServiceName = serviceName - je, err := jaeger.NewExporter(opts) + je, err := jaeger.New(jaegerEndpoint) if err != nil { log.Errorw("failed to create the jaeger exporter", "error", err) return nil } - - trace.RegisterExporter(je) - trace.ApplyConfig(trace.Config{ - DefaultSampler: trace.AlwaysSample(), - }) - return je + tp := tracesdk.NewTracerProvider( + // Always be sure to batch in production. + tracesdk.WithBatcher(je), + // Record information about this application in a Resource. + tracesdk.WithResource(resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String(serviceName), + )), + tracesdk.WithSampler(tracesdk.AlwaysSample()), + ) + otel.SetTracerProvider(tp) + tracer := tp.Tracer(serviceName) + octrace.DefaultTracer = opencensus.NewTracer(tracer) + return tp } diff --git a/lotuspond/front/package-lock.json b/lotuspond/front/package-lock.json index 35e0f0b7c..580ce978f 100644 --- a/lotuspond/front/package-lock.json +++ b/lotuspond/front/package-lock.json @@ -1631,9 +1631,9 @@ } }, "acorn": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.2.1.tgz", - "integrity": "sha512-JD0xT5FCRDNyjDda3Lrg/IxFscp9q4tiYtxE1/nOzlKCk7hIRuYjhq1kCNkbPjMRMZuFq20HNQn1I9k8Oj0E+Q==" + "version": "6.4.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.2.tgz", + "integrity": "sha512-XtGIhXwF8YM8bJhGxG5kXgjkEuNGLTkoYqVE+KMR+aspr4KGYmKYg7yUe3KghyQ9yheNwLnjmzh/7+gfDBmHCQ==" }, "acorn-dynamic-import": { "version": "4.0.0", @@ -1665,14 +1665,21 @@ "integrity": "sha512-z55ocwKBRLryBs394Sm3ushTtBeg6VAeuku7utSoSnsJKvKcnXFIyC6vh27n3rXyxSgkJBBCAvyOn7gSUcTYjg==" }, "ajv": { - "version": "6.10.2", - "resolved": 
"https://registry.npmjs.org/ajv/-/ajv-6.10.2.tgz", - "integrity": "sha512-TXtUUEYHuaTEbLZWIKUr5pmBuhDLy+8KYtPYdcV8qC+pOZL+NKqYwvWSRrVXHn+ZmRRAu8vJTAznH7Oag6RVRw==", + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", "requires": { - "fast-deep-equal": "^2.0.1", + "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", "json-schema-traverse": "^0.4.1", "uri-js": "^4.2.2" + }, + "dependencies": { + "fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==" + } } }, "ajv-errors": { @@ -5286,11 +5293,6 @@ "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=" }, - "fast-deep-equal": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-2.0.1.tgz", - "integrity": "sha1-ewUhjd+WZ79/Nwv3/bLLFf3Qqkk=" - }, "fast-glob": { "version": "2.2.7", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.7.tgz", @@ -5492,22 +5494,9 @@ } }, "follow-redirects": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.7.0.tgz", - "integrity": "sha512-m/pZQy4Gj287eNy94nivy5wchN3Kp+Q5WgUPNy5lJSZ3sgkVKSYV/ZChMAQVIgx1SqfZ2zBZtPA2YlXIWxxJOQ==", - "requires": { - "debug": "^3.2.6" - }, - "dependencies": { - "debug": { - "version": "3.2.6", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz", - "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==", - "requires": { - "ms": "^2.1.1" - } - } - } + "version": "1.14.8", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.8.tgz", + 
"integrity": "sha512-1x0S9UVJHsQprFcEC/qnNzBLcIxsjAV905f/UkQxbclCsoTWlacCNOpQa/anodLl2uaEKFhfWOvM2Qg77+15zA==" }, "for-in": { "version": "1.0.2", @@ -7756,9 +7745,9 @@ }, "dependencies": { "acorn": { - "version": "5.7.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.3.tgz", - "integrity": "sha512-T/zvzYRfbVojPWahDsE5evJdHb3oJoQfFbsrKM7w5Zcs++Tr257tia3BmMP8XYVjp1S9RZXQMh7gao96BlqZOw==" + "version": "5.7.4", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-5.7.4.tgz", + "integrity": "sha512-1D++VG7BhrtvQpNbBzovKNc1FLGGEE/oGe7b9xJm/RFHMBeUaUGpluV9RLjZa47YFdPcDAenEYuq9pQPcMdLJg==" }, "ws": { "version": "5.2.3", @@ -12440,9 +12429,9 @@ } }, "url-parse": { - "version": "1.5.3", - "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.3.tgz", - "integrity": "sha512-IIORyIQD9rvj0A4CLWsHkBBJuNqWpFQe224b6j9t/ABmquIS0qDU2pY6kl6AuOrL5OkCXHMCFNe1jBcuAggjvQ==", + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", "requires": { "querystringify": "^2.1.1", "requires-port": "^1.0.0" diff --git a/lotuspond/front/src/chain/methods.json b/lotuspond/front/src/chain/methods.json index c0f69d58c..15c04ca28 100644 --- a/lotuspond/front/src/chain/methods.json +++ b/lotuspond/front/src/chain/methods.json @@ -622,5 +622,113 @@ "AddVerifiedClient", "UseBytes", "RestoreBytes" + ], + "fil/7/account": [ + "Send", + "Constructor", + "PubkeyAddress" + ], + "fil/7/cron": [ + "Send", + "Constructor", + "EpochTick" + ], + "fil/7/init": [ + "Send", + "Constructor", + "Exec" + ], + "fil/7/multisig": [ + "Send", + "Constructor", + "Propose", + "Approve", + "Cancel", + "AddSigner", + "RemoveSigner", + "SwapSigner", + "ChangeNumApprovalsThreshold", + "LockBalance" + ], + "fil/7/paymentchannel": [ + "Send", + "Constructor", + "UpdateChannelState", + "Settle", + "Collect" + ], + "fil/7/reward": [ + "Send", + 
"Constructor", + "AwardBlockReward", + "ThisEpochReward", + "UpdateNetworkKPI" + ], + "fil/7/storagemarket": [ + "Send", + "Constructor", + "AddBalance", + "WithdrawBalance", + "PublishStorageDeals", + "VerifyDealsForActivation", + "ActivateDeals", + "OnMinerSectorsTerminate", + "ComputeDataCommitment", + "CronTick" + ], + "fil/7/storageminer": [ + "Send", + "Constructor", + "ControlAddresses", + "ChangeWorkerAddress", + "ChangePeerID", + "SubmitWindowedPoSt", + "PreCommitSector", + "ProveCommitSector", + "ExtendSectorExpiration", + "TerminateSectors", + "DeclareFaults", + "DeclareFaultsRecovered", + "OnDeferredCronEvent", + "CheckSectorProven", + "ApplyRewards", + "ReportConsensusFault", + "WithdrawBalance", + "ConfirmSectorProofsValid", + "ChangeMultiaddrs", + "CompactPartitions", + "CompactSectorNumbers", + "ConfirmUpdateWorkerKey", + "RepayDebt", + "ChangeOwnerAddress", + "DisputeWindowedPoSt", + "PreCommitSectorBatch", + "ProveCommitAggregate", + "ProveReplicaUpdates" + ], + "fil/7/storagepower": [ + "Send", + "Constructor", + "CreateMiner", + "UpdateClaimedPower", + "EnrollCronEvent", + "CronTick", + "UpdatePledgeTotal", + "SubmitPoRepForBulkVerify", + "CurrentTotalPower" + ], + "fil/7/system": [ + "Send", + "Constructor" + ], + "fil/7/verifiedregistry": [ + "Send", + "Constructor", + "AddVerifier", + "RemoveVerifier", + "AddVerifiedClient", + "UseBytes", + "RestoreBytes", + "RemoveVerifiedClientDataCap" ] } \ No newline at end of file diff --git a/markets/dagstore/blockstore.go b/markets/dagstore/blockstore.go index 8980d40cf..317cb08b9 100644 --- a/markets/dagstore/blockstore.go +++ b/markets/dagstore/blockstore.go @@ -1,6 +1,7 @@ package dagstore import ( + "context" "io" blocks "github.com/ipfs/go-block-format" @@ -20,14 +21,14 @@ type Blockstore struct { var _ bstore.Blockstore = (*Blockstore)(nil) -func (b *Blockstore) DeleteBlock(c cid.Cid) error { +func (b *Blockstore) DeleteBlock(context.Context, cid.Cid) error { return xerrors.Errorf("DeleteBlock 
called but not implemented") } -func (b *Blockstore) Put(block blocks.Block) error { +func (b *Blockstore) Put(context.Context, blocks.Block) error { return xerrors.Errorf("Put called but not implemented") } -func (b *Blockstore) PutMany(blocks []blocks.Block) error { +func (b *Blockstore) PutMany(context.Context, []blocks.Block) error { return xerrors.Errorf("PutMany called but not implemented") } diff --git a/markets/dagstore/miner_api.go b/markets/dagstore/miner_api.go index afe623eb2..8a12097d5 100644 --- a/markets/dagstore/miner_api.go +++ b/markets/dagstore/miner_api.go @@ -3,39 +3,56 @@ package dagstore import ( "context" "fmt" - "io" - "github.com/filecoin-project/dagstore/throttle" "github.com/ipfs/go-cid" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/dagstore/throttle" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/shared" + "github.com/filecoin-project/go-state-types/abi" ) +//go:generate go run github.com/golang/mock/mockgen -destination=mocks/mock_lotus_accessor.go -package=mock_dagstore . 
MinerAPI + type MinerAPI interface { - FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) + FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) Start(ctx context.Context) error } +type SectorAccessor interface { + retrievalmarket.SectorAccessor + + UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) +} + type minerAPI struct { - pieceStore piecestore.PieceStore - sa retrievalmarket.SectorAccessor - throttle throttle.Throttler - readyMgr *shared.ReadyManager + pieceStore piecestore.PieceStore + sa SectorAccessor + throttle throttle.Throttler + unsealThrottle throttle.Throttler + readyMgr *shared.ReadyManager } var _ MinerAPI = (*minerAPI)(nil) -func NewMinerAPI(store piecestore.PieceStore, sa retrievalmarket.SectorAccessor, concurrency int) MinerAPI { +func NewMinerAPI(store piecestore.PieceStore, sa SectorAccessor, concurrency int, unsealConcurrency int) MinerAPI { + var unsealThrottle throttle.Throttler + if unsealConcurrency == 0 { + unsealThrottle = throttle.Noop() + } else { + unsealThrottle = throttle.Fixed(unsealConcurrency) + } return &minerAPI{ - pieceStore: store, - sa: sa, - throttle: throttle.Fixed(concurrency), - readyMgr: shared.NewReadyManager(), + pieceStore: store, + sa: sa, + throttle: throttle.Fixed(concurrency), + unsealThrottle: unsealThrottle, + readyMgr: shared.NewReadyManager(), } } @@ -91,7 +108,7 @@ func (m *minerAPI) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, erro return false, nil } -func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (mount.Reader, error) { err := m.readyMgr.AwaitReady() if err != nil { return 
nil, err @@ -117,7 +134,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io deal := deal // Throttle this path to avoid flooding the storage subsystem. - var reader io.ReadCloser + var reader mount.Reader err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { isUnsealed, err := m.sa.IsUnsealed(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) if err != nil { @@ -127,7 +144,7 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io return nil } // Because we know we have an unsealed copy, this UnsealSector call will actually not perform any unsealing. - reader, err = m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) return err }) @@ -143,13 +160,19 @@ func (m *minerAPI) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io } lastErr := xerrors.New("no sectors found to unseal from") + // if there is no unsealed sector containing the piece, just read the piece from the first sector we are able to unseal. for _, deal := range pieceInfo.Deals { // Note that if the deal data is not already unsealed, unsealing may // block for a long time with the current PoRep - // - // This path is unthrottled. - reader, err := m.sa.UnsealSector(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + var reader mount.Reader + deal := deal + err := m.throttle.Do(ctx, func(ctx context.Context) (err error) { + // There is no unsealed copy here, so this UnsealSectorAt call may have to actually unseal the sector (which can take a long time).
+ reader, err = m.sa.UnsealSectorAt(ctx, deal.SectorID, deal.Offset.Unpadded(), deal.Length.Unpadded()) + return err + }) + if err != nil { lastErr = xerrors.Errorf("failed to unseal deal %d: %w", deal.DealID, err) log.Warn(lastErr.Error()) diff --git a/markets/dagstore/miner_api_test.go b/markets/dagstore/miner_api_test.go index 4a61c62a8..ee2f0cdce 100644 --- a/markets/dagstore/miner_api_test.go +++ b/markets/dagstore/miner_api_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/specs-actors/actors/builtin/paych" @@ -74,7 +75,7 @@ func TestLotusAccessorFetchUnsealedPiece(t *testing.T) { rpn := &mockRPN{ sectors: mockData, } - api := NewMinerAPI(ps, rpn, 100) + api := NewMinerAPI(ps, rpn, 100, 5) require.NoError(t, api.Start(ctx)) // Add deals to piece store @@ -114,7 +115,7 @@ func TestLotusAccessorGetUnpaddedCARSize(t *testing.T) { ps := getPieceStore(t) rpn := &mockRPN{} - api := NewMinerAPI(ps, rpn, 100) + api := NewMinerAPI(ps, rpn, 100, 5) require.NoError(t, api.Start(ctx)) // Add a deal with data Length 10 @@ -141,7 +142,7 @@ func TestThrottle(t *testing.T) { unsealedSectorID: "foo", }, } - api := NewMinerAPI(ps, rpn, 3) + api := NewMinerAPI(ps, rpn, 3, 5) require.NoError(t, api.Start(ctx)) // Add a deal with data Length 10 @@ -203,6 +204,10 @@ type mockRPN struct { } func (m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return m.UnsealSectorAt(ctx, sectorID, offset, length) +} + +func (m *mockRPN) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { atomic.AddInt32(&m.calls, 1) m.lk.RLock() defer m.lk.RUnlock() @@ -211,7 +216,13 @@ func 
(m *mockRPN) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, o if !ok { panic("sector not found") } - return io.NopCloser(bytes.NewBuffer([]byte(data))), nil + return struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: io.NopCloser(bytes.NewBuffer([]byte(data[:]))), + }, nil } func (m *mockRPN) IsUnsealed(ctx context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (bool, error) { diff --git a/markets/dagstore/mocks/mock_lotus_accessor.go b/markets/dagstore/mocks/mock_lotus_accessor.go index 2e19b4482..19923cc2a 100644 --- a/markets/dagstore/mocks/mock_lotus_accessor.go +++ b/markets/dagstore/mocks/mock_lotus_accessor.go @@ -1,96 +1,96 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: lotusaccessor.go +// Source: github.com/filecoin-project/lotus/markets/dagstore (interfaces: MinerAPI) // Package mock_dagstore is a generated GoMock package. package mock_dagstore import ( context "context" - io "io" reflect "reflect" + mount "github.com/filecoin-project/dagstore/mount" gomock "github.com/golang/mock/gomock" cid "github.com/ipfs/go-cid" ) -// MockLotusAccessor is a mock of LotusAccessor interface. -type MockLotusAccessor struct { +// MockMinerAPI is a mock of MinerAPI interface. +type MockMinerAPI struct { ctrl *gomock.Controller - recorder *MockLotusAccessorMockRecorder + recorder *MockMinerAPIMockRecorder } -// MockLotusAccessorMockRecorder is the mock recorder for MockLotusAccessor. -type MockLotusAccessorMockRecorder struct { - mock *MockLotusAccessor +// MockMinerAPIMockRecorder is the mock recorder for MockMinerAPI. +type MockMinerAPIMockRecorder struct { + mock *MockMinerAPI } -// NewMockLotusAccessor creates a new mock instance. -func NewMockLotusAccessor(ctrl *gomock.Controller) *MockLotusAccessor { - mock := &MockLotusAccessor{ctrl: ctrl} - mock.recorder = &MockLotusAccessorMockRecorder{mock} +// NewMockMinerAPI creates a new mock instance. 
+func NewMockMinerAPI(ctrl *gomock.Controller) *MockMinerAPI { + mock := &MockMinerAPI{ctrl: ctrl} + mock.recorder = &MockMinerAPIMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockLotusAccessor) EXPECT() *MockLotusAccessorMockRecorder { +func (m *MockMinerAPI) EXPECT() *MockMinerAPIMockRecorder { return m.recorder } // FetchUnsealedPiece mocks base method. -func (m *MockLotusAccessor) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m *MockMinerAPI) FetchUnsealedPiece(arg0 context.Context, arg1 cid.Cid) (mount.Reader, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "FetchUnsealedPiece", ctx, pieceCid) - ret0, _ := ret[0].(io.ReadCloser) + ret := m.ctrl.Call(m, "FetchUnsealedPiece", arg0, arg1) + ret0, _ := ret[0].(mount.Reader) ret1, _ := ret[1].(error) return ret0, ret1 } // FetchUnsealedPiece indicates an expected call of FetchUnsealedPiece. -func (mr *MockLotusAccessorMockRecorder) FetchUnsealedPiece(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) FetchUnsealedPiece(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockLotusAccessor)(nil).FetchUnsealedPiece), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FetchUnsealedPiece", reflect.TypeOf((*MockMinerAPI)(nil).FetchUnsealedPiece), arg0, arg1) } // GetUnpaddedCARSize mocks base method. 
-func (m *MockLotusAccessor) GetUnpaddedCARSize(ctx context.Context, pieceCid cid.Cid) (uint64, error) { +func (m *MockMinerAPI) GetUnpaddedCARSize(arg0 context.Context, arg1 cid.Cid) (uint64, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetUnpaddedCARSize", ctx, pieceCid) + ret := m.ctrl.Call(m, "GetUnpaddedCARSize", arg0, arg1) ret0, _ := ret[0].(uint64) ret1, _ := ret[1].(error) return ret0, ret1 } // GetUnpaddedCARSize indicates an expected call of GetUnpaddedCARSize. -func (mr *MockLotusAccessorMockRecorder) GetUnpaddedCARSize(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) GetUnpaddedCARSize(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockLotusAccessor)(nil).GetUnpaddedCARSize), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetUnpaddedCARSize", reflect.TypeOf((*MockMinerAPI)(nil).GetUnpaddedCARSize), arg0, arg1) } // IsUnsealed mocks base method. -func (m *MockLotusAccessor) IsUnsealed(ctx context.Context, pieceCid cid.Cid) (bool, error) { +func (m *MockMinerAPI) IsUnsealed(arg0 context.Context, arg1 cid.Cid) (bool, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "IsUnsealed", ctx, pieceCid) + ret := m.ctrl.Call(m, "IsUnsealed", arg0, arg1) ret0, _ := ret[0].(bool) ret1, _ := ret[1].(error) return ret0, ret1 } // IsUnsealed indicates an expected call of IsUnsealed. -func (mr *MockLotusAccessorMockRecorder) IsUnsealed(ctx, pieceCid interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) IsUnsealed(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockLotusAccessor)(nil).IsUnsealed), ctx, pieceCid) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsUnsealed", reflect.TypeOf((*MockMinerAPI)(nil).IsUnsealed), arg0, arg1) } // Start mocks base method. 
-func (m *MockLotusAccessor) Start(ctx context.Context) error { +func (m *MockMinerAPI) Start(arg0 context.Context) error { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", ctx) + ret := m.ctrl.Call(m, "Start", arg0) ret0, _ := ret[0].(error) return ret0 } // Start indicates an expected call of Start. -func (mr *MockLotusAccessorMockRecorder) Start(ctx interface{}) *gomock.Call { +func (mr *MockMinerAPIMockRecorder) Start(arg0 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockLotusAccessor)(nil).Start), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockMinerAPI)(nil).Start), arg0) } diff --git a/markets/dagstore/mount.go b/markets/dagstore/mount.go index c97dcbf86..0ecdc9808 100644 --- a/markets/dagstore/mount.go +++ b/markets/dagstore/mount.go @@ -2,7 +2,6 @@ package dagstore import ( "context" - "io" "net/url" "github.com/ipfs/go-cid" @@ -57,19 +56,15 @@ func (l *LotusMount) Deserialize(u *url.URL) error { } func (l *LotusMount) Fetch(ctx context.Context) (mount.Reader, error) { - r, err := l.API.FetchUnsealedPiece(ctx, l.PieceCid) - if err != nil { - return nil, xerrors.Errorf("failed to fetch unsealed piece %s: %w", l.PieceCid, err) - } - return &readCloser{r}, nil + return l.API.FetchUnsealedPiece(ctx, l.PieceCid) } func (l *LotusMount) Info() mount.Info { return mount.Info{ Kind: mount.KindRemote, AccessSequential: true, - AccessSeek: false, - AccessRandom: false, + AccessSeek: true, + AccessRandom: true, } } @@ -94,17 +89,3 @@ func (l *LotusMount) Stat(ctx context.Context) (mount.Stat, error) { Ready: isUnsealed, }, nil } - -type readCloser struct { - io.ReadCloser -} - -var _ mount.Reader = (*readCloser)(nil) - -func (r *readCloser) ReadAt(p []byte, off int64) (n int, err error) { - return 0, xerrors.Errorf("ReadAt called but not implemented") -} - -func (r *readCloser) Seek(offset int64, whence int) (int64, error) { - return 0, 
xerrors.Errorf("Seek called but not implemented") -} diff --git a/markets/dagstore/mount_test.go b/markets/dagstore/mount_test.go index 09b255d6a..d6ea54964 100644 --- a/markets/dagstore/mount_test.go +++ b/markets/dagstore/mount_test.go @@ -2,6 +2,7 @@ package dagstore import ( "context" + "io" "io/ioutil" "net/url" "strings" @@ -12,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/filecoin-project/dagstore/mount" - mock_dagstore "github.com/filecoin-project/lotus/markets/dagstore/mocks" ) @@ -26,12 +26,31 @@ func TestLotusMount(t *testing.T) { defer mockCtrl.Finish() // create a mock lotus api that returns the reader we want - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) mockLotusMountAPI.EXPECT().IsUnsealed(gomock.Any(), cid).Return(true, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) - mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(&readCloser{ioutil.NopCloser(strings.NewReader("testing"))}, nil).Times(1) + mr1 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + mr2 := struct { + io.ReadCloser + io.ReaderAt + io.Seeker + }{ + ReadCloser: ioutil.NopCloser(strings.NewReader("testing")), + ReaderAt: nil, + Seeker: nil, + } + + mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr1, nil).Times(1) + mockLotusMountAPI.EXPECT().FetchUnsealedPiece(gomock.Any(), cid).Return(mr2, nil).Times(1) mockLotusMountAPI.EXPECT().GetUnpaddedCARSize(ctx, cid).Return(uint64(100), nil).Times(1) mnt, err := NewLotusMount(cid, mockLotusMountAPI) @@ -109,7 +128,7 @@ func TestLotusMountRegistration(t *testing.T) { // when test is done, assert expectations on all mock objects. 
defer mockCtrl.Finish() - mockLotusMountAPI := mock_dagstore.NewMockLotusAccessor(mockCtrl) + mockLotusMountAPI := mock_dagstore.NewMockMinerAPI(mockCtrl) registry := mount.NewRegistry() err = registry.Register(lotusScheme, mountTemplate(mockLotusMountAPI)) require.NoError(t, err) diff --git a/markets/dagstore/wrapper_migration_test.go b/markets/dagstore/wrapper_migration_test.go index 13d8db876..437032da9 100644 --- a/markets/dagstore/wrapper_migration_test.go +++ b/markets/dagstore/wrapper_migration_test.go @@ -2,13 +2,16 @@ package dagstore import ( "context" + "io" "testing" - "github.com/filecoin-project/dagstore" "github.com/stretchr/testify/require" + "github.com/filecoin-project/dagstore" + "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" "github.com/filecoin-project/go-fil-markets/retrievalmarket/impl/testnodes" tut "github.com/filecoin-project/go-fil-markets/shared_testutil" "github.com/filecoin-project/go-fil-markets/storagemarket" @@ -93,7 +96,7 @@ func TestShardRegistration(t *testing.T) { cfg := config.DefaultStorageMiner().DAGStore cfg.RootDir = t.TempDir() - mapi := NewMinerAPI(ps, sa, 10) + mapi := NewMinerAPI(ps, &wrappedSA{sa}, 10, 5) dagst, w, err := NewDAGStore(cfg, mapi) require.NoError(t, err) require.NotNil(t, dagst) @@ -119,3 +122,25 @@ func TestShardRegistration(t *testing.T) { // ps.VerifyExpectations(t) } + +type wrappedSA struct { + retrievalmarket.SectorAccessor +} + +func (w *wrappedSA) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + r, err := w.UnsealSector(ctx, sectorID, pieceOffset, length) + if err != nil { + return nil, err + } + return struct { + io.ReadCloser + io.Seeker + io.ReaderAt + }{ + ReadCloser: r, + Seeker: nil, + ReaderAt: nil, + }, err +} + +var _ SectorAccessor = &wrappedSA{} diff --git 
a/markets/dagstore/wrapper_test.go b/markets/dagstore/wrapper_test.go index 9d3e6939e..48e01100b 100644 --- a/markets/dagstore/wrapper_test.go +++ b/markets/dagstore/wrapper_test.go @@ -3,21 +3,19 @@ package dagstore import ( "bytes" "context" - "io" "os" "testing" "time" + "github.com/ipfs/go-cid" + "github.com/stretchr/testify/require" "golang.org/x/xerrors" - "github.com/filecoin-project/lotus/node/config" - "github.com/filecoin-project/dagstore" "github.com/filecoin-project/dagstore/mount" "github.com/filecoin-project/dagstore/shard" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/require" + "github.com/filecoin-project/lotus/node/config" ) // TestWrapperAcquireRecovery verifies that if acquire shard returns a "not found" @@ -191,7 +189,7 @@ func (m mockLotusMount) Start(ctx context.Context) error { return nil } -func (m mockLotusMount) FetchUnsealedPiece(ctx context.Context, pieceCid cid.Cid) (io.ReadCloser, error) { +func (m mockLotusMount) FetchUnsealedPiece(context.Context, cid.Cid) (mount.Reader, error) { panic("implement me") } diff --git a/markets/loggers/loggers.go b/markets/loggers/loggers.go index 2acf987cb..0d542a45d 100644 --- a/markets/loggers/loggers.go +++ b/markets/loggers/loggers.go @@ -40,7 +40,6 @@ func DataTransferLogger(event datatransfer.Event, state datatransfer.ChannelStat "sent", state.Sent(), "received", state.Received(), "queued", state.Queued(), - "received count", state.ReceivedCidsLen(), "total size", state.TotalSize(), "remote peer", state.OtherPeer(), "event message", event.Message, diff --git a/markets/retrievaladapter/provider_test.go b/markets/retrievaladapter/provider_test.go index eca3b1152..18dfe42a0 100644 --- a/markets/retrievaladapter/provider_test.go +++ b/markets/retrievaladapter/provider_test.go @@ -1,3 +1,4 @@ +//stm: #unit package retrievaladapter import ( @@ -18,6 +19,7 @@ import ( ) func TestGetPricingInput(t *testing.T) { + //stm: @CHAIN_STATE_MARKET_STORAGE_DEAL_001 ctx := context.Background() tsk := 
&types.TipSet{} key := tsk.Key() diff --git a/markets/sectoraccessor/sectoraccessor.go b/markets/sectoraccessor/sectoraccessor.go index 1304a3a00..4320e3fb1 100644 --- a/markets/sectoraccessor/sectoraccessor.go +++ b/markets/sectoraccessor/sectoraccessor.go @@ -4,23 +4,24 @@ import ( "context" "io" + "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" "golang.org/x/xerrors" + "github.com/filecoin-project/dagstore/mount" + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/go-fil-markets/retrievalmarket" + "github.com/filecoin-project/go-state-types/abi" + specstorage "github.com/filecoin-project/specs-storage/storage" + "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/chain/types" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/storiface" + "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/storage/sectorblocks" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - "github.com/filecoin-project/go-state-types/abi" - specstorage "github.com/filecoin-project/specs-storage/storage" - - "github.com/ipfs/go-cid" - logging "github.com/ipfs/go-log/v2" ) var log = logging.Logger("sectoraccessor") @@ -34,12 +35,16 @@ type sectorAccessor struct { var _ retrievalmarket.SectorAccessor = (*sectorAccessor)(nil) -func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) retrievalmarket.SectorAccessor { +func NewSectorAccessor(maddr dtypes.MinerAddress, secb sectorblocks.SectorBuilder, pp sectorstorage.PieceProvider, full v1api.FullNode) dagstore.SectorAccessor { return &sectorAccessor{address.Address(maddr), secb, pp, full} } -func (sa *sectorAccessor) UnsealSector(ctx
context.Context, sectorID abi.SectorNumber, offset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { - log.Debugf("get sector %d, offset %d, length %d", sectorID, offset, length) +func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (io.ReadCloser, error) { + return sa.UnsealSectorAt(ctx, sectorID, pieceOffset, length) +} + +func (sa *sectorAccessor) UnsealSectorAt(ctx context.Context, sectorID abi.SectorNumber, pieceOffset abi.UnpaddedPieceSize, length abi.UnpaddedPieceSize) (mount.Reader, error) { + log.Debugf("get sector %d, pieceOffset %d, length %d", sectorID, pieceOffset, length) si, err := sa.sectorsStatus(ctx, sectorID, false) if err != nil { return nil, err @@ -64,8 +69,8 @@ func (sa *sectorAccessor) UnsealSector(ctx context.Context, sectorID abi.SectorN } // Get a reader for the piece, unsealing the piece if necessary - log.Debugf("read piece in sector %d, offset %d, length %d from miner %d", sectorID, offset, length, mid) - r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(offset), length, si.Ticket.Value, commD) + log.Debugf("read piece in sector %d, pieceOffset %d, length %d from miner %d", sectorID, pieceOffset, length, mid) + r, unsealed, err := sa.pp.ReadPiece(ctx, ref, storiface.UnpaddedByteIndex(pieceOffset), length, si.Ticket.Value, commD) if err != nil { return nil, xerrors.Errorf("failed to unseal piece from sector %d: %w", sectorID, err) } diff --git a/markets/storageadapter/dealstatematcher_test.go b/markets/storageadapter/dealstatematcher_test.go index cb0360778..755ecaf47 100644 --- a/markets/storageadapter/dealstatematcher_test.go +++ b/markets/storageadapter/dealstatematcher_test.go @@ -1,3 +1,4 @@ +//stm: #unit package storageadapter import ( @@ -27,6 +28,7 @@ import ( ) func TestDealStateMatcher(t *testing.T) { + //stm: @CHAIN_STATE_GET_ACTOR_001 ctx := context.Background() bs := 
bstore.NewMemorySync() store := adt2.WrapStore(ctx, cbornode.NewCborStore(bs)) diff --git a/markets/storageadapter/ondealsectorcommitted.go b/markets/storageadapter/ondealsectorcommitted.go index 4cd0a2d68..94eaadef4 100644 --- a/markets/storageadapter/ondealsectorcommitted.go +++ b/markets/storageadapter/ondealsectorcommitted.go @@ -5,6 +5,7 @@ import ( "context" "sync" + "github.com/filecoin-project/go-bitfield" sealing "github.com/filecoin-project/lotus/extern/storage-sealing" "github.com/ipfs/go-cid" "golang.org/x/xerrors" @@ -110,7 +111,7 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, // Watch for a pre-commit message to the provider. matchEvent := func(msg *types.Message) (bool, error) { - matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch) + matched := msg.To == provider && (msg.Method == miner.Methods.PreCommitSector || msg.Method == miner.Methods.PreCommitSectorBatch || msg.Method == miner.Methods.ProveReplicaUpdates) return matched, nil } @@ -145,6 +146,20 @@ func (mgr *SectorCommittedManager) OnDealSectorPreCommitted(ctx context.Context, return false, err } + // If this is a replica update method that succeeded the deal is active + if msg.Method == miner.Methods.ProveReplicaUpdates { + sn, err := dealSectorInReplicaUpdateSuccess(msg, rec, res) + if err != nil { + return false, err + } + if sn != nil { + cb(*sn, true, nil) + return false, nil + } + // Didn't find the deal ID in this message, so keep looking + return true, nil + } + // Extract the message parameters sn, err := dealSectorInPreCommitMsg(msg, res) if err != nil { @@ -264,6 +279,42 @@ func (mgr *SectorCommittedManager) OnDealSectorCommitted(ctx context.Context, pr return nil } +func dealSectorInReplicaUpdateSuccess(msg *types.Message, rec *types.MessageReceipt, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { + var params miner.ProveReplicaUpdatesParams + if err := 
params.UnmarshalCBOR(bytes.NewReader(msg.Params)); err != nil { + return nil, xerrors.Errorf("unmarshal prove replica update: %w", err) + } + + var seekUpdate miner.ReplicaUpdate + var found bool + for _, update := range params.Updates { + for _, did := range update.Deals { + if did == res.DealID { + seekUpdate = update + found = true + break + } + } + } + if !found { + return nil, nil + } + + // check that this update passed validation steps + var successBf bitfield.BitField + if err := successBf.UnmarshalCBOR(bytes.NewReader(rec.Return)); err != nil { + return nil, xerrors.Errorf("unmarshal return value: %w", err) + } + success, err := successBf.IsSet(uint64(seekUpdate.SectorID)) + if err != nil { + return nil, xerrors.Errorf("failed to check success of replica update: %w", err) + } + if !success { + return nil, xerrors.Errorf("replica update %d failed", seekUpdate.SectorID) + } + return &seekUpdate.SectorID, nil +} + // dealSectorInPreCommitMsg tries to find a sector containing the specified deal func dealSectorInPreCommitMsg(msg *types.Message, res sealing.CurrentDealInfo) (*abi.SectorNumber, error) { switch msg.Method { diff --git a/markets/storageadapter/provider.go b/markets/storageadapter/provider.go index 5c82d0dc8..0828db271 100644 --- a/markets/storageadapter/provider.go +++ b/markets/storageadapter/provider.go @@ -4,7 +4,6 @@ package storageadapter import ( "context" - "io" "time" "github.com/ipfs/go-cid" @@ -88,7 +87,7 @@ func (n *ProviderNodeAdapter) PublishDeals(ctx context.Context, deal storagemark return n.dealPublisher.Publish(ctx, deal.ClientDealProposal) } -func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData io.Reader) (*storagemarket.PackingResult, error) { +func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagemarket.MinerDeal, pieceSize abi.UnpaddedPieceSize, pieceData shared.ReadSeekStarter) (*storagemarket.PackingResult, error) { 
if deal.PublishCid == nil { return nil, xerrors.Errorf("deal.PublishCid can't be nil") } @@ -104,17 +103,32 @@ func (n *ProviderNodeAdapter) OnDealComplete(ctx context.Context, deal storagema KeepUnsealed: deal.FastRetrieval, } + // Attempt to add the piece to the sector p, offset, err := n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) curTime := build.Clock.Now() for build.Clock.Since(curTime) < addPieceRetryTimeout { + // Check if there was an error because of too many sectors being sealed if !xerrors.Is(err, sealing.ErrTooManySectorsSealing) { if err != nil { log.Errorf("failed to addPiece for deal %d, err: %v", deal.DealID, err) } + + // There was either a fatal error or no error. In either case + // don't retry AddPiece break } + + // The piece could not be added to the sector because there are too + // many sectors being sealed, back-off for a while before trying again select { case <-build.Clock.After(addPieceRetryWait): + // Reset the reader to the start + err = pieceData.SeekStart() + if err != nil { + return nil, xerrors.Errorf("failed to reset piece reader to start before retrying AddPiece for deal %d: %w", deal.DealID, err) + } + + // Attempt to add the piece again p, offset, err = n.secb.AddPiece(ctx, pieceSize, pieceData, sdInfo) case <-ctx.Done(): return nil, xerrors.New("context expired while waiting to retry AddPiece") @@ -228,7 +242,7 @@ func (n *ProviderNodeAdapter) GetBalance(ctx context.Context, addr address.Addre // TODO: why doesnt this method take in a sector ID? 
func (n *ProviderNodeAdapter) LocatePieceForDealWithinSector(ctx context.Context, dealID abi.DealID, encodedTs shared.TipSetToken) (sectorID abi.SectorNumber, offset abi.PaddedPieceSize, length abi.PaddedPieceSize, err error) { - refs, err := n.secb.GetRefs(dealID) + refs, err := n.secb.GetRefs(ctx, dealID) if err != nil { return 0, 0, 0, err } diff --git a/metrics/metrics.go b/metrics/metrics.go index fd538839d..b4032bb1d 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -46,6 +46,7 @@ var ( TaskType, _ = tag.NewKey("task_type") WorkerHostname, _ = tag.NewKey("worker_hostname") StorageID, _ = tag.NewKey("storage_id") + SectorState, _ = tag.NewKey("sector_state") ) // Measures @@ -55,6 +56,22 @@ var ( PeerCount = stats.Int64("peer/count", "Current number of FIL peers", stats.UnitDimensionless) APIRequestDuration = stats.Float64("api/request_duration_ms", "Duration of API requests", stats.UnitMilliseconds) + // graphsync + + GraphsyncReceivingPeersCount = stats.Int64("graphsync/receiving_peers", "number of peers we are receiving graphsync data from", stats.UnitDimensionless) + GraphsyncReceivingActiveCount = stats.Int64("graphsync/receiving_active", "number of active receiving graphsync transfers", stats.UnitDimensionless) + GraphsyncReceivingCountCount = stats.Int64("graphsync/receiving_pending", "number of pending receiving graphsync transfers", stats.UnitDimensionless) + GraphsyncReceivingTotalMemoryAllocated = stats.Int64("graphsync/receiving_total_allocated", "amount of block memory allocated for receiving graphsync data", stats.UnitBytes) + GraphsyncReceivingTotalPendingAllocations = stats.Int64("graphsync/receiving_pending_allocations", "amount of block memory on hold being received pending allocation", stats.UnitBytes) + GraphsyncReceivingPeersPending = stats.Int64("graphsync/receiving_peers_pending", "number of peers we can't receive more data from cause of pending allocations", stats.UnitDimensionless) + + GraphsyncSendingPeersCount = 
stats.Int64("graphsync/sending_peers", "number of peers we are sending graphsync data to", stats.UnitDimensionless) + GraphsyncSendingActiveCount = stats.Int64("graphsync/sending_active", "number of active sending graphsync transfers", stats.UnitDimensionless) + GraphsyncSendingCountCount = stats.Int64("graphsync/sending_pending", "number of pending sending graphsync transfers", stats.UnitDimensionless) + GraphsyncSendingTotalMemoryAllocated = stats.Int64("graphsync/sending_total_allocated", "amount of block memory allocated for sending graphsync data", stats.UnitBytes) + GraphsyncSendingTotalPendingAllocations = stats.Int64("graphsync/sending_pending_allocations", "amount of block memory on hold from sending pending allocation", stats.UnitBytes) + GraphsyncSendingPeersPending = stats.Int64("graphsync/sending_peers_pending", "number of peers we can't send more data to cause of pending allocations", stats.UnitDimensionless) + // chain ChainNodeHeight = stats.Int64("chain/node_height", "Current Height of the node", stats.UnitDimensionless) ChainNodeHeightExpected = stats.Int64("chain/node_height_expected", "Expected Height of the node", stats.UnitDimensionless) @@ -98,6 +115,8 @@ var ( WorkerCallsReturnedDuration = stats.Float64("sealing/worker_calls_returned_ms", "Counter of returned worker tasks", stats.UnitMilliseconds) WorkerUntrackedCallsReturned = stats.Int64("sealing/worker_untracked_calls_returned", "Counter of returned untracked worker tasks", stats.UnitDimensionless) + SectorStates = stats.Int64("sealing/states", "Number of sectors in each state", stats.UnitDimensionless) + StorageFSAvailable = stats.Float64("storage/path_fs_available_frac", "Fraction of filesystem available storage", stats.UnitDimensionless) StorageAvailable = stats.Float64("storage/path_available_frac", "Fraction of available storage", stats.UnitDimensionless) StorageReserved = stats.Float64("storage/path_reserved_frac", "Fraction of reserved storage", stats.UnitDimensionless) @@ -109,6 
+128,15 @@ var ( StorageLimitUsedBytes = stats.Int64("storage/path_limit_used_bytes", "used optional storage limit bytes", stats.UnitBytes) StorageLimitMaxBytes = stats.Int64("storage/path_limit_max_bytes", "optional storage limit", stats.UnitBytes) + DagStorePRInitCount = stats.Int64("dagstore/pr_init_count", "PieceReader init count", stats.UnitDimensionless) + DagStorePRBytesRequested = stats.Int64("dagstore/pr_requested_bytes", "PieceReader requested bytes", stats.UnitBytes) + DagStorePRBytesDiscarded = stats.Int64("dagstore/pr_discarded_bytes", "PieceReader discarded bytes", stats.UnitBytes) + DagStorePRDiscardCount = stats.Int64("dagstore/pr_discard_count", "PieceReader discard count", stats.UnitDimensionless) + DagStorePRSeekBackCount = stats.Int64("dagstore/pr_seek_back_count", "PieceReader seek back count", stats.UnitDimensionless) + DagStorePRSeekForwardCount = stats.Int64("dagstore/pr_seek_forward_count", "PieceReader seek forward count", stats.UnitDimensionless) + DagStorePRSeekBackBytes = stats.Int64("dagstore/pr_seek_back_bytes", "PieceReader seek back bytes", stats.UnitBytes) + DagStorePRSeekForwardBytes = stats.Int64("dagstore/pr_seek_forward_bytes", "PieceReader seek forward bytes", stats.UnitBytes) + // splitstore SplitstoreMiss = stats.Int64("splitstore/miss", "Number of misses in hotstre access", stats.UnitDimensionless) SplitstoreCompactionTimeSeconds = stats.Float64("splitstore/compaction_time", "Compaction time in seconds", stats.UnitSeconds) @@ -123,7 +151,7 @@ var ( Description: "Lotus node information", Measure: LotusInfo, Aggregation: view.LastValue(), - TagKeys: []tag.Key{Version, Commit}, + TagKeys: []tag.Key{Version, Commit, NodeType}, } ChainNodeHeightView = &view.View{ Measure: ChainNodeHeight, @@ -308,6 +336,11 @@ var ( Aggregation: workMillisecondsDistribution, TagKeys: []tag.Key{TaskType, WorkerHostname}, } + SectorStatesView = &view.View{ + Measure: SectorStates, + Aggregation: view.LastValue(), + TagKeys: []tag.Key{SectorState}, 
+ } StorageFSAvailableView = &view.View{ Measure: StorageFSAvailable, Aggregation: view.LastValue(), @@ -359,6 +392,39 @@ var ( TagKeys: []tag.Key{StorageID}, } + DagStorePRInitCountView = &view.View{ + Measure: DagStorePRInitCount, + Aggregation: view.Count(), + } + DagStorePRBytesRequestedView = &view.View{ + Measure: DagStorePRBytesRequested, + Aggregation: view.Sum(), + } + DagStorePRBytesDiscardedView = &view.View{ + Measure: DagStorePRBytesDiscarded, + Aggregation: view.Sum(), + } + DagStorePRDiscardCountView = &view.View{ + Measure: DagStorePRDiscardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackCountView = &view.View{ + Measure: DagStorePRSeekBackCount, + Aggregation: view.Count(), + } + DagStorePRSeekForwardCountView = &view.View{ + Measure: DagStorePRSeekForwardCount, + Aggregation: view.Count(), + } + DagStorePRSeekBackBytesView = &view.View{ + Measure: DagStorePRSeekBackBytes, + Aggregation: view.Sum(), + } + DagStorePRSeekForwardBytesView = &view.View{ + Measure: DagStorePRSeekForwardBytes, + Aggregation: view.Sum(), + } + // splitstore SplitstoreMissView = &view.View{ Measure: SplitstoreMiss, @@ -380,6 +446,56 @@ var ( Measure: SplitstoreCompactionDead, Aggregation: view.Sum(), } + + // graphsync + GraphsyncReceivingPeersCountView = &view.View{ + Measure: GraphsyncReceivingPeersCount, + Aggregation: view.LastValue(), + } + GraphsyncReceivingActiveCountView = &view.View{ + Measure: GraphsyncReceivingActiveCount, + Aggregation: view.LastValue(), + } + GraphsyncReceivingCountCountView = &view.View{ + Measure: GraphsyncReceivingCountCount, + Aggregation: view.LastValue(), + } + GraphsyncReceivingTotalMemoryAllocatedView = &view.View{ + Measure: GraphsyncReceivingTotalMemoryAllocated, + Aggregation: view.LastValue(), + } + GraphsyncReceivingTotalPendingAllocationsView = &view.View{ + Measure: GraphsyncReceivingTotalPendingAllocations, + Aggregation: view.LastValue(), + } + GraphsyncReceivingPeersPendingView = &view.View{ + Measure: 
GraphsyncReceivingPeersPending, + Aggregation: view.LastValue(), + } + GraphsyncSendingPeersCountView = &view.View{ + Measure: GraphsyncSendingPeersCount, + Aggregation: view.LastValue(), + } + GraphsyncSendingActiveCountView = &view.View{ + Measure: GraphsyncSendingActiveCount, + Aggregation: view.LastValue(), + } + GraphsyncSendingCountCountView = &view.View{ + Measure: GraphsyncSendingCountCount, + Aggregation: view.LastValue(), + } + GraphsyncSendingTotalMemoryAllocatedView = &view.View{ + Measure: GraphsyncSendingTotalMemoryAllocated, + Aggregation: view.LastValue(), + } + GraphsyncSendingTotalPendingAllocationsView = &view.View{ + Measure: GraphsyncSendingTotalPendingAllocations, + Aggregation: view.LastValue(), + } + GraphsyncSendingPeersPendingView = &view.View{ + Measure: GraphsyncSendingPeersPending, + Aggregation: view.LastValue(), + } ) // DefaultViews is an array of OpenCensus views for metric gathering purposes @@ -388,6 +504,19 @@ var DefaultViews = func() []*view.View { InfoView, PeerCountView, APIRequestDurationView, + + GraphsyncReceivingPeersCountView, + GraphsyncReceivingActiveCountView, + GraphsyncReceivingCountCountView, + GraphsyncReceivingTotalMemoryAllocatedView, + GraphsyncReceivingTotalPendingAllocationsView, + GraphsyncReceivingPeersPendingView, + GraphsyncSendingPeersCountView, + GraphsyncSendingActiveCountView, + GraphsyncSendingCountCountView, + GraphsyncSendingTotalMemoryAllocatedView, + GraphsyncSendingTotalPendingAllocationsView, + GraphsyncSendingPeersPendingView, } views = append(views, blockstore.DefaultViews...) views = append(views, rpcmetrics.DefaultViews...) 
@@ -441,14 +570,25 @@ var MinerNodeViews = append([]*view.View{ WorkerCallsReturnedCountView, WorkerUntrackedCallsReturnedView, WorkerCallsReturnedDurationView, + SectorStatesView, StorageFSAvailableView, StorageAvailableView, StorageReservedView, StorageLimitUsedView, + StorageCapacityBytesView, StorageFSAvailableBytesView, StorageAvailableBytesView, StorageReservedBytesView, StorageLimitUsedBytesView, + StorageLimitMaxBytesView, + DagStorePRInitCountView, + DagStorePRBytesRequestedView, + DagStorePRBytesDiscardedView, + DagStorePRDiscardCountView, + DagStorePRSeekBackCountView, + DagStorePRSeekForwardCountView, + DagStorePRSeekBackBytesView, + DagStorePRSeekForwardBytesView, }, DefaultViews...) // SinceInMilliseconds returns the duration of time since the provide time as a float64. diff --git a/miner/miner.go b/miner/miner.go index 582ade723..976e9ca6f 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -325,7 +325,7 @@ minerLoop: "block-time", btime, "time", build.Clock.Now(), "difference", build.Clock.Since(btime)) } - if err := m.sf.MinedBlock(b.Header, base.TipSet.Height()+base.NullRounds); err != nil { + if err := m.sf.MinedBlock(ctx, b.Header, base.TipSet.Height()+base.NullRounds); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) if os.Getenv("LOTUS_MINER_NO_SLASHFILTER") != "_yes_i_know_i_can_and_probably_will_lose_all_my_fil_and_power_" { continue @@ -535,8 +535,12 @@ func (m *Miner) mineOne(ctx context.Context, base *MiningBase) (minedBlock *type prand := abi.PoStRandomness(rand) tSeed := build.Clock.Now() + nv, err := m.api.StateNetworkVersion(ctx, base.TipSet.Key()) + if err != nil { + return nil, err + } - postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand) + postProof, err := m.epp.ComputeProof(ctx, mbi.Sectors, prand, round, nv) if err != nil { err = xerrors.Errorf("failed to compute winning post proof: %w", err) return nil, err diff --git a/miner/warmup.go b/miner/warmup.go index 991679c09..be5ac3ea7 100644 --- a/miner/warmup.go 
+++ b/miner/warmup.go @@ -10,8 +10,7 @@ import ( "github.com/filecoin-project/go-bitfield" "github.com/filecoin-project/go-state-types/abi" - - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/chain/types" ) @@ -61,13 +60,22 @@ out: return xerrors.Errorf("getting sector info: %w", err) } - _, err = m.epp.ComputeProof(ctx, []proof2.SectorInfo{ + ts, err := m.api.ChainHead(ctx) + if err != nil { + return xerrors.Errorf("getting chain head") + } + nv, err := m.api.StateNetworkVersion(ctx, ts.Key()) + if err != nil { + return xerrors.Errorf("getting network version") + } + + _, err = m.epp.ComputeProof(ctx, []proof7.ExtendedSectorInfo{ { SealProof: si.SealProof, SectorNumber: sector, SealedCID: si.SealedCID, }, - }, r) + }, r, ts.Height(), nv) if err != nil { return xerrors.Errorf("failed to compute proof: %w", err) } diff --git a/node/builder.go b/node/builder.go index 3aa0944fa..eb186fb77 100644 --- a/node/builder.go +++ b/node/builder.go @@ -15,11 +15,11 @@ import ( logging "github.com/ipfs/go-log/v2" ci "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/host" + "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" "github.com/libp2p/go-libp2p-core/routing" dht "github.com/libp2p/go-libp2p-kad-dht" - "github.com/libp2p/go-libp2p-peerstore/pstoremem" pubsub "github.com/libp2p/go-libp2p-pubsub" record "github.com/libp2p/go-libp2p-record" "github.com/libp2p/go-libp2p/p2p/net/conngater" @@ -69,6 +69,7 @@ var ( BandwidthReporterKey = special{11} // Libp2p option ConnGaterKey = special{12} // libp2p option DAGStoreKey = special{13} // constructor returns multiple values + ResourceManagerKey = special{14} // Libp2p option ) type Invoke int @@ -121,6 +122,7 @@ const ( SetApiEndpointKey StartSubnetMgrKey + StartCrossMsgResolverMgrKey _nInvokes 
// keep this last ) @@ -173,6 +175,12 @@ func defaults() []Option { }), Override(new(dtypes.ShutdownChan), make(chan struct{})), + + // the great context in the sky, otherwise we can't DI build genesis; there has to be a better + // solution than this hack. + Override(new(context.Context), func(lc fx.Lifecycle, mctx helpers.MetricsCtx) context.Context { + return helpers.LifecycleCtx(mctx, lc) + }), } } @@ -181,14 +189,14 @@ var LibP2P = Options( Override(new(dtypes.Bootstrapper), dtypes.Bootstrapper(false)), // Host dependencies - Override(new(peerstore.Peerstore), pstoremem.NewPeerstore), + Override(new(peerstore.Peerstore), lp2p.Peerstore), Override(PstoreAddSelfKeysKey, lp2p.PstoreAddSelfKeys), Override(StartListeningKey, lp2p.StartListening(config.DefaultFullNode().Libp2p.ListenAddresses)), // Host settings Override(DefaultTransportsKey, lp2p.DefaultTransports), Override(AddrsFactoryKey, lp2p.AddrsFactory(nil, nil)), - Override(SmuxTransportKey, lp2p.SmuxTransport(true)), + Override(SmuxTransportKey, lp2p.SmuxTransport()), Override(RelayKey, lp2p.NoRelay()), Override(SecurityKey, lp2p.Security(true, false)), @@ -221,6 +229,10 @@ var LibP2P = Options( Override(ConnectionManagerKey, lp2p.ConnectionManager(50, 200, 20*time.Second, nil)), Override(new(*conngater.BasicConnectionGater), lp2p.ConnGater), Override(ConnGaterKey, lp2p.ConnGaterOption), + + // Services (resource management) + Override(new(network.ResourceManager), lp2p.ResourceManager), + Override(ResourceManagerKey, lp2p.ResourceManagerOption), ) func IsType(t repo.RepoType) func(s *Settings) bool { @@ -385,6 +397,13 @@ func WithRepoType(repoType repo.RepoType) func(s *Settings) error { } } +func WithEnableLibp2pNode(enable bool) func(s *Settings) error { + return func(s *Settings) error { + s.enableLibp2pNode = enable + return nil + } +} + func WithInvokesKey(i Invoke, resApi interface{}) func(s *Settings) error { return func(s *Settings) error { s.invokes[i] = fx.Populate(resApi) diff --git 
a/node/builder_chain.go b/node/builder_chain.go index 072817176..e538d2e10 100644 --- a/node/builder_chain.go +++ b/node/builder_chain.go @@ -16,7 +16,10 @@ import ( "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/consensus" "github.com/filecoin-project/lotus/chain/consensus/filcns" + module "github.com/filecoin-project/lotus/chain/consensus/hierarchical/modules" "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + snmgr "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/manager" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen/slashfilter" "github.com/filecoin-project/lotus/chain/market" @@ -137,8 +140,11 @@ var ChainNode = Options( // Subneting // Start sharding sub to listent to shard events - Override(new(*subnet.SubnetMgr), subnet.NewSubnetMgr), - Override(StartSubnetMgrKey, subnet.BuildSubnetMgr), + Override(new(*resolver.Resolver), resolver.NewRootResolver), + Override(new(*snmgr.SubnetMgr), snmgr.NewSubnetMgr), + Override(new(subnet.SubnetMgr), module.SetSubMgrIface), + Override(StartSubnetMgrKey, snmgr.BuildSubnetMgr), + Override(StartCrossMsgResolverMgrKey, resolver.HandleMsgs), // Lite node API ApplyIf(isLiteNode, diff --git a/node/builder_miner.go b/node/builder_miner.go index 3447eb3e6..e813a2d24 100644 --- a/node/builder_miner.go +++ b/node/builder_miner.go @@ -136,7 +136,7 @@ func ConfigStorageMiner(c interface{}) Option { If(cfg.Subsystems.EnableMarkets, // Markets Override(new(dtypes.StagingBlockstore), modules.StagingBlockstore), - Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), + Override(new(dtypes.StagingGraphsync), modules.StagingGraphsync(cfg.Dealmaking.SimultaneousTransfersForStorage, 
cfg.Dealmaking.SimultaneousTransfersForStoragePerClient, cfg.Dealmaking.SimultaneousTransfersForRetrieval)), Override(new(dtypes.ProviderPieceStore), modules.NewProviderPieceStore), Override(new(*sectorblocks.SectorBlocks), sectorblocks.NewSectorBlocks), @@ -155,7 +155,8 @@ func ConfigStorageMiner(c interface{}) Option { Override(DAGStoreKey, modules.DAGStore), // Markets (retrieval) - Override(new(retrievalmarket.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(dagstore.SectorAccessor), sectoraccessor.NewSectorAccessor), + Override(new(retrievalmarket.SectorAccessor), From(new(dagstore.SectorAccessor))), Override(new(retrievalmarket.RetrievalProviderNode), retrievaladapter.NewRetrievalProviderNode), Override(new(rmnet.RetrievalMarketNetwork), modules.RetrievalNetwork), Override(new(retrievalmarket.RetrievalProvider), modules.RetrievalProvider), @@ -163,7 +164,9 @@ func ConfigStorageMiner(c interface{}) Option { Override(HandleRetrievalKey, modules.HandleRetrieval), // Markets (storage) - Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDAGServiceDataTransfer), + Override(new(dtypes.ProviderTransferNetwork), modules.NewProviderTransferNetwork), + Override(new(dtypes.ProviderTransport), modules.NewProviderTransport), + Override(new(dtypes.ProviderDataTransfer), modules.NewProviderDataTransfer), Override(new(*storedask.StoredAsk), modules.NewStorageAsk), Override(new(dtypes.StorageDealFilter), modules.BasicDealFilter(cfg.Dealmaking, nil)), Override(new(storagemarket.StorageProvider), modules.StorageProvider), diff --git a/node/config/def.go b/node/config/def.go index 735107e29..644c28bea 100644 --- a/node/config/def.go +++ b/node/config/def.go @@ -109,10 +109,11 @@ func DefaultStorageMiner() *StorageMiner { AvailableBalanceBuffer: types.FIL(big.Zero()), DisableCollateralFallback: false, - BatchPreCommits: true, - MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors - PreCommitBatchWait: Duration(24 * time.Hour), 
// this should be less than 31.5 hours, which is the expiration of a precommit ticket - PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration + BatchPreCommits: true, + MaxPreCommitBatch: miner5.PreCommitSectorBatchMaxSize, // up to 256 sectors + PreCommitBatchWait: Duration(24 * time.Hour), // this should be less than 31.5 hours, which is the expiration of a precommit ticket + // XXX snap deals wait deals slack if first + PreCommitBatchSlack: Duration(3 * time.Hour), // time buffer for forceful batch submission before sectors/deals in batch would start expiring, higher value will lower the chances for message fail due to expiration CommittedCapacitySectorLifetime: Duration(builtin.EpochDurationSeconds * uint64(policy.GetMaxSectorExpirationExtension()) * uint64(time.Second)), @@ -131,11 +132,13 @@ func DefaultStorageMiner() *StorageMiner { }, Storage: sectorstorage.SealerConfig{ - AllowAddPiece: true, - AllowPreCommit1: true, - AllowPreCommit2: true, - AllowCommit: true, - AllowUnseal: true, + AllowAddPiece: true, + AllowPreCommit1: true, + AllowPreCommit2: true, + AllowCommit: true, + AllowUnseal: true, + AllowReplicaUpdate: true, + AllowProveReplicaUpdate2: true, // Default to 10 - tcp should still be able to figure this out, and // it's the ratio between 10gbit / 1gbit @@ -160,8 +163,9 @@ func DefaultStorageMiner() *StorageMiner { MaxDealsPerPublishMsg: 8, MaxProviderCollateralMultiplier: 2, - SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, - SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, + SimultaneousTransfersForStorage: DefaultSimultaneousTransfers, + SimultaneousTransfersForStoragePerClient: 0, + SimultaneousTransfersForRetrieval: DefaultSimultaneousTransfers, StartEpochSealingBuffer: 480, // 480 epochs buffer == 4 hours from adding deal to sector to sector being 
sealed @@ -212,6 +216,7 @@ func DefaultStorageMiner() *StorageMiner { DAGStore: DAGStoreConfig{ MaxConcurrentIndex: 5, MaxConcurrencyStorageCalls: 100, + MaxConcurrentUnseals: 5, GCInterval: Duration(1 * time.Minute), }, } diff --git a/node/config/doc_gen.go b/node/config/doc_gen.go index 296501edc..c3730cbac 100644 --- a/node/config/doc_gen.go +++ b/node/config/doc_gen.go @@ -162,6 +162,14 @@ Default value: 5.`, Comment: `The maximum amount of unsealed deals that can be fetched simultaneously from the storage subsystem. 0 means unlimited. +Default value: 0 (unlimited).`, + }, + { + Name: "MaxConcurrentUnseals", + Type: "int", + + Comment: `The maximum amount of unseals that can be processed simultaneously +from the storage subsystem. 0 means unlimited. Default value: 0 (unlimited).`, }, { @@ -272,6 +280,17 @@ passed to the sealing node by the markets service. 0 is unlimited.`, Comment: `The maximum number of parallel online data transfers for storage deals`, }, + { + Name: "SimultaneousTransfersForStoragePerClient", + Type: "uint64", + + Comment: `The maximum number of simultaneous data transfers from any single client +for storage deals. +Unset by default (0), and values higher than SimultaneousTransfersForStorage +will have no effect; i.e. the total number of simultaneous data transfers +across all storage clients is bound by SimultaneousTransfersForStorage +regardless of this number.`, + }, { Name: "SimultaneousTransfersForRetrieval", Type: "uint64", diff --git a/node/config/load_test.go b/node/config/load_test.go index 9abe8a54b..9267b44ad 100644 --- a/node/config/load_test.go +++ b/node/config/load_test.go @@ -1,3 +1,4 @@ +//stm: #unit package config import ( diff --git a/node/config/types.go b/node/config/types.go index 5013d4274..21caac99e 100644 --- a/node/config/types.go +++ b/node/config/types.go @@ -76,6 +76,11 @@ type DAGStoreConfig struct { // Default value: 0 (unlimited). 
MaxConcurrentReadyFetches int + // The maximum amount of unseals that can be processed simultaneously + // from the storage subsystem. 0 means unlimited. + // Default value: 0 (unlimited). + MaxConcurrentUnseals int + // The maximum number of simultaneous inflight API calls to the storage // subsystem. // Default value: 100. @@ -132,6 +137,13 @@ type DealmakingConfig struct { MaxStagingDealsBytes int64 // The maximum number of parallel online data transfers for storage deals SimultaneousTransfersForStorage uint64 + // The maximum number of simultaneous data transfers from any single client + // for storage deals. + // Unset by default (0), and values higher than SimultaneousTransfersForStorage + // will have no effect; i.e. the total number of simultaneous data transfers + // across all storage clients is bound by SimultaneousTransfersForStorage + // regardless of this number. + SimultaneousTransfersForStoragePerClient uint64 // The maximum number of parallel online data transfers for retrieval deals SimultaneousTransfersForRetrieval uint64 // Minimum start epoch buffer to give time for sealing of sector with deal. 
diff --git a/node/hello/hello.go b/node/hello/hello.go index f00a317cc..ada63e0d4 100644 --- a/node/hello/hello.go +++ b/node/hello/hello.go @@ -163,7 +163,7 @@ func (hs *Service) SayHello(ctx context.Context, pid peer.ID) error { return err } - gen, err := hs.cs.GetGenesis() + gen, err := hs.cs.GetGenesis(ctx) if err != nil { return err } diff --git a/node/impl/backup.go b/node/impl/backup.go index 10f673a4b..7acc7e018 100644 --- a/node/impl/backup.go +++ b/node/impl/backup.go @@ -1,6 +1,7 @@ package impl import ( + "context" "os" "path/filepath" "strings" @@ -12,7 +13,7 @@ import ( "github.com/filecoin-project/lotus/node/modules/dtypes" ) -func backup(mds dtypes.MetadataDS, fpath string) error { +func backup(ctx context.Context, mds dtypes.MetadataDS, fpath string) error { bb, ok := os.LookupEnv("LOTUS_BACKUP_BASE_PATH") if !ok { return xerrors.Errorf("LOTUS_BACKUP_BASE_PATH env var not set") @@ -52,7 +53,7 @@ func backup(mds dtypes.MetadataDS, fpath string) error { return xerrors.Errorf("open %s: %w", fpath, err) } - if err := bds.Backup(out); err != nil { + if err := bds.Backup(ctx, out); err != nil { if cerr := out.Close(); cerr != nil { log.Errorw("error closing backup file while handling backup error", "closeErr", cerr, "backupErr", err) } diff --git a/node/impl/client/client.go b/node/impl/client/client.go index 199a2122d..7848c84f9 100644 --- a/node/impl/client/client.go +++ b/node/impl/client/client.go @@ -4,17 +4,22 @@ import ( "bufio" "bytes" "context" + "errors" "fmt" "io" "os" "sort" + "strings" "time" bstore "github.com/ipfs/go-ipfs-blockstore" + format "github.com/ipfs/go-ipld-format" unixfile "github.com/ipfs/go-unixfs/file" "github.com/ipld/go-car" + "github.com/ipld/go-car/util" carv2 "github.com/ipld/go-car/v2" carv2bs "github.com/ipld/go-car/v2/blockstore" + "github.com/ipld/go-ipld-prime/datamodel" "golang.org/x/xerrors" "github.com/filecoin-project/go-padreader" @@ -58,7 +63,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" 
"github.com/filecoin-project/specs-actors/v3/actors/builtin/market" - marketevents "github.com/filecoin-project/lotus/markets/loggers" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/repo/imports" @@ -146,7 +150,7 @@ func (a *API) dealStarter(ctx context.Context, params *api.StartDealParams, isSt if err != nil { return nil, xerrors.Errorf("failed to find blockstore for root CID: %w", err) } - if has, err := bs.Has(params.Data.Root); err != nil { + if has, err := bs.Has(ctx, params.Data.Root); err != nil { return nil, xerrors.Errorf("failed to query blockstore for root CID: %w", err) } else if !has { return nil, xerrors.Errorf("failed to find root CID in blockstore: %w", err) @@ -486,6 +490,7 @@ func (a *API) makeRetrievalQuery(ctx context.Context, rp rm.RetrievalPeer, paylo Size: queryResponse.Size, MinPrice: queryResponse.PieceRetrievalPrice(), UnsealPrice: queryResponse.UnsealPrice, + PricePerByte: queryResponse.MinPricePerByte, PaymentInterval: queryResponse.MaxPaymentInterval, PaymentIntervalIncrease: queryResponse.MaxPaymentIntervalIncrease, Miner: queryResponse.PaymentAddress, // TODO: check @@ -516,7 +521,7 @@ func (a *API) ClientImport(ctx context.Context, ref api.FileRef) (res *api.Impor } defer f.Close() //nolint:errcheck - hd, _, err := car.ReadHeader(bufio.NewReader(f)) + hd, err := car.ReadHeader(bufio.NewReader(f)) if err != nil { return nil, xerrors.Errorf("failed to read CAR header: %w", err) } @@ -760,325 +765,405 @@ func (a *API) ClientCancelRetrievalDeal(ctx context.Context, dealID rm.DealID) e } } -func (a *API) ClientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) error { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) +func getDataSelector(dps *api.Selector, matchPath bool) (datamodel.Node, error) { + sel := selectorparse.CommonSelector_ExploreAllRecursively + if dps != nil { - for { - select { - case evt, ok := <-events: - if 
!ok { // done successfully - return nil + if strings.HasPrefix(string(*dps), "{") { + var err error + sel, err = selectorparse.ParseJSONSelector(string(*dps)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *dps, err) } + } else { + ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + + selspec, err := textselector.SelectorSpecFromPath( + textselector.Expression(*dps), matchPath, - if evt.Err != "" { - return xerrors.Errorf("retrieval failed: %s", evt.Err) + ssb.ExploreRecursive( + selector.RecursionLimitNone(), + ssb.ExploreUnion(ssb.Matcher(), ssb.ExploreAll(ssb.ExploreRecursiveEdge())), + ), + ) + if err != nil { + return nil, xerrors.Errorf("failed to parse text-selector '%s': %w", *dps, err) } - case <-ctx.Done(): - return xerrors.Errorf("retrieval timed out") + + sel = selspec.Node() + log.Infof("partial retrieval of datamodel-path-selector %s/*", *dps) } } -} - -func (a *API) ClientRetrieveWithEvents(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef) (<-chan marketevents.RetrievalEvent, error) { - events := make(chan marketevents.RetrievalEvent) - go a.clientRetrieve(ctx, order, ref, events) - return events, nil -} -type retrievalSubscribeEvent struct { - event rm.ClientEvent - state rm.ClientDealState + return sel, nil } -func consumeAllEvents(ctx context.Context, dealID rm.DealID, subscribeEvents chan retrievalSubscribeEvent, events chan marketevents.RetrievalEvent) error { - for { - var subscribeEvent retrievalSubscribeEvent - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case subscribeEvent = <-subscribeEvents: - if subscribeEvent.state.ID != dealID { - // we can't check the deal ID ahead of time because: - // 1. We need to subscribe before retrieving. - // 2. We won't know the deal ID until after retrieving. 
- continue - } - } - - select { - case <-ctx.Done(): - return xerrors.New("Retrieval Timed Out") - case events <- marketevents.RetrievalEvent{ - Event: subscribeEvent.event, - Status: subscribeEvent.state.Status, - BytesReceived: subscribeEvent.state.TotalReceived, - FundsSpent: subscribeEvent.state.FundsSpent, - }: - } +func (a *API) ClientRetrieve(ctx context.Context, params api.RetrievalOrder) (*api.RestrievalRes, error) { + sel, err := getDataSelector(params.DataSelector, false) + if err != nil { + return nil, err + } - state := subscribeEvent.state - switch state.Status { - case rm.DealStatusCompleted: - return nil - case rm.DealStatusRejected: - return xerrors.Errorf("Retrieval Proposal Rejected: %s", state.Message) - case rm.DealStatusCancelled: - return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) - case - rm.DealStatusDealNotFound, - rm.DealStatusErrored: - return xerrors.Errorf("Retrieval Error: %s", state.Message) - } + di, err := a.doRetrieval(ctx, params, sel) + if err != nil { + return nil, err } + + return &api.RestrievalRes{ + DealID: di, + }, nil } -func (a *API) clientRetrieve(ctx context.Context, order api.RetrievalOrder, ref *api.FileRef, events chan marketevents.RetrievalEvent) { - defer close(events) +func (a *API) doRetrieval(ctx context.Context, order api.RetrievalOrder, sel datamodel.Node) (rm.DealID, error) { + if order.MinerPeer == nil || order.MinerPeer.ID == "" { + mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) + if err != nil { + return 0, err + } - finish := func(e error) { - if e != nil { - events <- marketevents.RetrievalEvent{Err: e.Error(), FundsSpent: big.Zero()} + order.MinerPeer = &rm.RetrievalPeer{ + ID: *mi.PeerId, + Address: order.Miner, } } - sel := selectorparse.CommonSelector_ExploreAllRecursively - if order.DatamodelPathSelector != nil { - - ssb := builder.NewSelectorSpecBuilder(basicnode.Prototype.Any) + if order.Total.Int == nil { + return 0, xerrors.Errorf("cannot make 
retrieval deal for null total") + } - selspec, err := textselector.SelectorSpecFromPath( + if order.Size == 0 { + return 0, xerrors.Errorf("cannot make retrieval deal for zero bytes") + } - *order.DatamodelPathSelector, + ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) - // URGH - this is a direct copy from https://github.com/filecoin-project/go-fil-markets/blob/v1.12.0/shared/selectors.go#L10-L16 - // Unable to use it because we need the SelectorSpec, and markets exposes just a reified node - ssb.ExploreRecursive( - selector.RecursionLimitNone(), - ssb.ExploreAll(ssb.ExploreRecursiveEdge()), - ), - ) - if err != nil { - finish(xerrors.Errorf("failed to parse text-selector '%s': %w", *order.DatamodelPathSelector, err)) - return - } - - sel = selspec.Node() - log.Infof("partial retrieval of datamodel-path-selector %s/*", *order.DatamodelPathSelector) + params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) + if err != nil { + return 0, xerrors.Errorf("Error in retrieval params: %s", err) } - // summary: - // 1. if we're retrieving from an import, FromLocalCAR will be set. - // Skip the retrieval itself, and use the provided car as a blockstore further down - // to extract a CAR or UnixFS export from. - // 2. if we're using an IPFS blockstore for retrieval, retrieve into it, - // then use the virtual blockstore to extract a CAR or UnixFS export from it. - // 3. if we have to retrieve, perform a CARv2 retrieval, then either - // extract the CARv1 (with ExtractV1File) or use it as a blockstore further down. + id := a.Retrieval.NextID() + id, err = a.Retrieval.Retrieve( + ctx, + id, + order.Root, + params, + order.Total, + *order.MinerPeer, + order.Client, + order.Miner, + ) - // this indicates we're proxying to IPFS. 
- proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor) + if err != nil { + return 0, xerrors.Errorf("Retrieve failed: %w", err) + } - carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor) + return id, nil +} - carPath := order.FromLocalCAR +func (a *API) ClientRetrieveWait(ctx context.Context, deal rm.DealID) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() - // we actually need to retrieve from the network - if carPath == "" { + subscribeEvents := make(chan rm.ClientDealState, 1) - if !retrieveIntoIPFS && !retrieveIntoCAR { - // we don't recognize the blockstore accessor. - finish(xerrors.Errorf("unsupported retrieval blockstore accessor")) + unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { + // We'll check the deal IDs inside consumeAllEvents. + if state.ID != deal { return } - - if order.MinerPeer == nil || order.MinerPeer.ID == "" { - mi, err := a.StateMinerInfo(ctx, order.Miner, types.EmptyTSK) - if err != nil { - finish(err) - return - } - - order.MinerPeer = &rm.RetrievalPeer{ - ID: *mi.PeerId, - Address: order.Miner, - } + select { + case <-ctx.Done(): + case subscribeEvents <- state: } + }) + defer unsubscribe() - if order.Total.Int == nil { - finish(xerrors.Errorf("cannot make retrieval deal for null total")) - return + { + state, err := a.Retrieval.GetDeal(deal) + if err != nil { + return xerrors.Errorf("getting deal state: %w", err) + } + select { + case subscribeEvents <- state: + default: // already have an event queued from the subscription } + } - if order.Size == 0 { - finish(xerrors.Errorf("cannot make retrieval deal for zero bytes")) - return + for { + select { + case <-ctx.Done(): + return xerrors.New("Retrieval Timed Out") + case state := <-subscribeEvents: + switch state.Status { + case rm.DealStatusCompleted: + return nil + case rm.DealStatusRejected: + return xerrors.Errorf("Retrieval 
Proposal Rejected: %s", state.Message) + case rm.DealStatusCancelled: + return xerrors.Errorf("Retrieval was cancelled externally: %s", state.Message) + case + rm.DealStatusDealNotFound, + rm.DealStatusErrored: + return xerrors.Errorf("Retrieval Error: %s", state.Message) + } } + } +} - ppb := types.BigDiv(order.Total, types.NewInt(order.Size)) +type ExportDest struct { + Writer io.Writer + Path string +} - params, err := rm.NewParamsV1(ppb, order.PaymentInterval, order.PaymentIntervalIncrease, sel, order.Piece, order.UnsealPrice) - if err != nil { - finish(xerrors.Errorf("Error in retrieval params: %s", err)) - return - } +func (ed *ExportDest) doWrite(cb func(io.Writer) error) error { + if ed.Writer != nil { + return cb(ed.Writer) + } - // Subscribe to events before retrieving to avoid losing events. - subscribeEvents := make(chan retrievalSubscribeEvent, 1) - subscribeCtx, cancel := context.WithCancel(ctx) - defer cancel() - unsubscribe := a.Retrieval.SubscribeToEvents(func(event rm.ClientEvent, state rm.ClientDealState) { - // We'll check the deal IDs inside consumeAllEvents. 
- if state.PayloadCID.Equals(order.Root) { - select { - case <-subscribeCtx.Done(): - case subscribeEvents <- retrievalSubscribeEvent{event, state}: - } - } - }) + f, err := os.OpenFile(ed.Path, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } - id := a.Retrieval.NextID() - id, err = a.Retrieval.Retrieve( - ctx, - id, - order.Root, - params, - order.Total, - *order.MinerPeer, - order.Client, - order.Miner, - ) + if err := cb(f); err != nil { + _ = f.Close() + return err + } - if err != nil { - unsubscribe() - finish(xerrors.Errorf("Retrieve failed: %w", err)) - return - } + return f.Close() +} - err = consumeAllEvents(ctx, id, subscribeEvents, events) +func (a *API) ClientExport(ctx context.Context, exportRef api.ExportRef, ref api.FileRef) error { + return a.ClientExportInto(ctx, exportRef, ref.IsCAR, ExportDest{Path: ref.Path}) +} - unsubscribe() - if err != nil { - finish(xerrors.Errorf("Retrieve: %w", err)) - return +func (a *API) ClientExportInto(ctx context.Context, exportRef api.ExportRef, car bool, dest ExportDest) error { + proxyBss, retrieveIntoIPFS := a.RtvlBlockstoreAccessor.(*retrievaladapter.ProxyBlockstoreAccessor) + carBss, retrieveIntoCAR := a.RtvlBlockstoreAccessor.(*retrievaladapter.CARBlockstoreAccessor) + carPath := exportRef.FromLocalCAR + + if carPath == "" { + if !retrieveIntoIPFS && !retrieveIntoCAR { + return xerrors.Errorf("unsupported retrieval blockstore accessor") } if retrieveIntoCAR { - carPath = carBss.PathFor(id) + carPath = carBss.PathFor(exportRef.DealID) } } - if ref == nil { - // If ref is nil, it only fetches the data into the configured blockstore - // (if fetching from network). 
- finish(nil) - return - } - - // determine where did the retrieval go var retrievalBs bstore.Blockstore if retrieveIntoIPFS { retrievalBs = proxyBss.Blockstore } else { cbs, err := stores.ReadOnlyFilestore(carPath) if err != nil { - finish(err) - return + return err } defer cbs.Close() //nolint:errcheck retrievalBs = cbs } - // Are we outputting a CAR? - if ref.IsCAR { + dserv := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) + // Are we outputting a CAR? + if car { // not IPFS and we do full selection - just extract the CARv1 from the CARv2 we stored the retrieval in - if !retrieveIntoIPFS && order.DatamodelPathSelector == nil { - finish(carv2.ExtractV1File(carPath, ref.Path)) - return + if !retrieveIntoIPFS && len(exportRef.DAGs) == 0 && dest.Writer == nil { + return carv2.ExtractV1File(carPath, dest.Path) } + } - // generating a CARv1 from the configured blockstore - f, err := os.OpenFile(ref.Path, os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - finish(err) - return + roots, err := parseDagSpec(ctx, exportRef.Root, exportRef.DAGs, dserv, car) + if err != nil { + return xerrors.Errorf("parsing dag spec: %w", err) + } + if car { + return a.outputCAR(ctx, dserv, retrievalBs, exportRef.Root, roots, dest) + } + + if len(roots) != 1 { + return xerrors.Errorf("unixfs retrieval requires one root node, got %d", len(roots)) + } + + return a.outputUnixFS(ctx, roots[0].root, dserv, dest) +} + +func (a *API) outputCAR(ctx context.Context, ds format.DAGService, bs bstore.Blockstore, root cid.Cid, dags []dagSpec, dest ExportDest) error { + // generating a CARv1 from the configured blockstore + roots := make([]cid.Cid, len(dags)) + for i, dag := range dags { + roots[i] = dag.root + } + + return dest.doWrite(func(w io.Writer) error { + + if err := car.WriteHeader(&car.CarHeader{ + Roots: roots, + Version: 1, + }, w); err != nil { + return fmt.Errorf("failed to write car header: %s", err) } - err = car.NewSelectiveCar( - ctx, - 
retrievalBs, - []car.Dag{{ - Root: order.Root, - Selector: sel, - }}, - car.MaxTraversalLinks(config.MaxTraversalLinks), - ).Write(f) + cs := cid.NewSet() + + for _, dagSpec := range dags { + if err := utils.TraverseDag( + ctx, + ds, + root, + dagSpec.selector, + func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { + if r == traversal.VisitReason_SelectionMatch { + var c cid.Cid + if p.LastBlock.Link == nil { + c = root + } else { + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) + if !castOK { + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) + } + + c = cidLnk.Cid + } + + if cs.Visit(c) { + nb, err := bs.Get(ctx, c) + if err != nil { + return xerrors.Errorf("getting block data: %w", err) + } + + err = util.LdWrite(w, c.Bytes(), nb.RawData()) + if err != nil { + return xerrors.Errorf("writing block data: %w", err) + } + } + + return nil + } + return nil + }, + ); err != nil { + return xerrors.Errorf("error while traversing car dag: %w", err) + } + } + + return nil + }) +} + +func (a *API) outputUnixFS(ctx context.Context, root cid.Cid, ds format.DAGService, dest ExportDest) error { + nd, err := ds.Get(ctx, root) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + file, err := unixfile.NewUnixfsFile(ctx, ds, nd) + if err != nil { + return xerrors.Errorf("ClientRetrieve: %w", err) + } + + if dest.Writer == nil { + return files.WriteTo(file, dest.Path) + } + + switch f := file.(type) { + case files.File: + _, err = io.Copy(dest.Writer, f) if err != nil { - finish(err) - return + return err } + return nil + default: + return fmt.Errorf("file type %T is not supported", nd) + } +} + +type dagSpec struct { + root cid.Cid + selector ipld.Node +} - finish(f.Close()) - return +func parseDagSpec(ctx context.Context, root cid.Cid, dsp []api.DagSpec, ds format.DAGService, car bool) ([]dagSpec, error) { + if len(dsp) == 0 { + return []dagSpec{ + { + root: root, + selector: nil, + }, + }, nil } - // 
we are extracting a UnixFS file. - ds := merkledag.NewDAGService(blockservice.New(retrievalBs, offline.Exchange(retrievalBs))) - root := order.Root + out := make([]dagSpec, len(dsp)) + for i, spec := range dsp { - // if we used a selector - need to find the sub-root the user actually wanted to retrieve - if order.DatamodelPathSelector != nil { + if spec.DataSelector == nil { + return nil, xerrors.Errorf("invalid DagSpec at position %d: `DataSelector` can not be nil", i) + } - var subRootFound bool + // reify selector + var err error + out[i].selector, err = getDataSelector(spec.DataSelector, car && spec.ExportMerkleProof) + if err != nil { + return nil, err + } - // no err check - we just compiled this before starting, but now we do not wrap a `*` - selspec, _ := textselector.SelectorSpecFromPath(*order.DatamodelPathSelector, nil) //nolint:errcheck + // find the pointed-at root node within the containing ds + var rsn ipld.Node + + if strings.HasPrefix(string(*spec.DataSelector), "{") { + var err error + rsn, err = selectorparse.ParseJSONSelector(string(*spec.DataSelector)) + if err != nil { + return nil, xerrors.Errorf("failed to parse json-selector '%s': %w", *spec.DataSelector, err) + } + } else { + selspec, _ := textselector.SelectorSpecFromPath(textselector.Expression(*spec.DataSelector), car && spec.ExportMerkleProof, nil) //nolint:errcheck + rsn = selspec.Node() + } + + var newRoot cid.Cid + var errHalt = errors.New("halt walk") if err := utils.TraverseDag( ctx, ds, root, - selspec.Node(), + rsn, func(p traversal.Progress, n ipld.Node, r traversal.VisitReason) error { if r == traversal.VisitReason_SelectionMatch { - - if p.LastBlock.Path.String() != p.Path.String() { + if !car && p.LastBlock.Path.String() != p.Path.String() { return xerrors.Errorf("unsupported selection path '%s' does not correspond to a block boundary (a.k.a. 
CID link)", p.Path.String()) } + if p.LastBlock.Link == nil { + // this is likely the root node that we've matched here + newRoot = root + return errHalt + } + cidLnk, castOK := p.LastBlock.Link.(cidlink.Link) if !castOK { - return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link.String()) + return xerrors.Errorf("cidlink cast unexpectedly failed on '%s'", p.LastBlock.Link) } - root = cidLnk.Cid - subRootFound = true + newRoot = cidLnk.Cid + + return errHalt } return nil }, - ); err != nil { - finish(xerrors.Errorf("error while locating partial retrieval sub-root: %w", err)) - return + ); err != nil && err != errHalt { + return nil, xerrors.Errorf("error while locating partial retrieval sub-root: %w", err) } - if !subRootFound { - finish(xerrors.Errorf("path selection '%s' does not match a node within %s", *order.DatamodelPathSelector, root)) - return + if newRoot == cid.Undef { + return nil, xerrors.Errorf("path selection does not match a node within %s", root) } - } - nd, err := ds.Get(ctx, root) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return - } - file, err := unixfile.NewUnixfsFile(ctx, ds, nd) - if err != nil { - finish(xerrors.Errorf("ClientRetrieve: %w", err)) - return + out[i].root = newRoot } - finish(files.WriteTo(file, ref.Path)) + return out, nil } func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, error) { @@ -1110,8 +1195,13 @@ func (a *API) ClientListRetrievals(ctx context.Context) ([]api.RetrievalInfo, er func (a *API) ClientGetRetrievalUpdates(ctx context.Context) (<-chan api.RetrievalInfo, error) { updates := make(chan api.RetrievalInfo) - unsub := a.Retrieval.SubscribeToEvents(func(_ rm.ClientEvent, deal rm.ClientDealState) { - updates <- a.newRetrievalInfo(ctx, deal) + unsub := a.Retrieval.SubscribeToEvents(func(evt rm.ClientEvent, deal rm.ClientDealState) { + update := a.newRetrievalInfo(ctx, deal) + update.Event = &evt + select { + case updates <- update: + 
case <-ctx.Done(): + } }) go func() { @@ -1196,7 +1286,7 @@ func (a *API) ClientCalcCommP(ctx context.Context, inpath string) (*api.CommPRet } // check that the data is a car file; if it's not, retrieval won't work - _, _, err = car.ReadHeader(bufio.NewReader(rdr)) + _, err = car.ReadHeader(bufio.NewReader(rdr)) if err != nil { return nil, xerrors.Errorf("not a car file: %w", err) } diff --git a/node/impl/client/client_test.go b/node/impl/client/client_test.go index 834c980ab..1b195816d 100644 --- a/node/impl/client/client_test.go +++ b/node/impl/client/client_test.go @@ -1,3 +1,4 @@ +//stm: #unit package client import ( @@ -31,6 +32,7 @@ import ( var testdata embed.FS func TestImportLocal(t *testing.T) { + //stm: @CLIENT_STORAGE_DEALS_IMPORT_LOCAL_001, @CLIENT_RETRIEVAL_FIND_001 ds := dssync.MutexWrap(datastore.NewMapDatastore()) dir := t.TempDir() im := imports.NewManager(ds, dir) @@ -44,6 +46,7 @@ func TestImportLocal(t *testing.T) { b, err := testdata.ReadFile("testdata/payload.txt") require.NoError(t, err) + //stm: @CLIENT_STORAGE_DEALS_LIST_IMPORTS_001 root, err := a.ClientImportLocal(ctx, bytes.NewReader(b)) require.NoError(t, err) require.NotEqual(t, cid.Undef, root) @@ -56,11 +59,12 @@ func TestImportLocal(t *testing.T) { require.Equal(t, root, *it.Root) require.True(t, strings.HasPrefix(it.CARPath, dir)) + //stm: @CLIENT_DATA_HAS_LOCAL_001 local, err := a.ClientHasLocal(ctx, root) require.NoError(t, err) require.True(t, local) - order := api.RetrievalOrder{ + order := api.ExportRef{ Root: root, FromLocalCAR: it.CARPath, } @@ -68,7 +72,7 @@ func TestImportLocal(t *testing.T) { // retrieve as UnixFS. 
out1 := filepath.Join(dir, "retrieval1.data") // as unixfs out2 := filepath.Join(dir, "retrieval2.data") // as car - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out1, }) require.NoError(t, err) @@ -77,7 +81,7 @@ func TestImportLocal(t *testing.T) { require.NoError(t, err) require.Equal(t, b, outBytes) - err = a.ClientRetrieve(ctx, order, &api.FileRef{ + err = a.ClientExport(ctx, order, api.FileRef{ Path: out2, IsCAR: true, }) @@ -107,7 +111,7 @@ func TestImportLocal(t *testing.T) { // recreate the unixfs dag, and see if it matches the original file byte by byte // import the car into a memory blockstore, then export the unixfs file. bs := blockstore.NewBlockstore(datastore.NewMapDatastore()) - _, err = car.LoadCar(bs, exported.DataReader()) + _, err = car.LoadCar(ctx, bs, exported.DataReader()) require.NoError(t, err) dag := merkledag.NewDAGService(blockservice.New(bs, offline.Exchange(bs))) diff --git a/node/impl/client/import_test.go b/node/impl/client/import_test.go index adf6531d0..1d7af86cb 100644 --- a/node/impl/client/import_test.go +++ b/node/impl/client/import_test.go @@ -1,3 +1,4 @@ +//stm: #unit package client import ( diff --git a/node/impl/full.go b/node/impl/full.go index 847a02467..79845ca22 100644 --- a/node/impl/full.go +++ b/node/impl/full.go @@ -45,7 +45,7 @@ type FullNodeAPI struct { } func (n *FullNodeAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(n.DS, fpath) + return backup(ctx, n.DS, fpath) } func (n *FullNodeAPI) NodeStatus(ctx context.Context, inclChainStatus bool) (status api.NodeStatus, err error) { diff --git a/node/impl/full/chain.go b/node/impl/full/chain.go index e8d403337..df3aef766 100644 --- a/node/impl/full/chain.go +++ b/node/impl/full/chain.go @@ -51,6 +51,7 @@ type ChainModuleAPI interface { ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) ChainGetTipSetAfterHeight(ctx 
context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) ChainReadObj(context.Context, cid.Cid) ([]byte, error) + ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) } var _ ChainModuleAPI = *new(api.FullNode) @@ -98,25 +99,29 @@ func (m *ChainModule) ChainHead(context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainGetBlock(ctx context.Context, msg cid.Cid) (*types.BlockHeader, error) { - return a.Chain.GetBlock(msg) + return a.Chain.GetBlock(ctx, msg) } func (m *ChainModule) ChainGetTipSet(ctx context.Context, key types.TipSetKey) (*types.TipSet, error) { - return m.Chain.LoadTipSet(key) + return m.Chain.LoadTipSet(ctx, key) +} + +func (m *ChainModule) ChainGetPath(ctx context.Context, from, to types.TipSetKey) ([]*api.HeadChange, error) { + return m.Chain.GetPath(ctx, from, to) } func (m *ChainModule) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (*api.BlockMessages, error) { - b, err := m.Chain.GetBlock(msg) + b, err := m.Chain.GetBlock(ctx, msg) if err != nil { return nil, err } - bmsgs, smsgs, err := m.Chain.MessagesForBlock(b) + bmsgs, smsgs, crossmsgs, err := m.Chain.MessagesForBlock(ctx, b) if err != nil { return nil, err } - cids := make([]cid.Cid, len(bmsgs)+len(smsgs)) + cids := make([]cid.Cid, len(bmsgs)+len(smsgs)+len(crossmsgs)) for i, m := range bmsgs { cids[i] = m.Cid() @@ -126,9 +131,14 @@ func (m *ChainModule) ChainGetBlockMessages(ctx context.Context, msg cid.Cid) (* cids[i+len(bmsgs)] = m.Cid() } + for i, m := range crossmsgs { + cids[i+len(smsgs)] = m.Cid() + } + return &api.BlockMessages{ BlsMessages: bmsgs, SecpkMessages: smsgs, + CrossMessages: crossmsgs, Cids: cids, }, nil } @@ -138,7 +148,7 @@ func (a *ChainAPI) ChainGetPath(ctx context.Context, from types.TipSetKey, to ty } func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([]api.Message, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != 
nil { return nil, err } @@ -149,12 +159,12 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } @@ -171,7 +181,7 @@ func (a *ChainAPI) ChainGetParentMessages(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([]*types.MessageReceipt, error) { - b, err := a.Chain.GetBlock(bcid) + b, err := a.Chain.GetBlock(ctx, bcid) if err != nil { return nil, err } @@ -181,19 +191,19 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } // TODO: need to get the number of messages better than this - pts, err := a.Chain.LoadTipSet(types.NewTipSetKey(b.Parents...)) + pts, err := a.Chain.LoadTipSet(ctx, types.NewTipSetKey(b.Parents...)) if err != nil { return nil, err } - cm, err := a.Chain.MessagesForTipset(pts) + cm, err := a.Chain.MessagesForTipset(ctx, pts) if err != nil { return nil, err } var out []*types.MessageReceipt for i := 0; i < len(cm); i++ { - r, err := a.Chain.GetParentReceipt(b, i) + r, err := a.Chain.GetParentReceipt(ctx, b, i) if err != nil { return nil, err } @@ -205,7 +215,7 @@ func (a *ChainAPI) ChainGetParentReceipts(ctx context.Context, bcid cid.Cid) ([] } func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSetKey) ([]api.Message, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, err } @@ -215,7 +225,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe return nil, nil } - cm, err := a.Chain.MessagesForTipset(ts) + cm, err := 
a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, err } @@ -232,7 +242,7 @@ func (a *ChainAPI) ChainGetMessagesInTipset(ctx context.Context, tsk types.TipSe } func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -240,7 +250,7 @@ func (m *ChainModule) ChainGetTipSetByHeight(ctx context.Context, h abi.ChainEpo } func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.ChainEpoch, tsk types.TipSetKey) (*types.TipSet, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -248,7 +258,7 @@ func (m *ChainModule) ChainGetTipSetAfterHeight(ctx context.Context, h abi.Chain } func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, error) { - blk, err := m.ExposedBlockstore.Get(obj) + blk, err := m.ExposedBlockstore.Get(ctx, obj) if err != nil { return nil, xerrors.Errorf("blockstore get: %w", err) } @@ -257,11 +267,11 @@ func (m *ChainModule) ChainReadObj(ctx context.Context, obj cid.Cid) ([]byte, er } func (a *ChainAPI) ChainDeleteObj(ctx context.Context, obj cid.Cid) error { - return a.ExposedBlockstore.DeleteBlock(obj) + return a.ExposedBlockstore.DeleteBlock(ctx, obj) } func (m *ChainModule) ChainHasObj(ctx context.Context, obj cid.Cid) (bool, error) { - return m.ExposedBlockstore.Has(obj) + return m.ExposedBlockstore.Has(ctx, obj) } func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) (api.ObjStat, error) { @@ -313,7 +323,7 @@ func (a *ChainAPI) ChainStatObj(ctx context.Context, obj cid.Cid, base cid.Cid) } func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error { - newHeadTs, err := 
a.Chain.GetTipSetFromKey(tsk) + newHeadTs, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -337,11 +347,11 @@ func (a *ChainAPI) ChainSetHead(ctx context.Context, tsk types.TipSetKey) error } } - return a.Chain.SetHead(newHeadTs) + return a.Chain.SetHead(ctx, newHeadTs) } func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { - genb, err := a.Chain.GetGenesis() + genb, err := a.Chain.GetGenesis(ctx) if err != nil { return nil, err } @@ -350,7 +360,7 @@ func (a *ChainAPI) ChainGetGenesis(ctx context.Context) (*types.TipSet, error) { } func (a *ChainAPI) ChainTipSetWeight(ctx context.Context, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -429,7 +439,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put hamt val: %w", err) } @@ -477,7 +487,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -525,7 +535,7 @@ func resolveOnce(bs blockstore.Blockstore, tse stmgr.Executor) func(ctx context. 
return nil, nil, err } - if err := bs.Put(n); err != nil { + if err := bs.Put(ctx, n); err != nil { return nil, nil, xerrors.Errorf("put amt val: %w", err) } @@ -572,7 +582,7 @@ func (a *ChainAPI) ChainGetNode(ctx context.Context, p string) (*api.IpldObject, } func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.Message, error) { - cm, err := m.Chain.GetCMessage(mc) + cm, err := m.Chain.GetCMessage(ctx, mc) if err != nil { return nil, err } @@ -581,7 +591,7 @@ func (m *ChainModule) ChainGetMessage(ctx context.Context, mc cid.Cid) (*types.M } func (a *ChainAPI) ChainExport(ctx context.Context, nroots abi.ChainEpoch, skipoldmsgs bool, tsk types.TipSetKey) (<-chan []byte, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } diff --git a/node/impl/full/gas.go b/node/impl/full/gas.go index edf53ff63..dd1e8d5ea 100644 --- a/node/impl/full/gas.go +++ b/node/impl/full/gas.go @@ -82,14 +82,14 @@ type GasMeta struct { Limit int64 } -func (g *GasPriceCache) GetTSGasStats(cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { +func (g *GasPriceCache) GetTSGasStats(ctx context.Context, cstore *store.ChainStore, ts *types.TipSet) ([]GasMeta, error) { i, has := g.c.Get(ts.Key()) if has { return i.([]GasMeta), nil } var prices []GasMeta - msgs, err := cstore.MessagesForTipset(ts) + msgs, err := cstore.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("loading messages: %w", err) } @@ -173,7 +173,7 @@ func (a *GasAPI) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) (types.BigInt, error) { - return gasEstimateGasPremium(a.Chain, a.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, a.Chain, a.PriceCache, nblocksincl) } func (m *GasModule) GasEstimateGasPremium( ctx context.Context, @@ -182,9 +182,9 @@ func (m *GasModule) GasEstimateGasPremium( gaslimit int64, _ types.TipSetKey, ) 
(types.BigInt, error) { - return gasEstimateGasPremium(m.Chain, m.PriceCache, nblocksincl) + return gasEstimateGasPremium(ctx, m.Chain, m.PriceCache, nblocksincl) } -func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { +func gasEstimateGasPremium(ctx context.Context, cstore *store.ChainStore, cache *GasPriceCache, nblocksincl uint64) (types.BigInt, error) { if nblocksincl == 0 { nblocksincl = 1 } @@ -198,13 +198,13 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc break // genesis } - pts, err := cstore.LoadTipSet(ts.Parents()) + pts, err := cstore.LoadTipSet(ctx, ts.Parents()) if err != nil { return types.BigInt{}, err } blocks += len(pts.Blocks()) - meta, err := cache.GetTSGasStats(cstore, pts) + meta, err := cache.GetTSGasStats(ctx, cstore, pts) if err != nil { return types.BigInt{}, err } @@ -236,14 +236,14 @@ func gasEstimateGasPremium(cstore *store.ChainStore, cache *GasPriceCache, nbloc } func (a *GasAPI) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } return gasEstimateGasLimit(ctx, a.Chain, a.Stmgr, a.Mpool, msgIn, ts) } func (m *GasModule) GasEstimateGasLimit(ctx context.Context, msgIn *types.Message, tsk types.TipSetKey) (int64, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return -1, xerrors.Errorf("getting tipset: %w", err) } @@ -283,7 +283,7 @@ func gasEstimateGasLimit( if err != stmgr.ErrExpensiveFork { break } - ts, err = cstore.GetTipSetFromKey(ts.Parents()) + ts, err = cstore.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return -1, xerrors.Errorf("getting parent tipset: %w", err) } diff --git a/node/impl/full/gas_test.go b/node/impl/full/gas_test.go index 
028e039ce..ac2835790 100644 --- a/node/impl/full/gas_test.go +++ b/node/impl/full/gas_test.go @@ -1,3 +1,4 @@ +//stm: #unit package full import ( diff --git a/node/impl/full/mpool.go b/node/impl/full/mpool.go index f792cdf99..afff871ca 100644 --- a/node/impl/full/mpool.go +++ b/node/impl/full/mpool.go @@ -51,11 +51,11 @@ func (a *MpoolAPI) MpoolGetConfig(context.Context) (*types.MpoolConfig, error) { } func (a *MpoolAPI) MpoolSetConfig(ctx context.Context, cfg *types.MpoolConfig) error { - return a.Mpool.SetConfig(cfg) + return a.Mpool.SetConfig(ctx, cfg) } func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQuality float64) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -64,7 +64,7 @@ func (a *MpoolAPI) MpoolSelect(ctx context.Context, tsk types.TipSetKey, ticketQ } func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*types.SignedMessage, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -87,7 +87,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty // different blocks in tipsets of the same height // we exclude messages that have been included in blocks in the mpool tipset - have, err := a.Mpool.MessagesForBlocks(mpts.Blocks()) + have, err := a.Mpool.MessagesForBlocks(ctx, mpts.Blocks()) if err != nil { return nil, xerrors.Errorf("getting messages for base ts: %w", err) } @@ -97,7 +97,7 @@ func (a *MpoolAPI) MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty } } - msgs, err := a.Mpool.MessagesForBlocks(ts.Blocks()) + msgs, err := a.Mpool.MessagesForBlocks(ctx, ts.Blocks()) if err != nil { return nil, xerrors.Errorf(": %w", err) } @@ -115,7 +115,7 @@ func (a *MpoolAPI) 
MpoolPending(ctx context.Context, tsk types.TipSetKey) ([]*ty return pending, nil } - ts, err = a.Chain.LoadTipSet(ts.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset: %w", err) } diff --git a/node/impl/full/multisig.go b/node/impl/full/multisig.go index 0d20c3f03..edc67ec9e 100644 --- a/node/impl/full/multisig.go +++ b/node/impl/full/multisig.go @@ -100,7 +100,7 @@ func (a *MsigAPI) MsigAddCancel(ctx context.Context, msig address.Address, src a return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.AddSigner), enc) } func (a *MsigAPI) MsigSwapPropose(ctx context.Context, msig address.Address, src address.Address, oldAdd address.Address, newAdd address.Address) (*api.MessagePrototype, error) { @@ -127,7 +127,7 @@ func (a *MsigAPI) MsigSwapCancel(ctx context.Context, msig address.Address, src return nil, actErr } - return a.MsigCancel(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) + return a.MsigCancelTxnHash(ctx, msig, txID, msig, big.Zero(), src, uint64(multisig.Methods.SwapSigner), enc) } func (a *MsigAPI) MsigApprove(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { @@ -138,7 +138,11 @@ func (a *MsigAPI) MsigApproveTxnHash(ctx context.Context, msig address.Address, return a.msigApproveOrCancelTxnHash(ctx, api.MsigApprove, msig, txID, proposer, to, amt, src, method, params) } -func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { +func (a *MsigAPI) MsigCancel(ctx context.Context, msig address.Address, txID uint64, src address.Address) (*api.MessagePrototype, error) { + return 
a.msigApproveOrCancelSimple(ctx, api.MsigCancel, msig, txID, src) +} + +func (a *MsigAPI) MsigCancelTxnHash(ctx context.Context, msig address.Address, txID uint64, to address.Address, amt types.BigInt, src address.Address, method uint64, params []byte) (*api.MessagePrototype, error) { return a.msigApproveOrCancelTxnHash(ctx, api.MsigCancel, msig, txID, src, to, amt, src, method, params) } diff --git a/node/impl/full/state.go b/node/impl/full/state.go index e251fa3d5..e75c83b24 100644 --- a/node/impl/full/state.go +++ b/node/impl/full/state.go @@ -132,7 +132,7 @@ func (a *StateAPI) StateMinerActiveSectors(ctx context.Context, maddr address.Ad } func (m *StateModule) StateMinerInfo(ctx context.Context, actor address.Address, tsk types.TipSetKey) (miner.MinerInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.MinerInfo{}, xerrors.Errorf("failed to load tipset: %w", err) } @@ -250,7 +250,7 @@ func (a *StateAPI) StateMinerPartitions(ctx context.Context, m address.Address, } func (m *StateModule) StateMinerProvingDeadline(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*dline.Info, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -345,7 +345,7 @@ func (a *StateAPI) StateMinerRecoveries(ctx context.Context, addr address.Addres } func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, tsk types.TipSetKey) (*api.MinerPower, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -363,7 +363,7 @@ func (m *StateModule) StateMinerPower(ctx context.Context, addr address.Address, } func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types.TipSetKey) (res *api.InvocResult, err 
error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -372,7 +372,7 @@ func (a *StateAPI) StateCall(ctx context.Context, msg *types.Message, tsk types. if err != stmgr.ErrExpensiveFork { break } - ts, err = a.Chain.GetTipSetFromKey(ts.Parents()) + ts, err = a.Chain.GetTipSetFromKey(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("getting parent tipset: %w", err) } @@ -395,17 +395,17 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. msgToReplay = mlkp.Message - executionTs, err := a.Chain.GetTipSetFromKey(mlkp.TipSet) + executionTs, err := a.Chain.GetTipSetFromKey(ctx, mlkp.TipSet) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", mlkp.TipSet, err) } - ts, err = a.Chain.LoadTipSet(executionTs.Parents()) + ts, err = a.Chain.LoadTipSet(ctx, executionTs.Parents()) if err != nil { return nil, xerrors.Errorf("loading parent tipset %s: %w", mlkp.TipSet, err) } } else { - ts, err = a.Chain.LoadTipSet(tsk) + ts, err = a.Chain.LoadTipSet(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading specified tipset %s: %w", tsk, err) } @@ -433,7 +433,7 @@ func (a *StateAPI) StateReplay(ctx context.Context, tsk types.TipSetKey, mc cid. 
} func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (a *types.Actor, err error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -441,7 +441,7 @@ func (m *StateModule) StateGetActor(ctx context.Context, actor address.Address, } func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -450,7 +450,7 @@ func (m *StateModule) StateLookupID(ctx context.Context, addr address.Address, t } func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, tsk types.TipSetKey) (address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return address.Undef, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -459,7 +459,7 @@ func (m *StateModule) StateAccountKey(ctx context.Context, addr address.Address, } func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*api.ActorState, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -468,7 +468,7 @@ func (a *StateAPI) StateReadState(ctx context.Context, actor address.Address, ts return nil, xerrors.Errorf("getting actor: %w", err) } - blk, err := a.Chain.StateBlockstore().Get(act.Head) + blk, err := a.Chain.StateBlockstore().Get(ctx, act.Head) if err != nil { return nil, xerrors.Errorf("getting actor head: %w", err) } @@ -544,6 +544,9 @@ func (a *StateAPI) MinerCreateBlock(ctx context.Context, bt *api.BlockTemplate) for _, msg := range 
fblk.SecpkMessages { out.SecpkMessages = append(out.SecpkMessages, msg.Cid()) } + for _, msg := range fblk.CrossMessages { + out.CrossMessages = append(out.CrossMessages, msg.Cid()) + } return &out, nil } @@ -556,7 +559,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence var returndec interface{} if recpt.ExitCode == 0 && len(recpt.Return) > 0 { - cmsg, err := m.Chain.GetCMessage(msg) + cmsg, err := m.Chain.GetCMessage(ctx, msg) if err != nil { return nil, xerrors.Errorf("failed to load message after successful receipt search: %w", err) } @@ -585,7 +588,7 @@ func (m *StateModule) StateWaitMsg(ctx context.Context, msg cid.Cid, confidence } func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, msg cid.Cid, lookbackLimit abi.ChainEpoch, allowReplaced bool) (*api.MsgLookup, error) { - fromTs, err := m.Chain.GetTipSetFromKey(tsk) + fromTs, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -607,7 +610,7 @@ func (m *StateModule) StateSearchMsg(ctx context.Context, tsk types.TipSetKey, m } func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -615,7 +618,7 @@ func (m *StateModule) StateListMiners(ctx context.Context, tsk types.TipSetKey) } func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([]address.Address, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -623,7 +626,7 @@ func (a *StateAPI) StateListActors(ctx context.Context, tsk types.TipSetKey) ([] } func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) 
(api.MarketBalance, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.MarketBalance{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -633,7 +636,7 @@ func (m *StateModule) StateMarketBalance(ctx context.Context, addr address.Addre func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketBalance, error) { out := map[string]api.MarketBalance{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -673,7 +676,7 @@ func (a *StateAPI) StateMarketParticipants(ctx context.Context, tsk types.TipSet func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (map[string]api.MarketDeal, error) { out := map[string]api.MarketDeal{} - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -712,7 +715,7 @@ func (a *StateAPI) StateMarketDeals(ctx context.Context, tsk types.TipSetKey) (m } func (m *StateModule) StateMarketStorageDeal(ctx context.Context, dealId abi.DealID, tsk types.TipSetKey) (*api.MarketDeal, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -777,7 +780,7 @@ func (a *StateAPI) StateMinerSectorCount(ctx context.Context, addr address.Addre } func (a *StateAPI) StateSectorPreCommitInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (miner.SectorPreCommitOnChainInfo, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return miner.SectorPreCommitOnChainInfo{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -793,7 +796,7 @@ func (a *StateAPI) 
StateSectorPreCommitInfo(ctx context.Context, maddr address.A } func (m *StateModule) StateSectorGetInfo(ctx context.Context, maddr address.Address, n abi.SectorNumber, tsk types.TipSetKey) (*miner.SectorOnChainInfo, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -825,7 +828,7 @@ func (a *StateAPI) StateSectorPartition(ctx context.Context, maddr address.Addre } func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatch, tsk types.TipSetKey, toheight abi.ChainEpoch) ([]cid.Cid, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -874,7 +877,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc var out []cid.Cid for ts.Height() >= toheight { - msgs, err := a.Chain.MessagesForTipset(ts) + msgs, err := a.Chain.MessagesForTipset(ctx, ts) if err != nil { return nil, xerrors.Errorf("failed to get messages for tipset (%s): %w", ts.Key(), err) } @@ -889,7 +892,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc break } - next, err := a.Chain.LoadTipSet(ts.Parents()) + next, err := a.Chain.LoadTipSet(ctx, ts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next tipset: %w", err) } @@ -901,7 +904,7 @@ func (a *StateAPI) StateListMessages(ctx context.Context, match *api.MessageMatc } func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs []*types.Message, tsk types.TipSetKey) (*api.ComputeStateOutput, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -917,7 +920,7 @@ func (a *StateAPI) StateCompute(ctx context.Context, height abi.ChainEpoch, msgs } func (m 
*StateModule) MsigGetAvailableBalance(ctx context.Context, addr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -938,7 +941,7 @@ func (m *StateModule) MsigGetAvailableBalance(ctx context.Context, addr address. } func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Address, tsk types.TipSetKey) (api.MsigVesting, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.EmptyVesting, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -976,12 +979,12 @@ func (a *StateAPI) MsigGetVestingSchedule(ctx context.Context, addr address.Addr } func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, start types.TipSetKey, end types.TipSetKey) (types.BigInt, error) { - startTs, err := m.Chain.GetTipSetFromKey(start) + startTs, err := m.Chain.GetTipSetFromKey(ctx, start) if err != nil { return types.EmptyInt, xerrors.Errorf("loading start tipset %s: %w", start, err) } - endTs, err := m.Chain.GetTipSetFromKey(end) + endTs, err := m.Chain.GetTipSetFromKey(ctx, end) if err != nil { return types.EmptyInt, xerrors.Errorf("loading end tipset %s: %w", end, err) } @@ -1016,7 +1019,7 @@ func (m *StateModule) MsigGetVested(ctx context.Context, addr address.Address, s } func (m *StateModule) MsigGetPending(ctx context.Context, addr address.Address, tsk types.TipSetKey) ([]*api.MsigTransaction, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return nil, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1053,7 +1056,7 @@ var initialPledgeNum = types.NewInt(110) var initialPledgeDen = types.NewInt(100) func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr address.Address, pci 
miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1114,7 +1117,7 @@ func (a *StateAPI) StateMinerPreCommitDepositForPower(ctx context.Context, maddr func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr address.Address, pci miner.SectorPreCommitInfo, tsk types.TipSetKey) (types.BigInt, error) { // TODO: this repeats a lot of the previous function. Fix that. - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1190,7 +1193,7 @@ func (a *StateAPI) StateMinerInitialPledgeCollateral(ctx context.Context, maddr } func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tsk types.TipSetKey) (types.BigInt, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1219,7 +1222,7 @@ func (a *StateAPI) StateMinerAvailableBalance(ctx context.Context, maddr address } func (a *StateAPI) StateMinerSectorAllocated(ctx context.Context, maddr address.Address, s abi.SectorNumber, tsk types.TipSetKey) (bool, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return false, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1319,7 +1322,7 @@ var dealProviderCollateralDen = types.NewInt(100) // StateDealProviderCollateralBounds returns the min and max collateral a storage provider // can issue. It takes the deal size and verified status as parameters. 
func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, size abi.PaddedPieceSize, verified bool, tsk types.TipSetKey) (api.DealCollateralBounds, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1365,7 +1368,7 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz powClaim.QualityAdjPower, rewPow, circ.FilCirculating, - m.StateManager.GetNtwkVersion(ctx, ts.Height())) + m.StateManager.GetNetworkVersion(ctx, ts.Height())) if err != nil { return api.DealCollateralBounds{}, xerrors.Errorf("getting deal provider coll bounds: %w", err) } @@ -1376,7 +1379,7 @@ func (m *StateModule) StateDealProviderCollateralBounds(ctx context.Context, siz } func (a *StateAPI) StateCirculatingSupply(ctx context.Context, tsk types.TipSetKey) (abi.TokenAmount, error) { - ts, err := a.Chain.GetTipSetFromKey(tsk) + ts, err := a.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return types.EmptyInt, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1397,7 +1400,7 @@ func stateVMCirculatingSupplyInternal( cstore *store.ChainStore, smgr *stmgr.StateManager, ) (api.CirculatingSupply, error) { - ts, err := cstore.GetTipSetFromKey(tsk) + ts, err := cstore.GetTipSetFromKey(ctx, tsk) if err != nil { return api.CirculatingSupply{}, xerrors.Errorf("loading tipset %s: %w", tsk, err) } @@ -1411,14 +1414,14 @@ func stateVMCirculatingSupplyInternal( } func (m *StateModule) StateNetworkVersion(ctx context.Context, tsk types.TipSetKey) (network.Version, error) { - ts, err := m.Chain.GetTipSetFromKey(tsk) + ts, err := m.Chain.GetTipSetFromKey(ctx, tsk) if err != nil { return network.VersionMax, xerrors.Errorf("loading tipset %s: %w", tsk, err) } // TODO: Height-1 to be consistent with the rest of the APIs? // But that's likely going to break a bunch of stuff. 
- return m.StateManager.GetNtwkVersion(ctx, ts.Height()), nil + return m.StateManager.GetNetworkVersion(ctx, ts.Height()), nil } func (a *StateAPI) StateGetRandomnessFromTickets(ctx context.Context, personalization crypto.DomainSeparationTag, randEpoch abi.ChainEpoch, entropy []byte, tsk types.TipSetKey) (abi.Randomness, error) { diff --git a/node/impl/full/sync.go b/node/impl/full/sync.go index 652ae3ecb..efaaa5277 100644 --- a/node/impl/full/sync.go +++ b/node/impl/full/sync.go @@ -51,25 +51,30 @@ func (a *SyncAPI) SyncState(ctx context.Context) (*api.SyncState, error) { } func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) error { - parent, err := a.Syncer.ChainStore().GetBlock(blk.Header.Parents[0]) + parent, err := a.Syncer.ChainStore().GetBlock(ctx, blk.Header.Parents[0]) if err != nil { return xerrors.Errorf("loading parent block: %w", err) } if a.SlashFilter != nil { - if err := a.SlashFilter.MinedBlock(blk.Header, parent.Height); err != nil { + if err := a.SlashFilter.MinedBlock(ctx, blk.Header, parent.Height); err != nil { log.Errorf(" SLASH FILTER ERROR: %s", err) return xerrors.Errorf(" SLASH FILTER ERROR: %w", err) } } // TODO: should we have some sort of fast path to adding a local block? 
- bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(blk.BlsMessages) + bmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(ctx, blk.BlsMessages) if err != nil { return xerrors.Errorf("failed to load bls messages: %w", err) } - smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(blk.SecpkMessages) + smsgs, err := a.Syncer.ChainStore().LoadSignedMessagesFromCids(ctx, blk.SecpkMessages) + if err != nil { + return xerrors.Errorf("failed to load secpk message: %w", err) + } + + crossmsgs, err := a.Syncer.ChainStore().LoadMessagesFromCids(ctx, blk.CrossMessages) if err != nil { return xerrors.Errorf("failed to load secpk message: %w", err) } @@ -78,6 +83,7 @@ func (a *SyncAPI) SyncSubmitBlock(ctx context.Context, blk *types.BlockMsg) erro Header: blk.Header, BlsMessages: bmsgs, SecpkMessages: smsgs, + CrossMessages: crossmsgs, } if err := a.Syncer.ValidateMsgMeta(fb); err != nil { @@ -137,12 +143,12 @@ func (a *SyncAPI) SyncCheckBad(ctx context.Context, bcid cid.Cid) (string, error } func (a *SyncAPI) SyncValidateTipset(ctx context.Context, tsk types.TipSetKey) (bool, error) { - ts, err := a.Syncer.ChainStore().LoadTipSet(tsk) + ts, err := a.Syncer.ChainStore().LoadTipSet(ctx, tsk) if err != nil { return false, err } - fts, err := a.Syncer.ChainStore().TryFillTipSet(ts) + fts, err := a.Syncer.ChainStore().TryFillTipSet(ctx, ts) if err != nil { return false, err } diff --git a/node/impl/hierarchical/subnet.go b/node/impl/hierarchical/subnet.go index f5c220874..c18809b18 100644 --- a/node/impl/hierarchical/subnet.go +++ b/node/impl/hierarchical/subnet.go @@ -6,8 +6,9 @@ import ( "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical" - "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/checkpoints/schema" + snmgr 
"github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/manager" + "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" "go.uber.org/fx" ) @@ -17,34 +18,69 @@ var _ api.HierarchicalCns = &HierarchicalAPI{} type HierarchicalAPI struct { fx.In - Sub *subnet.SubnetMgr + Sub *snmgr.SubnetMgr } func (a *HierarchicalAPI) AddSubnet( ctx context.Context, wallet address.Address, - parent hierarchical.SubnetID, name string, + parent address.SubnetID, name string, consensus uint64, minerStake abi.TokenAmount, + checkPeriod abi.ChainEpoch, delegminer address.Address) (address.Address, error) { - return a.Sub.AddSubnet(ctx, wallet, parent, name, consensus, minerStake, delegminer) + return a.Sub.AddSubnet(ctx, wallet, parent, name, consensus, minerStake, checkPeriod, delegminer) } func (a *HierarchicalAPI) JoinSubnet(ctx context.Context, wallet address.Address, - value abi.TokenAmount, id hierarchical.SubnetID) (cid.Cid, error) { + value abi.TokenAmount, id address.SubnetID) (cid.Cid, error) { return a.Sub.JoinSubnet(ctx, wallet, value, id) } +func (a *HierarchicalAPI) SyncSubnet(ctx context.Context, id address.SubnetID, stop bool) error { + return a.Sub.SyncSubnet(ctx, id, stop) +} + func (a *HierarchicalAPI) MineSubnet(ctx context.Context, wallet address.Address, - id hierarchical.SubnetID, stop bool) error { + id address.SubnetID, stop bool) error { return a.Sub.MineSubnet(ctx, wallet, id, stop) } func (a *HierarchicalAPI) LeaveSubnet(ctx context.Context, wallet address.Address, - id hierarchical.SubnetID) (cid.Cid, error) { + id address.SubnetID) (cid.Cid, error) { return a.Sub.LeaveSubnet(ctx, wallet, id) } func (a *HierarchicalAPI) KillSubnet(ctx context.Context, wallet address.Address, - id hierarchical.SubnetID) (cid.Cid, error) { + id address.SubnetID) (cid.Cid, error) { return a.Sub.KillSubnet(ctx, wallet, id) } + +func (a *HierarchicalAPI) ListCheckpoints(ctx context.Context, + id address.SubnetID, num int) ([]*schema.Checkpoint, 
error) { + return a.Sub.ListCheckpoints(ctx, id, num) +} + +func (a *HierarchicalAPI) ValidateCheckpoint(ctx context.Context, + id address.SubnetID, epoch abi.ChainEpoch) (*schema.Checkpoint, error) { + return a.Sub.ValidateCheckpoint(ctx, id, epoch) +} + +func (a *HierarchicalAPI) GetCrossMsgsPool(ctx context.Context, id address.SubnetID, + height abi.ChainEpoch) ([]*types.Message, error) { + return a.Sub.GetCrossMsgsPool(ctx, id, height) +} + +func (a *HierarchicalAPI) FundSubnet(ctx context.Context, wallet address.Address, + id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) { + return a.Sub.FundSubnet(ctx, wallet, id, value) +} + +func (a *HierarchicalAPI) ReleaseFunds(ctx context.Context, wallet address.Address, + id address.SubnetID, value abi.TokenAmount) (cid.Cid, error) { + return a.Sub.ReleaseFunds(ctx, wallet, id, value) +} + +func (a *HierarchicalAPI) CrossMsgResolve(ctx context.Context, id address.SubnetID, + c cid.Cid, from address.SubnetID) ([]types.Message, error) { + return a.Sub.CrossMsgResolve(ctx, id, c, from) +} diff --git a/node/impl/net/net.go b/node/impl/net/net.go index a1003ffe5..27e7734a1 100644 --- a/node/impl/net/net.go +++ b/node/impl/net/net.go @@ -25,12 +25,13 @@ import ( type NetAPI struct { fx.In - RawHost lp2p.RawHost - Host host.Host - Router lp2p.BaseIpfsRouting - ConnGater *conngater.BasicConnectionGater - Reporter metrics.Reporter - Sk *dtypes.ScoreKeeper + RawHost lp2p.RawHost + Host host.Host + Router lp2p.BaseIpfsRouting + ConnGater *conngater.BasicConnectionGater + ResourceManager network.ResourceManager + Reporter metrics.Reporter + Sk *dtypes.ScoreKeeper } func (a *NetAPI) ID(context.Context) (peer.ID, error) { diff --git a/node/impl/net/rcmgr.go b/node/impl/net/rcmgr.go new file mode 100644 index 000000000..1b6d57d8e --- /dev/null +++ b/node/impl/net/rcmgr.go @@ -0,0 +1,271 @@ +package net + +import ( + "context" + "strings" + + "golang.org/x/xerrors" + + "github.com/libp2p/go-libp2p-core/network" + 
"github.com/libp2p/go-libp2p-core/peer" + "github.com/libp2p/go-libp2p-core/protocol" + rcmgr "github.com/libp2p/go-libp2p-resource-manager" + + "github.com/filecoin-project/lotus/api" +) + +func (a *NetAPI) NetStat(ctx context.Context, scope string) (result api.NetStat, err error) { + switch { + case scope == "all": + rapi, ok := a.ResourceManager.(rcmgr.ResourceManagerState) + if !ok { + return result, xerrors.Errorf("rexource manager does not support ResourceManagerState API") + } + + stat := rapi.Stat() + result.System = &stat.System + result.Transient = &stat.Transient + if len(stat.Services) > 0 { + result.Services = stat.Services + } + if len(stat.Protocols) > 0 { + result.Protocols = make(map[string]network.ScopeStat, len(stat.Protocols)) + for proto, stat := range stat.Protocols { + result.Protocols[string(proto)] = stat + } + } + if len(stat.Peers) > 0 { + result.Peers = make(map[string]network.ScopeStat, len(stat.Peers)) + for p, stat := range stat.Peers { + result.Peers[p.Pretty()] = stat + } + } + + return result, nil + + case scope == "system": + err = a.ResourceManager.ViewSystem(func(s network.ResourceScope) error { + stat := s.Stat() + result.System = &stat + return nil + }) + return result, err + + case scope == "transient": + err = a.ResourceManager.ViewTransient(func(s network.ResourceScope) error { + stat := s.Stat() + result.Transient = &stat + return nil + }) + return result, err + + case strings.HasPrefix(scope, "svc:"): + svc := scope[4:] + err = a.ResourceManager.ViewService(svc, func(s network.ServiceScope) error { + stat := s.Stat() + result.Services = map[string]network.ScopeStat{ + svc: stat, + } + return nil + }) + return result, err + + case strings.HasPrefix(scope, "proto:"): + proto := scope[6:] + err = a.ResourceManager.ViewProtocol(protocol.ID(proto), func(s network.ProtocolScope) error { + stat := s.Stat() + result.Protocols = map[string]network.ScopeStat{ + proto: stat, + } + return nil + }) + return result, err + + case 
strings.HasPrefix(scope, "peer:"): + p := scope[5:] + pid, err := peer.IDFromString(p) + if err != nil { + return result, xerrors.Errorf("invalid peer ID: %s: %w", p, err) + } + err = a.ResourceManager.ViewPeer(pid, func(s network.PeerScope) error { + stat := s.Stat() + result.Peers = map[string]network.ScopeStat{ + p: stat, + } + return nil + }) + return result, err + + default: + return result, xerrors.Errorf("invalid scope %s", scope) + } +} + +func (a *NetAPI) NetLimit(ctx context.Context, scope string) (result api.NetLimit, err error) { + getLimit := func(s network.ResourceScope) error { + limiter, ok := s.(rcmgr.ResourceScopeLimiter) + if !ok { + return xerrors.Errorf("resource scope doesn't implement ResourceScopeLimiter interface") + } + + limit := limiter.Limit() + switch l := limit.(type) { + case *rcmgr.StaticLimit: + result.Memory = l.Memory + result.Streams = l.BaseLimit.Streams + result.StreamsInbound = l.BaseLimit.StreamsInbound + result.StreamsOutbound = l.BaseLimit.StreamsOutbound + result.Conns = l.BaseLimit.Conns + result.ConnsInbound = l.BaseLimit.ConnsInbound + result.ConnsOutbound = l.BaseLimit.ConnsOutbound + result.FD = l.BaseLimit.FD + + case *rcmgr.DynamicLimit: + result.Dynamic = true + result.MemoryFraction = l.MemoryLimit.MemoryFraction + result.MinMemory = l.MemoryLimit.MinMemory + result.MaxMemory = l.MemoryLimit.MaxMemory + result.Streams = l.BaseLimit.Streams + result.StreamsInbound = l.BaseLimit.StreamsInbound + result.StreamsOutbound = l.BaseLimit.StreamsOutbound + result.Conns = l.BaseLimit.Conns + result.ConnsInbound = l.BaseLimit.ConnsInbound + result.ConnsOutbound = l.BaseLimit.ConnsOutbound + result.FD = l.BaseLimit.FD + + default: + return xerrors.Errorf("unknown limit type %T", limit) + } + + return nil + } + + switch { + case scope == "system": + err = a.ResourceManager.ViewSystem(func(s network.ResourceScope) error { + return getLimit(s) + }) + return result, err + + case scope == "transient": + err = 
a.ResourceManager.ViewTransient(func(s network.ResourceScope) error { + return getLimit(s) + }) + return result, err + + case strings.HasPrefix(scope, "svc:"): + svc := scope[4:] + err = a.ResourceManager.ViewService(svc, func(s network.ServiceScope) error { + return getLimit(s) + }) + return result, err + + case strings.HasPrefix(scope, "proto:"): + proto := scope[6:] + err = a.ResourceManager.ViewProtocol(protocol.ID(proto), func(s network.ProtocolScope) error { + return getLimit(s) + }) + return result, err + + case strings.HasPrefix(scope, "peer:"): + p := scope[5:] + pid, err := peer.IDFromString(p) + if err != nil { + return result, xerrors.Errorf("invalid peer ID: %s: %w", p, err) + } + err = a.ResourceManager.ViewPeer(pid, func(s network.PeerScope) error { + return getLimit(s) + }) + return result, err + + default: + return result, xerrors.Errorf("invalid scope %s", scope) + } +} + +func (a *NetAPI) NetSetLimit(ctx context.Context, scope string, limit api.NetLimit) error { + setLimit := func(s network.ResourceScope) error { + limiter, ok := s.(rcmgr.ResourceScopeLimiter) + if !ok { + return xerrors.Errorf("resource scope doesn't implement ResourceScopeLimiter interface") + } + + var newLimit rcmgr.Limit + if limit.Dynamic { + newLimit = &rcmgr.DynamicLimit{ + MemoryLimit: rcmgr.MemoryLimit{ + MemoryFraction: limit.MemoryFraction, + MinMemory: limit.MinMemory, + MaxMemory: limit.MaxMemory, + }, + BaseLimit: rcmgr.BaseLimit{ + Streams: limit.Streams, + StreamsInbound: limit.StreamsInbound, + StreamsOutbound: limit.StreamsOutbound, + Conns: limit.Conns, + ConnsInbound: limit.ConnsInbound, + ConnsOutbound: limit.ConnsOutbound, + FD: limit.FD, + }, + } + } else { + newLimit = &rcmgr.StaticLimit{ + Memory: limit.Memory, + BaseLimit: rcmgr.BaseLimit{ + Streams: limit.Streams, + StreamsInbound: limit.StreamsInbound, + StreamsOutbound: limit.StreamsOutbound, + Conns: limit.Conns, + ConnsInbound: limit.ConnsInbound, + ConnsOutbound: limit.ConnsOutbound, + FD: 
limit.FD, + }, + } + } + + limiter.SetLimit(newLimit) + return nil + } + + switch { + case scope == "system": + err := a.ResourceManager.ViewSystem(func(s network.ResourceScope) error { + return setLimit(s) + }) + return err + + case scope == "transient": + err := a.ResourceManager.ViewTransient(func(s network.ResourceScope) error { + return setLimit(s) + }) + return err + + case strings.HasPrefix(scope, "svc:"): + svc := scope[4:] + err := a.ResourceManager.ViewService(svc, func(s network.ServiceScope) error { + return setLimit(s) + }) + return err + + case strings.HasPrefix(scope, "proto:"): + proto := scope[6:] + err := a.ResourceManager.ViewProtocol(protocol.ID(proto), func(s network.ProtocolScope) error { + return setLimit(s) + }) + return err + + case strings.HasPrefix(scope, "peer:"): + p := scope[5:] + pid, err := peer.IDFromString(p) + if err != nil { + return xerrors.Errorf("invalid peer ID: %s: %w", p, err) + } + err = a.ResourceManager.ViewPeer(pid, func(s network.PeerScope) error { + return setLimit(s) + }) + return err + + default: + return xerrors.Errorf("invalid scope %s", scope) + } +} diff --git a/node/impl/paych/paych.go b/node/impl/paych/paych.go index 773a5efab..df3b1e3e4 100644 --- a/node/impl/paych/paych.go +++ b/node/impl/paych/paych.go @@ -35,11 +35,11 @@ func (a *PaychAPI) PaychGet(ctx context.Context, from, to address.Address, amt t } func (a *PaychAPI) PaychAvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFunds(ch) + return a.PaychMgr.AvailableFunds(ctx, ch) } func (a *PaychAPI) PaychAvailableFundsByFromTo(ctx context.Context, from, to address.Address) (*api.ChannelAvailableFunds, error) { - return a.PaychMgr.AvailableFundsByFromTo(from, to) + return a.PaychMgr.AvailableFundsByFromTo(ctx, from, to) } func (a *PaychAPI) PaychGetWaitReady(ctx context.Context, sentinel cid.Cid) (address.Address, error) { @@ -47,7 +47,7 @@ func (a *PaychAPI) PaychGetWaitReady(ctx 
context.Context, sentinel cid.Cid) (add } func (a *PaychAPI) PaychAllocateLane(ctx context.Context, ch address.Address) (uint64, error) { - return a.PaychMgr.AllocateLane(ch) + return a.PaychMgr.AllocateLane(ctx, ch) } func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address, vouchers []api.VoucherSpec) (*api.PaymentInfo, error) { @@ -60,7 +60,7 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address return nil, err } - lane, err := a.PaychMgr.AllocateLane(ch.Channel) + lane, err := a.PaychMgr.AllocateLane(ctx, ch.Channel) if err != nil { return nil, err } @@ -95,11 +95,11 @@ func (a *PaychAPI) PaychNewPayment(ctx context.Context, from, to address.Address } func (a *PaychAPI) PaychList(ctx context.Context) ([]address.Address, error) { - return a.PaychMgr.ListChannels() + return a.PaychMgr.ListChannels(ctx) } func (a *PaychAPI) PaychStatus(ctx context.Context, pch address.Address) (*api.PaychStatus, error) { - ci, err := a.PaychMgr.GetChannelInfo(pch) + ci, err := a.PaychMgr.GetChannelInfo(ctx, pch) if err != nil { return nil, err } diff --git a/node/impl/storminer.go b/node/impl/storminer.go index 4e970343e..3ebac1409 100644 --- a/node/impl/storminer.go +++ b/node/impl/storminer.go @@ -3,6 +3,7 @@ package impl import ( "context" "encoding/json" + "errors" "fmt" "net/http" "os" @@ -19,6 +20,9 @@ import ( "github.com/google/uuid" "github.com/ipfs/go-cid" + "github.com/ipfs/go-graphsync" + gsimpl "github.com/ipfs/go-graphsync/impl" + "github.com/ipfs/go-graphsync/peerstate" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" "go.uber.org/fx" @@ -28,10 +32,12 @@ import ( "github.com/filecoin-project/go-address" datatransfer "github.com/filecoin-project/go-data-transfer" + gst "github.com/filecoin-project/go-data-transfer/transport/graphsync" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/retrievalmarket" 
"github.com/filecoin-project/go-fil-markets/storagemarket" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/network" sectorstorage "github.com/filecoin-project/lotus/extern/sector-storage" "github.com/filecoin-project/lotus/extern/sector-storage/fsutil" @@ -70,6 +76,8 @@ type StorageMinerAPI struct { RetrievalProvider retrievalmarket.RetrievalProvider `optional:"true"` SectorAccessor retrievalmarket.SectorAccessor `optional:"true"` DataTransfer dtypes.ProviderDataTransfer `optional:"true"` + StagingGraphsync dtypes.StagingGraphsync `optional:"true"` + Transport dtypes.ProviderTransport `optional:"true"` DealPublisher *storageadapter.DealPublisher `optional:"true"` SectorBlocks *sectorblocks.SectorBlocks `optional:"true"` Host host.Host `optional:"true"` @@ -300,11 +308,11 @@ func (sm *StorageMinerAPI) StorageLocal(ctx context.Context) (map[stores.ID]stri return out, nil } -func (sm *StorageMinerAPI) SectorsRefs(context.Context) (map[string][]api.SealedRef, error) { +func (sm *StorageMinerAPI) SectorsRefs(ctx context.Context) (map[string][]api.SealedRef, error) { // json can't handle cids as map keys out := map[string][]api.SealedRef{} - refs, err := sm.SectorBlocks.List() + refs, err := sm.SectorBlocks.List(ctx) if err != nil { return nil, err } @@ -379,8 +387,8 @@ func (sm *StorageMinerAPI) SectorPreCommitPending(ctx context.Context) ([]abi.Se return sm.Miner.SectorPreCommitPending(ctx) } -func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber) error { - return sm.Miner.MarkForUpgrade(id) +func (sm *StorageMinerAPI) SectorMarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error { + return sm.Miner.MarkForUpgrade(ctx, id, snap) } func (sm *StorageMinerAPI) SectorCommitFlush(ctx context.Context) ([]sealiface.CommitBatchRes, error) { @@ -391,6 +399,10 @@ func (sm *StorageMinerAPI) SectorCommitPending(ctx context.Context) ([]abi.Secto return sm.Miner.CommitPending(ctx) } 
+func (sm *StorageMinerAPI) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error { + return sm.Miner.SectorMatchPendingPiecesToOpenSectors(ctx) +} + func (sm *StorageMinerAPI) WorkerConnect(ctx context.Context, url string) error { w, err := connectRemoteWorker(ctx, sm, url) if err != nil { @@ -553,10 +565,174 @@ func (sm *StorageMinerAPI) MarketDataTransferUpdates(ctx context.Context) (<-cha return channels, nil } +func (sm *StorageMinerAPI) MarketDataTransferDiagnostics(ctx context.Context, mpid peer.ID) (*api.TransferDiagnostics, error) { + gsTransport, ok := sm.Transport.(*gst.Transport) + if !ok { + return nil, errors.New("api only works for graphsync as transport") + } + graphsyncConcrete, ok := sm.StagingGraphsync.(*gsimpl.GraphSync) + if !ok { + return nil, errors.New("api only works for non-mock graphsync implementation") + } + + inProgressChannels, err := sm.DataTransfer.InProgressChannels(ctx) + if err != nil { + return nil, err + } + + allReceivingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) + allSendingChannels := make(map[datatransfer.ChannelID]datatransfer.ChannelState) + for channelID, channel := range inProgressChannels { + if channel.OtherPeer() != mpid { + continue + } + if channel.Status() == datatransfer.Completed { + continue + } + if channel.Status() == datatransfer.Failed || channel.Status() == datatransfer.Cancelled { + continue + } + if channel.SelfPeer() == channel.Sender() { + allSendingChannels[channelID] = channel + } else { + allReceivingChannels[channelID] = channel + } + } + + // gather information about active transport channels + transportChannels := gsTransport.ChannelsForPeer(mpid) + // gather information about graphsync state for peer + gsPeerState := graphsyncConcrete.PeerState(mpid) + + sendingTransfers := sm.generateTransfers(ctx, transportChannels.SendingChannels, gsPeerState.IncomingState, allSendingChannels) + receivingTransfers := sm.generateTransfers(ctx, 
transportChannels.ReceivingChannels, gsPeerState.OutgoingState, allReceivingChannels) + + return &api.TransferDiagnostics{ + SendingTransfers: sendingTransfers, + ReceivingTransfers: receivingTransfers, + }, nil +} + +// generate transfers matches graphsync state and data transfer state for a given peer +// to produce detailed output on what's happening with a transfer +func (sm *StorageMinerAPI) generateTransfers(ctx context.Context, + transportChannels map[datatransfer.ChannelID]gst.ChannelGraphsyncRequests, + gsPeerState peerstate.PeerState, + allChannels map[datatransfer.ChannelID]datatransfer.ChannelState) []*api.GraphSyncDataTransfer { + tc := &transferConverter{ + matchedChannelIds: make(map[datatransfer.ChannelID]struct{}), + matchedRequests: make(map[graphsync.RequestID]*api.GraphSyncDataTransfer), + gsDiagnostics: gsPeerState.Diagnostics(), + requestStates: gsPeerState.RequestStates, + allChannels: allChannels, + } + + // iterate through all operating data transfer transport channels + for channelID, channelRequests := range transportChannels { + originalState, err := sm.DataTransfer.ChannelState(ctx, channelID) + var baseDiagnostics []string + var channelState *api.DataTransferChannel + if err != nil { + baseDiagnostics = append(baseDiagnostics, fmt.Sprintf("Unable to lookup channel state: %s", err)) + } else { + cs := api.NewDataTransferChannel(sm.Host.ID(), originalState) + channelState = &cs + } + // add the current request for this channel + tc.convertTransfer(channelID, true, channelState, baseDiagnostics, channelRequests.Current, true) + for _, requestID := range channelRequests.Previous { + // add any previous requests that were cancelled for a restart + tc.convertTransfer(channelID, true, channelState, baseDiagnostics, requestID, false) + } + } + + // collect any graphsync data for channels we don't have any data transfer data for + tc.collectRemainingTransfers() + + return tc.transfers +} + +type transferConverter struct { + matchedChannelIds 
map[datatransfer.ChannelID]struct{} + matchedRequests map[graphsync.RequestID]*api.GraphSyncDataTransfer + transfers []*api.GraphSyncDataTransfer + gsDiagnostics map[graphsync.RequestID][]string + requestStates graphsync.RequestStates + allChannels map[datatransfer.ChannelID]datatransfer.ChannelState +} + +// convert transfer assembles transfer and diagnostic data for a given graphsync/data-transfer request +func (tc *transferConverter) convertTransfer(channelID datatransfer.ChannelID, hasChannelID bool, channelState *api.DataTransferChannel, baseDiagnostics []string, + requestID graphsync.RequestID, isCurrentChannelRequest bool) { + diagnostics := baseDiagnostics + state, hasState := tc.requestStates[requestID] + stateString := state.String() + if !hasState { + stateString = "no graphsync state found" + } + var channelIDPtr *datatransfer.ChannelID + if !hasChannelID { + diagnostics = append(diagnostics, fmt.Sprintf("No data transfer channel id for GraphSync request ID %d", requestID)) + } else { + channelIDPtr = &channelID + if isCurrentChannelRequest && !hasState { + diagnostics = append(diagnostics, fmt.Sprintf("No current request state for data transfer channel id %s", channelID)) + } else if !isCurrentChannelRequest && hasState { + diagnostics = append(diagnostics, fmt.Sprintf("Graphsync request %d is a previous request on data transfer channel id %s that was restarted, but it is still running", requestID, channelID)) + } + } + diagnostics = append(diagnostics, tc.gsDiagnostics[requestID]...) 
+ transfer := &api.GraphSyncDataTransfer{ + RequestID: requestID, + RequestState: stateString, + IsCurrentChannelRequest: isCurrentChannelRequest, + ChannelID: channelIDPtr, + ChannelState: channelState, + Diagnostics: diagnostics, + } + tc.transfers = append(tc.transfers, transfer) + tc.matchedRequests[requestID] = transfer + if hasChannelID { + tc.matchedChannelIds[channelID] = struct{}{} + } +} + +func (tc *transferConverter) collectRemainingTransfers() { + for requestID := range tc.requestStates { + if _, ok := tc.matchedRequests[requestID]; !ok { + tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) + } + } + for requestID := range tc.gsDiagnostics { + if _, ok := tc.matchedRequests[requestID]; !ok { + tc.convertTransfer(datatransfer.ChannelID{}, false, nil, nil, requestID, false) + } + } + for channelID, channelState := range tc.allChannels { + if _, ok := tc.matchedChannelIds[channelID]; !ok { + channelID := channelID + cs := api.NewDataTransferChannel(channelState.SelfPeer(), channelState) + transfer := &api.GraphSyncDataTransfer{ + RequestID: graphsync.RequestID(-1), + RequestState: "graphsync state unknown", + IsCurrentChannelRequest: false, + ChannelID: &channelID, + ChannelState: &cs, + Diagnostics: []string{"data transfer with no open transport channel, cannot determine linked graphsync request"}, + } + tc.transfers = append(tc.transfers, transfer) + } + } +} + func (sm *StorageMinerAPI) MarketPendingDeals(ctx context.Context) (api.PendingDealInfo, error) { return sm.DealPublisher.PendingDeals(), nil } +func (sm *StorageMinerAPI) MarketRetryPublishDeal(ctx context.Context, propcid cid.Cid) error { + return sm.StorageProvider.RetryDealPublishing(propcid) +} + func (sm *StorageMinerAPI) MarketPublishPendingDeals(ctx context.Context) error { sm.DealPublisher.ForcePublishPendingDeals() return nil @@ -944,7 +1120,7 @@ func (sm *StorageMinerAPI) PiecesGetCIDInfo(ctx context.Context, payloadCid cid. 
} func (sm *StorageMinerAPI) CreateBackup(ctx context.Context, fpath string) error { - return backup(sm.DS, fpath) + return backup(ctx, sm.DS, fpath) } func (sm *StorageMinerAPI) CheckProvable(ctx context.Context, pp abi.RegisteredPoStProof, sectors []sto.SectorRef, expensive bool) (map[abi.SectorNumber]string, error) { @@ -984,8 +1160,8 @@ func (sm *StorageMinerAPI) Discover(ctx context.Context) (apitypes.OpenRPCDocume return build.OpenRPCDiscoverJSON_Miner(), nil } -func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) ([]builtin.PoStProof, error) { - return sm.Epp.ComputeProof(ctx, ssi, rand) +func (sm *StorageMinerAPI) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, poStEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) { + return sm.Epp.ComputeProof(ctx, ssi, rand, poStEpoch, nv) } func (sm *StorageMinerAPI) RuntimeSubsystems(context.Context) (res api.MinerSubsystems, err error) { diff --git a/node/modules/chain.go b/node/modules/chain.go index 3518c3b29..39fc6d706 100644 --- a/node/modules/chain.go +++ b/node/modules/chain.go @@ -30,7 +30,7 @@ import ( ) // ChainBitswap uses a blockstore that bypasses all caches. 
-func ChainBitswap(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { +func ChainBitswap(lc fx.Lifecycle, mctx helpers.MetricsCtx, host host.Host, rt routing.Routing, bs dtypes.ExposedBlockstore) dtypes.ChainBitswap { // prefix protocol for chain bitswap // (so bitswap uses /chain/ipfs/bitswap/1.0.0 internally for chain sync stuff) bitswapNetwork := network.NewFromIpfsHost(host, rt, network.Prefix("/chain")) @@ -58,8 +58,8 @@ func ChainBlockService(bs dtypes.ExposedBlockstore, rem dtypes.ChainBitswap) dty return blockservice.New(bs, rem) } -func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { - mp, err := messagepool.New(mpp, ds, us, nn, j) +func MessagePool(lc fx.Lifecycle, mctx helpers.MetricsCtx, us stmgr.UpgradeSchedule, mpp messagepool.Provider, ds dtypes.MetadataDS, nn dtypes.NetworkName, j journal.Journal, protector dtypes.GCReferenceProtector) (*messagepool.MessagePool, error) { + mp, err := messagepool.New(helpers.LifecycleCtx(mctx, lc), mpp, ds, us, nn, j) if err != nil { return nil, xerrors.Errorf("constructing mpool: %w", err) } @@ -73,23 +73,25 @@ func MessagePool(lc fx.Lifecycle, us stmgr.UpgradeSchedule, mpp messagepool.Prov } func ChainStore(lc fx.Lifecycle, + mctx helpers.MetricsCtx, cbs dtypes.ChainBlockstore, sbs dtypes.StateBlockstore, ds dtypes.MetadataDS, basebs dtypes.BaseBlockstore, weight store.WeightFunc, + us stmgr.UpgradeSchedule, j journal.Journal) *store.ChainStore { chain := store.NewChainStore(cbs, sbs, ds, weight, j) - if err := chain.Load(); err != nil { + if err := chain.Load(helpers.LifecycleCtx(mctx, lc)); err != nil { log.Warnf("loading chain state from disk: %s", err) } var startHook func(context.Context) error if ss, ok := basebs.(*splitstore.SplitStore); ok { startHook = func(_ 
context.Context) error { - err := ss.Start(chain) + err := ss.Start(chain, us) if err != nil { err = xerrors.Errorf("error starting splitstore: %w", err) } @@ -120,7 +122,9 @@ func NetworkName(mctx helpers.MetricsCtx, ctx := helpers.LifecycleCtx(mctx, lc) - sm, err := stmgr.NewStateManager(cs, tsexec, syscalls, us, nil) + // The statemanager is initialized here only get the network name + // so we can use a nil resolver + sm, err := stmgr.NewStateManager(cs, tsexec, nil, syscalls, us, nil) if err != nil { return "", err } diff --git a/node/modules/client.go b/node/modules/client.go index 4d988d98a..48f9dc3d7 100644 --- a/node/modules/client.go +++ b/node/modules/client.go @@ -40,11 +40,12 @@ import ( "github.com/filecoin-project/lotus/node/impl/full" payapi "github.com/filecoin-project/lotus/node/impl/paych" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" "github.com/filecoin-project/lotus/node/repo" "github.com/filecoin-project/lotus/node/repo/imports" ) -func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { +func HandleMigrateClientFunds(lc fx.Lifecycle, mctx helpers.MetricsCtx, ds dtypes.MetadataDS, wallet full.WalletAPI, fundMgr *market.FundManager) { lc.Append(fx.Hook{ OnStart: func(ctx context.Context) error { addr, err := wallet.WalletDefaultAddress(ctx) @@ -52,7 +53,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full if err != nil { return nil } - b, err := ds.Get(datastore.NewKey("/marketfunds/client")) + b, err := ds.Get(helpers.LifecycleCtx(mctx, lc), datastore.NewKey("/marketfunds/client")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -73,7 +74,7 @@ func HandleMigrateClientFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, wallet full return nil } - return ds.Delete(datastore.NewKey("/marketfunds/client")) + return ds.Delete(helpers.LifecycleCtx(mctx, lc), 
datastore.NewKey("/marketfunds/client")) }, }) } @@ -113,11 +114,7 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap net := dtnet.NewFromLibp2pHost(h, dtRetryParams) dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/client/transfers")) - transport := dtgstransport.NewTransport(h.ID(), gs, net) - err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec - if err != nil && !os.IsExist(err) { - return nil, err - } + transport := dtgstransport.NewTransport(h.ID(), gs) // data-transfer push / pull channel restart configuration: dtRestartConfig := dtimpl.ChannelRestartConfig(channelmonitor.Config{ @@ -137,7 +134,7 @@ func NewClientGraphsyncDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.Grap // After trying to restart 3 times, give up and fail the transfer MaxConsecutiveRestarts: 3, }) - dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport, dtRestartConfig) + dt, err := dtimpl.NewDataTransfer(dtDs, net, transport, dtRestartConfig) if err != nil { return nil, err } diff --git a/node/modules/dtypes/storage.go b/node/modules/dtypes/storage.go index 6893908f7..542445b1e 100644 --- a/node/modules/dtypes/storage.go +++ b/node/modules/dtypes/storage.go @@ -8,6 +8,7 @@ import ( "github.com/ipfs/go-graphsync" exchange "github.com/ipfs/go-ipfs-exchange-interface" + dtnet "github.com/filecoin-project/go-data-transfer/network" "github.com/filecoin-project/go-fil-markets/piecestore" "github.com/filecoin-project/go-fil-markets/storagemarket/impl/requestvalidation" @@ -85,6 +86,7 @@ type ProviderRequestValidator *requestvalidation.UnifiedRequestValidator // ProviderDataTransfer is a data transfer manager for the provider type ProviderDataTransfer datatransfer.Manager - +type ProviderTransferNetwork dtnet.DataTransferNetwork +type ProviderTransport datatransfer.Transport type StagingBlockstore blockstore.BasicBlockstore type StagingGraphsync graphsync.GraphExchange diff 
--git a/node/modules/genesis.go b/node/modules/genesis.go index 43443b125..03b4e2907 100644 --- a/node/modules/genesis.go +++ b/node/modules/genesis.go @@ -4,6 +4,8 @@ import ( "bytes" "os" + "go.uber.org/fx" + "github.com/ipfs/go-datastore" "github.com/ipld/go-car" "golang.org/x/xerrors" @@ -11,6 +13,7 @@ import ( "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/node/modules/dtypes" + "github.com/filecoin-project/lotus/node/modules/helpers" ) func ErrorGenesis() Genesis { @@ -19,17 +22,18 @@ func ErrorGenesis() Genesis { } } -func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { - return func(bs dtypes.ChainBlockstore) Genesis { +func LoadGenesis(genBytes []byte) func(fx.Lifecycle, helpers.MetricsCtx, dtypes.ChainBlockstore) Genesis { + return func(lc fx.Lifecycle, mctx helpers.MetricsCtx, bs dtypes.ChainBlockstore) Genesis { return func() (header *types.BlockHeader, e error) { - c, err := car.LoadCar(bs, bytes.NewReader(genBytes)) + ctx := helpers.LifecycleCtx(mctx, lc) + c, err := car.LoadCar(ctx, bs, bytes.NewReader(genBytes)) if err != nil { return nil, xerrors.Errorf("loading genesis car file failed: %w", err) } if len(c.Roots) != 1 { return nil, xerrors.New("expected genesis file to have one root") } - root, err := bs.Get(c.Roots[0]) + root, err := bs.Get(ctx, c.Roots[0]) if err != nil { return nil, err } @@ -45,8 +49,9 @@ func LoadGenesis(genBytes []byte) func(dtypes.ChainBlockstore) Genesis { func DoSetGenesis(_ dtypes.AfterGenesisSet) {} -func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { - genFromRepo, err := cs.GetGenesis() +func SetGenesis(lc fx.Lifecycle, mctx helpers.MetricsCtx, cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) { + ctx := helpers.LifecycleCtx(mctx, lc) + genFromRepo, err := cs.GetGenesis(ctx) if err == nil { if os.Getenv("LOTUS_SKIP_GENESIS_CHECK") != "_yes_" { expectedGenesis, err := g() 
@@ -69,5 +74,5 @@ func SetGenesis(cs *store.ChainStore, g Genesis) (dtypes.AfterGenesisSet, error) return dtypes.AfterGenesisSet{}, xerrors.Errorf("genesis func failed: %w", err) } - return dtypes.AfterGenesisSet{}, cs.SetGenesis(genesis) + return dtypes.AfterGenesisSet{}, cs.SetGenesis(ctx, genesis) } diff --git a/node/modules/graphsync.go b/node/modules/graphsync.go index 839508900..724c57ef0 100644 --- a/node/modules/graphsync.go +++ b/node/modules/graphsync.go @@ -1,14 +1,19 @@ package modules import ( + "context" + "time" + "github.com/ipfs/go-graphsync" graphsyncimpl "github.com/ipfs/go-graphsync/impl" gsnet "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/storeutil" "github.com/libp2p/go-libp2p-core/host" "github.com/libp2p/go-libp2p-core/peer" + "go.opencensus.io/stats" "go.uber.org/fx" + "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/filecoin-project/lotus/node/modules/helpers" @@ -49,6 +54,48 @@ func Graphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval hookActions.UsePersistenceOption("chainstore") } }) + + graphsyncStats(mctx, lc, gs) + return gs, nil } } + +func graphsyncStats(mctx helpers.MetricsCtx, lc fx.Lifecycle, gs dtypes.Graphsync) { + stopStats := make(chan struct{}) + lc.Append(fx.Hook{ + OnStart: func(context.Context) error { + go func() { + t := time.NewTicker(10 * time.Second) + for { + select { + case <-t.C: + + st := gs.Stats() + stats.Record(mctx, metrics.GraphsyncReceivingPeersCount.M(int64(st.OutgoingRequests.TotalPeers))) + stats.Record(mctx, metrics.GraphsyncReceivingActiveCount.M(int64(st.OutgoingRequests.Active))) + stats.Record(mctx, metrics.GraphsyncReceivingCountCount.M(int64(st.OutgoingRequests.Pending))) + stats.Record(mctx, metrics.GraphsyncReceivingTotalMemoryAllocated.M(int64(st.IncomingResponses.TotalAllocatedAllPeers))) + stats.Record(mctx, 
metrics.GraphsyncReceivingTotalPendingAllocations.M(int64(st.IncomingResponses.TotalPendingAllocations))) + stats.Record(mctx, metrics.GraphsyncReceivingPeersPending.M(int64(st.IncomingResponses.NumPeersWithPendingAllocations))) + stats.Record(mctx, metrics.GraphsyncSendingPeersCount.M(int64(st.IncomingRequests.TotalPeers))) + stats.Record(mctx, metrics.GraphsyncSendingActiveCount.M(int64(st.IncomingRequests.Active))) + stats.Record(mctx, metrics.GraphsyncSendingCountCount.M(int64(st.IncomingRequests.Pending))) + stats.Record(mctx, metrics.GraphsyncSendingTotalMemoryAllocated.M(int64(st.OutgoingResponses.TotalAllocatedAllPeers))) + stats.Record(mctx, metrics.GraphsyncSendingTotalPendingAllocations.M(int64(st.OutgoingResponses.TotalPendingAllocations))) + stats.Record(mctx, metrics.GraphsyncSendingPeersPending.M(int64(st.OutgoingResponses.NumPeersWithPendingAllocations))) + + case <-stopStats: + return + } + } + }() + + return nil + }, + OnStop: func(ctx context.Context) error { + close(stopStats) + return nil + }, + }) +} diff --git a/node/modules/lp2p/host.go b/node/modules/lp2p/host.go index b0436e9c9..982d9f4cd 100644 --- a/node/modules/lp2p/host.go +++ b/node/modules/lp2p/host.go @@ -10,6 +10,7 @@ import ( "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" dht "github.com/libp2p/go-libp2p-kad-dht" + "github.com/libp2p/go-libp2p-peerstore/pstoremem" record "github.com/libp2p/go-libp2p-record" routedhost "github.com/libp2p/go-libp2p/p2p/host/routed" mocknet "github.com/libp2p/go-libp2p/p2p/net/mock" @@ -33,9 +34,11 @@ type P2PHostIn struct { type RawHost host.Host -func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { - ctx := helpers.LifecycleCtx(mctx, lc) +func Peerstore() (peerstore.Peerstore, error) { + return pstoremem.NewPeerstore() +} +func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, error) { pkey := params.Peerstore.PrivKey(params.ID) if pkey == nil { 
return nil, fmt.Errorf("missing private key for node ID: %s", params.ID.Pretty()) @@ -52,7 +55,7 @@ func Host(mctx helpers.MetricsCtx, lc fx.Lifecycle, params P2PHostIn) (RawHost, opts = append(opts, o...) } - h, err := libp2p.New(ctx, opts...) + h, err := libp2p.New(opts...) if err != nil { return nil, err } diff --git a/node/modules/lp2p/libp2p.go b/node/modules/lp2p/libp2p.go index 4dee15ae9..997792d48 100644 --- a/node/modules/lp2p/libp2p.go +++ b/node/modules/lp2p/libp2p.go @@ -10,10 +10,10 @@ import ( logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p" - connmgr "github.com/libp2p/go-libp2p-connmgr" "github.com/libp2p/go-libp2p-core/crypto" "github.com/libp2p/go-libp2p-core/peer" "github.com/libp2p/go-libp2p-core/peerstore" + "github.com/libp2p/go-libp2p/p2p/net/connmgr" "go.uber.org/fx" ) @@ -69,7 +69,11 @@ func genLibp2pKey() (crypto.PrivKey, error) { func ConnectionManager(low, high uint, grace time.Duration, protected []string) func() (opts Libp2pOpts, err error) { return func() (Libp2pOpts, error) { - cm := connmgr.NewConnManager(int(low), int(high), grace) + cm, err := connmgr.NewConnManager(int(low), int(high), connmgr.WithGracePeriod(grace)) + if err != nil { + return Libp2pOpts{}, err + } + for _, p := range protected { pid, err := peer.IDFromString(p) if err != nil { diff --git a/node/modules/lp2p/rcmgr.go b/node/modules/lp2p/rcmgr.go new file mode 100644 index 000000000..8b286ff5e --- /dev/null +++ b/node/modules/lp2p/rcmgr.go @@ -0,0 +1,72 @@ +package lp2p + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + + "go.uber.org/fx" + + "github.com/libp2p/go-libp2p" + "github.com/libp2p/go-libp2p-core/network" + rcmgr "github.com/libp2p/go-libp2p-resource-manager" + + "github.com/filecoin-project/lotus/node/repo" +) + +func ResourceManager(lc fx.Lifecycle, repo repo.LockedRepo) (network.ResourceManager, error) { + var limiter *rcmgr.BasicLimiter + var opts []rcmgr.Option + + repoPath := repo.Path() + + // create limiter -- 
parse $repo/limits.json if exists + limitsFile := filepath.Join(repoPath, "limits.json") + limitsIn, err := os.Open(limitsFile) + switch { + case err == nil: + defer limitsIn.Close() //nolint:errcheck + limiter, err = rcmgr.NewDefaultLimiterFromJSON(limitsIn) + if err != nil { + return nil, fmt.Errorf("error parsing limit file: %w", err) + } + + case errors.Is(err, os.ErrNotExist): + limiter = rcmgr.NewDefaultLimiter() + + default: + return nil, err + } + + // TODO: also set appropriate default limits for lotus protocols + libp2p.SetDefaultServiceLimits(limiter) + + if os.Getenv("LOTUS_DEBUG_RCMGR") != "" { + debugPath := filepath.Join(repoPath, "debug") + if err := os.MkdirAll(debugPath, 0755); err != nil { + return nil, fmt.Errorf("error creating debug directory: %w", err) + } + traceFile := filepath.Join(debugPath, "rcmgr.json.gz") + opts = append(opts, rcmgr.WithTrace(traceFile)) + } + + mgr, err := rcmgr.NewResourceManager(limiter, opts...) + if err != nil { + return nil, fmt.Errorf("error creating resource manager: %w", err) + } + + lc.Append(fx.Hook{ + OnStop: func(_ context.Context) error { + return mgr.Close() + }}) + + return mgr, nil +} + +func ResourceManagerOption(mgr network.ResourceManager) Libp2pOpts { + return Libp2pOpts{ + Opts: []libp2p.Option{libp2p.ResourceManager(mgr)}, + } +} diff --git a/node/modules/lp2p/smux.go b/node/modules/lp2p/smux.go index f5c74e18b..608467255 100644 --- a/node/modules/lp2p/smux.go +++ b/node/modules/lp2p/smux.go @@ -2,17 +2,13 @@ package lp2p import ( "os" - "strings" "github.com/libp2p/go-libp2p" - smux "github.com/libp2p/go-libp2p-core/mux" - mplex "github.com/libp2p/go-libp2p-mplex" yamux "github.com/libp2p/go-libp2p-yamux" ) -func makeSmuxTransportOption(mplexExp bool) libp2p.Option { +func makeSmuxTransportOption() libp2p.Option { const yamuxID = "/yamux/1.0.0" - const mplexID = "/mplex/6.7.0" ymxtpt := *yamux.DefaultTransport ymxtpt.AcceptBacklog = 512 @@ -21,34 +17,12 @@ func makeSmuxTransportOption(mplexExp 
bool) libp2p.Option { ymxtpt.LogOutput = os.Stderr } - muxers := map[string]smux.Multiplexer{yamuxID: &ymxtpt} - if mplexExp { - muxers[mplexID] = mplex.DefaultTransport - } - - // Allow muxer preference order overriding - order := []string{yamuxID, mplexID} - if prefs := os.Getenv("LIBP2P_MUX_PREFS"); prefs != "" { - order = strings.Fields(prefs) - } - - opts := make([]libp2p.Option, 0, len(order)) - for _, id := range order { - tpt, ok := muxers[id] - if !ok { - log.Warnf("unknown or duplicate muxer in LIBP2P_MUX_PREFS: %s", id) - continue - } - delete(muxers, id) - opts = append(opts, libp2p.Muxer(id, tpt)) - } - - return libp2p.ChainOptions(opts...) + return libp2p.Muxer(yamuxID, &ymxtpt) } -func SmuxTransport(mplex bool) func() (opts Libp2pOpts, err error) { +func SmuxTransport() func() (opts Libp2pOpts, err error) { return func() (opts Libp2pOpts, err error) { - opts.Opts = append(opts.Opts, makeSmuxTransportOption(mplex)) + opts.Opts = append(opts.Opts, makeSmuxTransportOption()) return } } diff --git a/node/modules/services.go b/node/modules/services.go index 17d4a7476..299c01fb0 100644 --- a/node/modules/services.go +++ b/node/modules/services.go @@ -228,8 +228,8 @@ func BuiltinDrandConfig() dtypes.DrandSchedule { return build.DrandConfigSchedule() } -func RandomSchedule(p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { - gen, err := p.Cs.GetGenesis() +func RandomSchedule(lc fx.Lifecycle, mctx helpers.MetricsCtx, p RandomBeaconParams, _ dtypes.AfterGenesisSet) (beacon.Schedule, error) { + gen, err := p.Cs.GetGenesis(helpers.LifecycleCtx(mctx, lc)) if err != nil { return nil, err } diff --git a/node/modules/stmgr.go b/node/modules/stmgr.go index daef52b42..0018a962d 100644 --- a/node/modules/stmgr.go +++ b/node/modules/stmgr.go @@ -2,6 +2,7 @@ package modules import ( "github.com/filecoin-project/lotus/chain/beacon" + "github.com/filecoin-project/lotus/chain/consensus/hierarchical/subnet/resolver" 
"github.com/filecoin-project/lotus/chain/vm" "go.uber.org/fx" @@ -9,8 +10,8 @@ import ( "github.com/filecoin-project/lotus/chain/store" ) -func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule) (*stmgr.StateManager, error) { - sm, err := stmgr.NewStateManager(cs, exec, sys, us, b) +func StateManager(lc fx.Lifecycle, cs *store.ChainStore, exec stmgr.Executor, r *resolver.Resolver, sys vm.SyscallBuilder, us stmgr.UpgradeSchedule, b beacon.Schedule) (*stmgr.StateManager, error) { + sm, err := stmgr.NewStateManager(cs, exec, r, sys, us, b) if err != nil { return nil, err } diff --git a/node/modules/storageminer.go b/node/modules/storageminer.go index b32cbe9e0..da1a016f7 100644 --- a/node/modules/storageminer.go +++ b/node/modules/storageminer.go @@ -32,13 +32,13 @@ import ( "github.com/filecoin-project/go-jsonrpc/auth" "github.com/filecoin-project/go-paramfetch" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-statestore" "github.com/filecoin-project/go-storedcounter" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/namespace" graphsync "github.com/ipfs/go-graphsync/impl" - graphsyncimpl "github.com/ipfs/go-graphsync/impl" gsnet "github.com/ipfs/go-graphsync/network" "github.com/ipfs/go-graphsync/storeutil" "github.com/libp2p/go-libp2p-core/host" @@ -77,7 +77,7 @@ var ( ) func minerAddrFromDS(ds dtypes.MetadataDS) (address.Address, error) { - maddrb, err := ds.Get(datastore.NewKey("miner-address")) + maddrb, err := ds.Get(context.TODO(), datastore.NewKey("miner-address")) if err != nil { return address.Undef, err } @@ -299,7 +299,7 @@ func HandleDeals(mctx helpers.MetricsCtx, lc fx.Lifecycle, host host.Host, h sto func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api.FullNode, minerAddress dtypes.MinerAddress) { lc.Append(fx.Hook{ 
OnStart: func(ctx context.Context) error { - b, err := ds.Get(datastore.NewKey("/marketfunds/provider")) + b, err := ds.Get(ctx, datastore.NewKey("/marketfunds/provider")) if err != nil { if xerrors.Is(err, datastore.ErrNotFound) { return nil @@ -330,24 +330,26 @@ func HandleMigrateProviderFunds(lc fx.Lifecycle, ds dtypes.MetadataDS, node api. return nil } - return ds.Delete(datastore.NewKey("/marketfunds/provider")) + return ds.Delete(ctx, datastore.NewKey("/marketfunds/provider")) }, }) } -// NewProviderDAGServiceDataTransfer returns a data transfer manager that just -// uses the provider's Staging DAG service for transfers -func NewProviderDAGServiceDataTransfer(lc fx.Lifecycle, h host.Host, gs dtypes.StagingGraphsync, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { - net := dtnet.NewFromLibp2pHost(h) +// NewProviderTransferNetwork sets up the libp2p2 protocol networking for data transfer +func NewProviderTransferNetwork(h host.Host) dtypes.ProviderTransferNetwork { + return dtnet.NewFromLibp2pHost(h) +} + +// NewProviderTransport sets up a data transfer transport over graphsync +func NewProviderTransport(h host.Host, gs dtypes.StagingGraphsync) dtypes.ProviderTransport { + return dtgstransport.NewTransport(h.ID(), gs) +} +// NewProviderDataTransfer returns a data transfer manager +func NewProviderDataTransfer(lc fx.Lifecycle, net dtypes.ProviderTransferNetwork, transport dtypes.ProviderTransport, ds dtypes.MetadataDS, r repo.LockedRepo) (dtypes.ProviderDataTransfer, error) { dtDs := namespace.Wrap(ds, datastore.NewKey("/datatransfer/provider/transfers")) - transport := dtgstransport.NewTransport(h.ID(), gs, net) - err := os.MkdirAll(filepath.Join(r.Path(), "data-transfer"), 0755) //nolint: gosec - if err != nil && !os.IsExist(err) { - return nil, err - } - dt, err := dtimpl.NewDataTransfer(dtDs, filepath.Join(r.Path(), "data-transfer"), net, transport) + dt, err := dtimpl.NewDataTransfer(dtDs, net, transport) if err != nil { 
return nil, err } @@ -395,7 +397,7 @@ func StagingBlockstore(lc fx.Lifecycle, mctx helpers.MetricsCtx, r repo.LockedRe // StagingGraphsync creates a graphsync instance which reads and writes blocks // to the StagingBlockstore -func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { +func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForStoragePerPeer uint64, parallelTransfersForRetrieval uint64) func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { return func(mctx helpers.MetricsCtx, lc fx.Lifecycle, ibs dtypes.StagingBlockstore, h host.Host) dtypes.StagingGraphsync { graphsyncNetwork := gsnet.NewFromLibp2pHost(h) lsys := storeutil.LinkSystemForBlockstore(ibs) @@ -404,9 +406,12 @@ func StagingGraphsync(parallelTransfersForStorage uint64, parallelTransfersForRe lsys, graphsync.RejectAllRequestsByDefault(), graphsync.MaxInProgressIncomingRequests(parallelTransfersForRetrieval), + graphsync.MaxInProgressIncomingRequestsPerPeer(parallelTransfersForStoragePerPeer), graphsync.MaxInProgressOutgoingRequests(parallelTransfersForStorage), - graphsyncimpl.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), - graphsyncimpl.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) + graphsync.MaxLinksPerIncomingRequests(config.MaxTraversalLinks), + graphsync.MaxLinksPerOutgoingRequests(config.MaxTraversalLinks)) + + graphsyncStats(mctx, lc, gs) return gs } @@ -681,6 +686,9 @@ func RetrievalProvider( dagStore *dagstore.Wrapper, ) (retrievalmarket.RetrievalProvider, error) { opt := retrievalimpl.DealDeciderOpt(retrievalimpl.DealDecider(userFilter)) + + retrievalmarket.DefaultPricePerByte = big.Zero() // todo: for whatever reason this is a global var in markets + return retrievalimpl.NewProvider( address.Address(maddr), adapter, diff --git 
a/node/modules/storageminer_dagstore.go b/node/modules/storageminer_dagstore.go index 1f72a49b9..513acaad1 100644 --- a/node/modules/storageminer_dagstore.go +++ b/node/modules/storageminer_dagstore.go @@ -11,8 +11,6 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/dagstore" - "github.com/filecoin-project/go-fil-markets/retrievalmarket" - mdagstore "github.com/filecoin-project/lotus/markets/dagstore" "github.com/filecoin-project/lotus/node/config" "github.com/filecoin-project/lotus/node/modules/dtypes" @@ -25,7 +23,7 @@ const ( ) // NewMinerAPI creates a new MinerAPI adaptor for the dagstore mounts. -func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa retrievalmarket.SectorAccessor) (mdagstore.MinerAPI, error) { +func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderPieceStore, sa mdagstore.SectorAccessor) (mdagstore.MinerAPI, error) { cfg, err := extractDAGStoreConfig(r) if err != nil { return nil, err @@ -40,7 +38,7 @@ func NewMinerAPI(lc fx.Lifecycle, r repo.LockedRepo, pieceStore dtypes.ProviderP } } - mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls) + mountApi := mdagstore.NewMinerAPI(pieceStore, sa, cfg.MaxConcurrencyStorageCalls, cfg.MaxConcurrentUnseals) ready := make(chan error, 1) pieceStore.OnReady(func(err error) { ready <- err diff --git a/node/repo/fsrepo_test.go b/node/repo/fsrepo_test.go index bd03cc084..381ebdcbe 100644 --- a/node/repo/fsrepo_test.go +++ b/node/repo/fsrepo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/node/repo/imports/manager.go b/node/repo/imports/manager.go index d972ffb77..2deaa30af 100644 --- a/node/repo/imports/manager.go +++ b/node/repo/imports/manager.go @@ -1,6 +1,7 @@ package imports import ( + "context" "encoding/json" "fmt" "os" @@ -107,6 +108,7 @@ type Meta struct { // CreateImport initializes a new import, returning its ID and optionally a // CAR path where to place the data, if 
requested. func (m *Manager) CreateImport() (id ID, err error) { + ctx := context.TODO() id = ID(m.counter.Next()) meta := &Meta{Labels: map[LabelKey]LabelValue{ @@ -118,7 +120,7 @@ func (m *Manager) CreateImport() (id ID, err error) { return 0, xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), metajson) + err = m.ds.Put(ctx, id.dsKey(), metajson) if err != nil { return 0, xerrors.Errorf("failed to insert import metadata: %w", err) } @@ -129,7 +131,8 @@ func (m *Manager) CreateImport() (id ID, err error) { // AllocateCAR creates a new CAR allocated to the supplied import under the // root directory. func (m *Manager) AllocateCAR(id ID) (path string, err error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return "", xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -163,14 +166,15 @@ func (m *Manager) AllocateCAR(id ID) (path string, err error) { return "", xerrors.Errorf("marshaling store metadata: %w", err) } - err = m.ds.Put(id.dsKey(), meta) + err = m.ds.Put(ctx, id.dsKey(), meta) return path, err } // AddLabel adds a label associated with an import, such as the source, // car path, CID, etc. func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -187,14 +191,15 @@ func (m *Manager) AddLabel(id ID, key LabelKey, value LabelValue) error { return xerrors.Errorf("marshaling store meta: %w", err) } - return m.ds.Put(id.dsKey(), meta) + return m.ds.Put(ctx, id.dsKey(), meta) } // List returns all import IDs known by this Manager. 
func (m *Manager) List() ([]ID, error) { + ctx := context.TODO() var keys []ID - qres, err := m.ds.Query(query.Query{KeysOnly: true}) + qres, err := m.ds.Query(ctx, query.Query{KeysOnly: true}) if err != nil { return nil, xerrors.Errorf("query error: %w", err) } @@ -218,7 +223,9 @@ func (m *Manager) List() ([]ID, error) { // Info returns the metadata known to this store for the specified import ID. func (m *Manager) Info(id ID) (*Meta, error) { - meta, err := m.ds.Get(id.dsKey()) + ctx := context.TODO() + + meta, err := m.ds.Get(ctx, id.dsKey()) if err != nil { return nil, xerrors.Errorf("getting metadata form datastore: %w", err) } @@ -233,7 +240,8 @@ func (m *Manager) Info(id ID) (*Meta, error) { // Remove drops all data associated with the supplied import ID. func (m *Manager) Remove(id ID) error { - if err := m.ds.Delete(id.dsKey()); err != nil { + ctx := context.TODO() + if err := m.ds.Delete(ctx, id.dsKey()); err != nil { return xerrors.Errorf("removing import metadata: %w", err) } return nil diff --git a/node/repo/memrepo_test.go b/node/repo/memrepo_test.go index 965bc02c1..fdf609bac 100644 --- a/node/repo/memrepo_test.go +++ b/node/repo/memrepo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/node/repo/repo_test.go b/node/repo/repo_test.go index 444fab267..cd19f86f6 100644 --- a/node/repo/repo_test.go +++ b/node/repo/repo_test.go @@ -1,3 +1,4 @@ +//stm: #unit package repo import ( diff --git a/node/rpc.go b/node/rpc.go index 86fbf2a90..afeac1bac 100644 --- a/node/rpc.go +++ b/node/rpc.go @@ -28,6 +28,7 @@ import ( "github.com/filecoin-project/lotus/metrics" "github.com/filecoin-project/lotus/metrics/proxy" "github.com/filecoin-project/lotus/node/impl" + "github.com/filecoin-project/lotus/node/impl/client" ) var rpclog = logging.Logger("rpc") @@ -90,14 +91,23 @@ func FullNodeHandler(prefix string, a v1api.FullNode, permissioned bool, opts .. 
// Import handler handleImportFunc := handleImport(a.(*impl.FullNodeAPI)) + handleExportFunc := handleExport(a.(*impl.FullNodeAPI)) if permissioned { importAH := &auth.Handler{ Verify: a.AuthVerify, Next: handleImportFunc, } m.Handle(prefix+"/rest/v0/import", importAH) + + exportAH := &auth.Handler{ + Verify: a.AuthVerify, + Next: handleExportFunc, + } + m.Handle("/rest/v0/export", exportAH) } else { m.HandleFunc(prefix+"/rest/v0/import", handleImportFunc) + + m.HandleFunc("/rest/v0/export", handleExportFunc) } // debugging @@ -170,6 +180,34 @@ func handleImport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Reque } } +func handleExport(a *impl.FullNodeAPI) func(w http.ResponseWriter, r *http.Request) { + return func(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" { + w.WriteHeader(404) + return + } + if !auth.HasPerm(r.Context(), nil, api.PermWrite) { + w.WriteHeader(401) + _ = json.NewEncoder(w).Encode(struct{ Error string }{"unauthorized: missing write permission"}) + return + } + + var eref api.ExportRef + if err := json.Unmarshal([]byte(r.FormValue("export")), &eref); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + car := r.FormValue("car") == "true" + + err := a.ClientExportInto(r.Context(), eref, car, client.ExportDest{Writer: w}) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + } +} + func handleFractionOpt(name string, setter func(int)) http.HandlerFunc { return func(rw http.ResponseWriter, r *http.Request) { if r.Method != http.MethodPost { diff --git a/paychmgr/accessorcache.go b/paychmgr/accessorcache.go index 176fbdd11..358cf7900 100644 --- a/paychmgr/accessorcache.go +++ b/paychmgr/accessorcache.go @@ -1,6 +1,10 @@ package paychmgr -import "github.com/filecoin-project/go-address" +import ( + "context" + + "github.com/filecoin-project/go-address" +) // accessorByFromTo gets a channel accessor for a given from / to pair. 
// The channel accessor facilitates locking a channel so that operations @@ -36,10 +40,10 @@ func (pm *Manager) accessorByFromTo(from address.Address, to address.Address) (* // The channel accessor facilitates locking a channel so that operations // must be performed sequentially on a channel (but can be performed at // the same time on different channels). -func (pm *Manager) accessorByAddress(ch address.Address) (*channelAccessor, error) { +func (pm *Manager) accessorByAddress(ctx context.Context, ch address.Address) (*channelAccessor, error) { // Get the channel from / to pm.lk.RLock() - channelInfo, err := pm.store.ByAddress(ch) + channelInfo, err := pm.store.ByAddress(ctx, ch) pm.lk.RUnlock() if err != nil { return nil, err diff --git a/paychmgr/manager.go b/paychmgr/manager.go index 460722945..e0fcd7a75 100644 --- a/paychmgr/manager.go +++ b/paychmgr/manager.go @@ -92,7 +92,7 @@ func newManager(pchstore *Store, pchapi managerAPI) (*Manager, error) { // Start restarts tracking of any messages that were sent to chain. 
func (pm *Manager) Start() error { - return pm.restartPending() + return pm.restartPending(pm.ctx) } // Stop shuts down any processes used by the manager @@ -110,27 +110,27 @@ func (pm *Manager) GetPaych(ctx context.Context, from, to address.Address, amt t return chanAccessor.getPaych(ctx, amt) } -func (pm *Manager) AvailableFunds(ch address.Address) (*api.ChannelAvailableFunds, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AvailableFunds(ctx context.Context, ch address.Address) (*api.ChannelAvailableFunds, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } - ci, err := ca.getChannelInfo(ch) + ci, err := ca.getChannelInfo(ctx, ch) if err != nil { return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } -func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { +func (pm *Manager) AvailableFundsByFromTo(ctx context.Context, from address.Address, to address.Address) (*api.ChannelAvailableFunds, error) { ca, err := pm.accessorByFromTo(from, to) if err != nil { return nil, err } - ci, err := ca.outboundActiveByFromTo(from, to) + ci, err := ca.outboundActiveByFromTo(ctx, from, to) if err == ErrChannelNotTracked { // If there is no active channel between from / to we still want to // return an empty ChannelAvailableFunds, so that clients can check @@ -151,7 +151,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre return nil, err } - return ca.availableFunds(ci.ChannelID) + return ca.availableFunds(ctx, ci.ChannelID) } // GetPaychWaitReady waits until the create channel / add funds message with the @@ -160,7 +160,7 @@ func (pm *Manager) AvailableFundsByFromTo(from address.Address, to address.Addre func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address.Address, error) { // Find the channel associated with the message CID pm.lk.Lock() - ci, err := 
pm.store.ByMessageCid(mcid) + ci, err := pm.store.ByMessageCid(ctx, mcid) pm.lk.Unlock() if err != nil { @@ -178,25 +178,25 @@ func (pm *Manager) GetPaychWaitReady(ctx context.Context, mcid cid.Cid) (address return chanAccessor.getPaychWaitReady(ctx, mcid) } -func (pm *Manager) ListChannels() ([]address.Address, error) { +func (pm *Manager) ListChannels(ctx context.Context) ([]address.Address, error) { // Need to take an exclusive lock here so that channel operations can't run // in parallel (see channelLock) pm.lk.Lock() defer pm.lk.Unlock() - return pm.store.ListChannels() + return pm.store.ListChannels(ctx) } -func (pm *Manager) GetChannelInfo(addr address.Address) (*ChannelInfo, error) { - ca, err := pm.accessorByAddress(addr) +func (pm *Manager) GetChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return nil, err } - return ca.getChannelInfo(addr) + return ca.getChannelInfo(ctx, addr) } func (pm *Manager) CreateVoucher(ctx context.Context, ch address.Address, voucher paych.SignedVoucher) (*api.VoucherCreateResult, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -223,7 +223,7 @@ func (pm *Manager) CheckVoucherSpendable(ctx context.Context, ch address.Address if len(proof) > 0 { return false, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return false, err } @@ -237,7 +237,7 @@ func (pm *Manager) AddVoucherOutbound(ctx context.Context, ch address.Address, s if len(proof) > 0 { return types.NewInt(0), errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return types.NewInt(0), err } @@ -283,7 +283,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) defer pm.lk.Unlock() // Check if channel is in store - ci, err := 
pm.store.ByAddress(ch) + ci, err := pm.store.ByAddress(ctx, ch) if err == nil { // Channel is in store, so it's already being tracked return ci, nil @@ -316,7 +316,7 @@ func (pm *Manager) trackInboundChannel(ctx context.Context, ch address.Address) } // Save channel to store - return pm.store.TrackChannel(stateCi) + return pm.store.TrackChannel(ctx, stateCi) } // TODO: secret vs proof doesn't make sense, there is only one, not two @@ -324,23 +324,23 @@ func (pm *Manager) SubmitVoucher(ctx context.Context, ch address.Address, sv *pa if len(proof) > 0 { return cid.Undef, errProofNotSupported } - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return cid.Undef, err } return ca.submitVoucher(ctx, ch, sv, secret) } -func (pm *Manager) AllocateLane(ch address.Address) (uint64, error) { - ca, err := pm.accessorByAddress(ch) +func (pm *Manager) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return 0, err } - return ca.allocateLane(ch) + return ca.allocateLane(ctx, ch) } func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { - ca, err := pm.accessorByAddress(ch) + ca, err := pm.accessorByAddress(ctx, ch) if err != nil { return nil, err } @@ -348,7 +348,7 @@ func (pm *Manager) ListVouchers(ctx context.Context, ch address.Address) ([]*Vou } func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } @@ -356,7 +356,7 @@ func (pm *Manager) Settle(ctx context.Context, addr address.Address) (cid.Cid, e } func (pm *Manager) Collect(ctx context.Context, addr address.Address) (cid.Cid, error) { - ca, err := pm.accessorByAddress(addr) + ca, err := pm.accessorByAddress(ctx, addr) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych.go 
b/paychmgr/paych.go index e5e47dfca..16c6604c6 100644 --- a/paychmgr/paych.go +++ b/paychmgr/paych.go @@ -95,18 +95,18 @@ func (ca *channelAccessor) messageBuilder(ctx context.Context, from address.Addr return paych.Message(av, from), nil } -func (ca *channelAccessor) getChannelInfo(addr address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) getChannelInfo(ctx context.Context, addr address.Address) (*ChannelInfo, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.ByAddress(addr) + return ca.store.ByAddress(ctx, addr) } -func (ca *channelAccessor) outboundActiveByFromTo(from, to address.Address) (*ChannelInfo, error) { +func (ca *channelAccessor) outboundActiveByFromTo(ctx context.Context, from, to address.Address) (*ChannelInfo, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.OutboundActiveByFromTo(from, to) + return ca.store.OutboundActiveByFromTo(ctx, from, to) } // createVoucher creates a voucher with the given specification, setting its @@ -118,7 +118,7 @@ func (ca *channelAccessor) createVoucher(ctx context.Context, ch address.Address defer ca.lk.Unlock() // Find the channel for the voucher - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return nil, xerrors.Errorf("failed to get channel info by address: %w", err) } @@ -229,7 +229,7 @@ func (ca *channelAccessor) checkVoucherValidUnlocked(ctx context.Context, ch add } // Check the voucher against the highest known voucher nonce / value - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -298,7 +298,7 @@ func (ca *channelAccessor) checkVoucherSpendable(ctx context.Context, ch address return false, err } - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return false, err } @@ -351,7 +351,7 @@ func (ca *channelAccessor) addVoucher(ctx context.Context, ch address.Address, s } func (ca *channelAccessor) 
addVoucherUnlocked(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, minDelta types.BigInt) (types.BigInt, error) { - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return types.BigInt{}, err } @@ -400,14 +400,14 @@ func (ca *channelAccessor) addVoucherUnlocked(ctx context.Context, ch address.Ad ci.NextLane = sv.Lane + 1 } - return delta, ca.store.putChannelInfo(ci) + return delta, ca.store.putChannelInfo(ctx, ci) } func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address, sv *paych.SignedVoucher, secret []byte) (cid.Cid, error) { ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -453,7 +453,7 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address } // Mark the voucher and any lower-nonce vouchers as having been submitted - err = ca.store.MarkVoucherSubmitted(ci, sv) + err = ca.store.MarkVoucherSubmitted(ctx, ci, sv) if err != nil { return cid.Undef, err } @@ -461,11 +461,11 @@ func (ca *channelAccessor) submitVoucher(ctx context.Context, ch address.Address return smsg.Cid(), nil } -func (ca *channelAccessor) allocateLane(ch address.Address) (uint64, error) { +func (ca *channelAccessor) allocateLane(ctx context.Context, ch address.Address) (uint64, error) { ca.lk.Lock() defer ca.lk.Unlock() - return ca.store.AllocateLane(ch) + return ca.store.AllocateLane(ctx, ch) } func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { @@ -474,12 +474,12 @@ func (ca *channelAccessor) listVouchers(ctx context.Context, ch address.Address) // TODO: just having a passthrough method like this feels odd. 
Seems like // there should be some filtering we're doing here - return ca.store.VouchersForPaych(ch) + return ca.store.VouchersForPaych(ctx, ch) } // laneState gets the LaneStates from chain, then applies all vouchers in // the data store over the chain state -func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { +func (ca *channelAccessor) laneState(ctx context.Context, state paych.State, ch address.Address) (map[uint64]paych.LaneState, error) { // TODO: we probably want to call UpdateChannelState with all vouchers to be fully correct // (but technically dont't need to) @@ -501,7 +501,7 @@ func (ca *channelAccessor) laneState(state paych.State, ch address.Address) (map } // Apply locally stored vouchers - vouchers, err := ca.store.VouchersForPaych(ch) + vouchers, err := ca.store.VouchersForPaych(ctx, ch) if err != nil && err != ErrChannelNotTracked { return nil, err } @@ -583,7 +583,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } @@ -602,7 +602,7 @@ func (ca *channelAccessor) settle(ctx context.Context, ch address.Address) (cid. 
} ci.Settling = true - err = ca.store.putChannelInfo(ci) + err = ca.store.putChannelInfo(ctx, ci) if err != nil { log.Errorf("Error marking channel as settled: %s", err) } @@ -614,7 +614,7 @@ func (ca *channelAccessor) collect(ctx context.Context, ch address.Address) (cid ca.lk.Lock() defer ca.lk.Unlock() - ci, err := ca.store.ByAddress(ch) + ci, err := ca.store.ByAddress(ctx, ch) if err != nil { return cid.Undef, err } diff --git a/paychmgr/paych_test.go b/paychmgr/paych_test.go index ab04ad7e0..e798ba585 100644 --- a/paychmgr/paych_test.go +++ b/paychmgr/paych_test.go @@ -1,3 +1,4 @@ +//stm: #unit package paychmgr import ( @@ -43,6 +44,9 @@ func TestCheckVoucherValid(t *testing.T) { mock.setAccountAddress(fromAcct, from) mock.setAccountAddress(toAcct, to) + //stm: @TOKEN_PAYCH_VOUCHER_VALID_001, @TOKEN_PAYCH_VOUCHER_VALID_002, @TOKEN_PAYCH_VOUCHER_VALID_003 + //stm: @TOKEN_PAYCH_VOUCHER_VALID_004, @TOKEN_PAYCH_VOUCHER_VALID_005, @TOKEN_PAYCH_VOUCHER_VALID_006, @TOKEN_PAYCH_VOUCHER_VALID_007 + //stm: @TOKEN_PAYCH_VOUCHER_VALID_009, @TOKEN_PAYCH_VOUCHER_VALID_010 tcases := []struct { name string expectError bool @@ -242,6 +246,7 @@ func TestCreateVoucher(t *testing.T) { Lane: 1, Amount: voucherLane1Amt, } + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_001 res, err := s.mgr.CreateVoucher(ctx, s.ch, voucher) require.NoError(t, err) require.NotNil(t, res.Voucher) @@ -286,6 +291,7 @@ func TestCreateVoucher(t *testing.T) { Lane: 2, Amount: voucherLane2Amt, } + //stm: @TOKEN_PAYCH_VOUCHER_CREATE_004 res, err = s.mgr.CreateVoucher(ctx, s.ch, voucher) require.NoError(t, err) @@ -296,6 +302,7 @@ func TestCreateVoucher(t *testing.T) { } func TestAddVoucherDelta(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 ctx := context.Background() // Set up a manager with a single payment channel @@ -353,16 +360,17 @@ func TestAddVoucherNextLane(t *testing.T) { _, err := s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err := s.mgr.GetChannelInfo(s.ch) 
+ ci, err := s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 3) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // Allocate a lane (should be lane 3) - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 3) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -372,7 +380,7 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 4) @@ -382,22 +390,25 @@ func TestAddVoucherNextLane(t *testing.T) { _, err = s.mgr.AddVoucherOutbound(ctx, s.ch, sv, nil, minDelta) require.NoError(t, err) - ci, err = s.mgr.GetChannelInfo(s.ch) + ci, err = s.mgr.GetChannelInfo(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, ci.NextLane, 8) } func TestAllocateLane(t *testing.T) { + ctx := context.Background() + // Set up a manager with a single payment channel s := testSetupMgrWithChannel(t) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // First lane should be 0 - lane, err := s.mgr.AllocateLane(s.ch) + lane, err := s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 0) // Next lane should be 1 - lane, err = s.mgr.AllocateLane(s.ch) + lane, err = s.mgr.AllocateLane(ctx, s.ch) require.NoError(t, err) require.EqualValues(t, lane, 1) } @@ -445,8 +456,9 @@ func TestAllocateLaneWithExistingLaneState(t *testing.T) { _, err = mgr.AddVoucherInbound(ctx, ch, sv, nil, minDelta) require.NoError(t, err) + //stm: @TOKEN_PAYCH_ALLOCATE_LANE_001 // Allocate lane should return the next lane (lane 3) - lane, err := mgr.AllocateLane(ch) + lane, err := mgr.AllocateLane(ctx, ch) require.NoError(t, err) require.EqualValues(t, 3, lane) } @@ -507,6 
+519,7 @@ func TestAddVoucherInboundWalletKey(t *testing.T) { } func TestBestSpendable(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 ctx := context.Background() // Set up a manager with a single payment channel @@ -549,6 +562,7 @@ func TestBestSpendable(t *testing.T) { }, }) + //stm: @TOKEN_PAYCH_BEST_SPENDABLE_001 // Verify best spendable vouchers on each lane vouchers, err := BestSpendableByLane(ctx, bsapi, s.ch) require.NoError(t, err) @@ -689,6 +703,7 @@ func TestSubmitVoucher(t *testing.T) { err = p3.UnmarshalCBOR(bytes.NewReader(msg.Message.Params)) require.NoError(t, err) + //stm: @TOKEN_PAYCH_LIST_VOUCHERS_001 // Verify that vouchers are marked as submitted vis, err := s.mgr.ListVouchers(ctx, s.ch) require.NoError(t, err) @@ -746,7 +761,7 @@ func testSetupMgrWithChannel(t *testing.T) *testScaffold { Target: toAcct, Direction: DirOutbound, } - err = mgr.store.putChannelInfo(ci) + err = mgr.store.putChannelInfo(context.Background(), ci) require.NoError(t, err) // Add the from signing key to the wallet diff --git a/paychmgr/paychget_test.go b/paychmgr/paychget_test.go index e6b94db57..c9dc48b05 100644 --- a/paychmgr/paychget_test.go +++ b/paychmgr/paychget_test.go @@ -68,6 +68,7 @@ func TestPaychGetCreateChannelMsg(t *testing.T) { // TestPaychGetCreateChannelThenAddFunds tests creating a channel and then // adding funds to it func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -87,7 +88,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -112,7 +113,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // 
Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -121,7 +122,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -135,13 +136,13 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { require.NoError(t, err) // Should still have one channel - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Channel amount should include last amount sent to GetPaych - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 15, ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -158,6 +159,7 @@ func TestPaychGetCreateChannelThenAddFunds(t *testing.T) { // operation is queued up behind a create channel operation, and the create // channel fails, then the waiting operation can succeed. 
func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -203,12 +205,12 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) }() @@ -222,6 +224,7 @@ func TestPaychGetCreateChannelWithErrorThenCreateAgain(t *testing.T) { // TestPaychGetRecoverAfterError tests that after a create channel fails, the // next attempt to create channel can succeed. func TestPaychGetRecoverAfterError(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -259,12 +262,12 @@ func TestPaychGetRecoverAfterError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt2, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -274,6 +277,7 @@ func TestPaychGetRecoverAfterError(t *testing.T) { // TestPaychGetRecoverAfterAddFundsError tests that after an add funds fails, the // next attempt to add funds can succeed. 
func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -311,12 +315,12 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { require.Error(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) - ci, err := mgr.GetChannelInfo(ch) + ci, err := mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt, ci.Amount) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -338,13 +342,13 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err = mgr.ListChannels() + cis, err = mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err = mgr.GetChannelInfo(ch) + ci, err = mgr.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt3.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -356,6 +360,7 @@ func TestPaychGetRecoverAfterAddFundsError(t *testing.T) { // right after the create channel message is sent, the channel will be // created when the system restarts. 
func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -384,7 +389,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NoError(t, err) // Should have no channels yet (message sent but channel not created) - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 0) @@ -409,7 +414,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { require.NotEqual(t, createMsgCid, addFundsMsgCid) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) @@ -418,7 +423,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { // channel). // PendingAmount should be amount sent in second GetPaych // (second GetPaych triggered add funds, which has not yet been confirmed) - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.EqualValues(t, 10, ci.Amount.Int64()) require.EqualValues(t, 5, ci.PendingAmount.Int64()) @@ -435,6 +440,7 @@ func TestPaychGetRestartAfterCreateChannelMsg(t *testing.T) { // right after the add funds message is sent, the add funds will be // processed when the system restarts. 
func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { + //stm: @TOKEN_PAYCH_LIST_CHANNELS_001, @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -481,13 +487,13 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { require.NoError(t, err) // Should have one channel, whose address is the channel that was created - cis, err := mgr2.ListChannels() + cis, err := mgr2.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 1) require.Equal(t, ch, cis[0]) // Amount should include amount for successful add funds msg - ci, err := mgr2.GetChannelInfo(ch) + ci, err := mgr2.GetChannelInfo(ctx, ch) require.NoError(t, err) require.Equal(t, amt.Int64()+amt2.Int64(), ci.Amount.Int64()) require.EqualValues(t, 0, ci.PendingAmount.Int64()) @@ -498,6 +504,7 @@ func TestPaychGetRestartAfterAddFundsMsg(t *testing.T) { // TestPaychGetWait tests that GetPaychWaitReady correctly waits for the // channel to be created or funds to be added func TestPaychGetWait(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -555,6 +562,7 @@ func TestPaychGetWait(t *testing.T) { // TestPaychGetWaitErr tests that GetPaychWaitReady correctly handles errors func TestPaychGetWaitErr(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -602,6 +610,7 @@ func TestPaychGetWaitErr(t *testing.T) { // TestPaychGetWaitCtx tests that GetPaychWaitReady returns early if the context // is cancelled func TestPaychGetWaitCtx(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx, cancel := context.WithCancel(context.Background()) store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -631,6 +640,7 @@ func TestPaychGetWaitCtx(t *testing.T) { // progress and two add funds are queued up behind it, the two add funds // will be merged func 
TestPaychGetMergeAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -729,6 +739,7 @@ func TestPaychGetMergeAddFunds(t *testing.T) { // TestPaychGetMergeAddFundsCtxCancelOne tests that when a queued add funds // request is cancelled, its amount is removed from the total merged add funds func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -826,6 +837,7 @@ func TestPaychGetMergeAddFundsCtxCancelOne(t *testing.T) { // TestPaychGetMergeAddFundsCtxCancelAll tests that when all queued add funds // requests are cancelled, no add funds message is sent func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -900,6 +912,7 @@ func TestPaychGetMergeAddFundsCtxCancelAll(t *testing.T) { // TestPaychAvailableFunds tests that PaychAvailableFunds returns the correct // channel state func TestPaychAvailableFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_001, @TOKEN_PAYCH_AVAILABLE_FUNDS_002, @TOKEN_PAYCH_AVAILABLE_FUNDS_003 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -917,7 +930,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // No channel created yet so available funds should be all zeroes - av, err := mgr.AvailableFundsByFromTo(from, to) + av, err := mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -932,7 +945,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should reflect create channel message sent - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = 
mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.EqualValues(t, 0, av.ConfirmedAmt.Int64()) @@ -961,7 +974,7 @@ func TestPaychAvailableFunds(t *testing.T) { waitForQueueSize(t, mgr, from, to, 1) // Available funds should now include queued funds - av, err = mgr.AvailableFundsByFromTo(from, to) + av, err = mgr.AvailableFundsByFromTo(ctx, from, to) require.NoError(t, err) require.Nil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -996,7 +1009,7 @@ func TestPaychAvailableFunds(t *testing.T) { // Available funds should now include the channel and also a wait sentinel // for the add funds message - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.NotNil(t, av.PendingWaitSentinel) @@ -1018,7 +1031,7 @@ func TestPaychAvailableFunds(t *testing.T) { require.NoError(t, err) // Available funds should no longer have a wait sentinel - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) @@ -1039,7 +1052,7 @@ func TestPaychAvailableFunds(t *testing.T) { _, err = mgr.AddVoucherOutbound(ctx, ch, voucher, nil, types.NewInt(0)) require.NoError(t, err) - av, err = mgr.AvailableFunds(ch) + av, err = mgr.AvailableFunds(ctx, ch) require.NoError(t, err) require.NotNil(t, av.Channel) require.Nil(t, av.PendingWaitSentinel) diff --git a/paychmgr/paychvoucherfunds_test.go b/paychmgr/paychvoucherfunds_test.go index f83a7cd62..f081ee606 100644 --- a/paychmgr/paychvoucherfunds_test.go +++ b/paychmgr/paychvoucherfunds_test.go @@ -23,6 +23,7 @@ import ( // insufficient funds, then adding funds to the channel, then adding the // voucher again func TestPaychAddVoucherAfterAddFunds(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) diff --git 
a/paychmgr/settle_test.go b/paychmgr/settle_test.go index f17f961e2..4d2393e96 100644 --- a/paychmgr/settle_test.go +++ b/paychmgr/settle_test.go @@ -14,6 +14,7 @@ import ( ) func TestPaychSettle(t *testing.T) { + //stm: @TOKEN_PAYCH_WAIT_READY_001, @TOKEN_PAYCH_SETTLE_001, @TOKEN_PAYCH_LIST_CHANNELS_001 ctx := context.Background() store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) @@ -63,7 +64,7 @@ func TestPaychSettle(t *testing.T) { require.NotEqual(t, ch, ch2) // There should now be two channels - cis, err := mgr.ListChannels() + cis, err := mgr.ListChannels(ctx) require.NoError(t, err) require.Len(t, cis, 2) } diff --git a/paychmgr/simple.go b/paychmgr/simple.go index f93c6d5bd..502338e29 100644 --- a/paychmgr/simple.go +++ b/paychmgr/simple.go @@ -159,7 +159,7 @@ func (m *mergedFundsReq) sum() types.BigInt { func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (address.Address, cid.Cid, error) { // Add the request to add funds to a queue and wait for the result freq := newFundsReq(ctx, amt) - ca.enqueue(freq) + ca.enqueue(ctx, freq) select { case res := <-freq.promise: return res.channel, res.mcid, res.err @@ -170,16 +170,16 @@ func (ca *channelAccessor) getPaych(ctx context.Context, amt types.BigInt) (addr } // Queue up an add funds operation -func (ca *channelAccessor) enqueue(task *fundsReq) { +func (ca *channelAccessor) enqueue(ctx context.Context, task *fundsReq) { ca.lk.Lock() defer ca.lk.Unlock() ca.fundsReqQueue = append(ca.fundsReqQueue, task) - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } // Run the operations in the queue -func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) processQueue(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { ca.lk.Lock() defer ca.lk.Unlock() @@ -188,7 +188,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // 
If there's nothing in the queue, bail out if len(ca.fundsReqQueue) == 0 { - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // Merge all pending requests into one. @@ -199,7 +199,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if amt.IsZero() { // Note: The amount can be zero if requests are cancelled as we're // building the mergedFundsReq - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } res := ca.processTask(merged.ctx, amt) @@ -209,7 +209,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable if res == nil { // Stop processing the fundsReqQueue and wait. When the event occurs it will // call processQueue() again - return ca.currentAvailableFunds(channelID, amt) + return ca.currentAvailableFunds(ctx, channelID, amt) } // Finished processing so clear the queue @@ -218,7 +218,7 @@ func (ca *channelAccessor) processQueue(channelID string) (*api.ChannelAvailable // Call the task callback with its results merged.onComplete(res) - return ca.currentAvailableFunds(channelID, types.NewInt(0)) + return ca.currentAvailableFunds(ctx, channelID, types.NewInt(0)) } // filterQueue filters cancelled requests out of the queue @@ -255,12 +255,12 @@ func (ca *channelAccessor) queueSize() int { // msgWaitComplete is called when the message for a previous task is confirmed // or there is an error. 
-func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { +func (ca *channelAccessor) msgWaitComplete(ctx context.Context, mcid cid.Cid, err error) { ca.lk.Lock() defer ca.lk.Unlock() // Save the message result to the store - dserr := ca.store.SaveMessageResult(mcid, err) + dserr := ca.store.SaveMessageResult(ctx, mcid, err) if dserr != nil { log.Errorf("saving message result: %s", dserr) } @@ -271,16 +271,16 @@ func (ca *channelAccessor) msgWaitComplete(mcid cid.Cid, err error) { // The queue may have been waiting for msg completion to proceed, so // process the next queue item if len(ca.fundsReqQueue) > 0 { - go ca.processQueue("") // nolint: errcheck + go ca.processQueue(ctx, "") // nolint: errcheck } } -func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { +func (ca *channelAccessor) currentAvailableFunds(ctx context.Context, channelID string, queuedAmt types.BigInt) (*api.ChannelAvailableFunds, error) { if len(channelID) == 0 { return nil, nil } - channelInfo, err := ca.store.ByChannelID(channelID) + channelInfo, err := ca.store.ByChannelID(ctx, channelID) if err != nil { return nil, err } @@ -302,7 +302,7 @@ func (ca *channelAccessor) currentAvailableFunds(channelID string, queuedAmt typ return nil, err } - laneStates, err := ca.laneState(pchState, ch) + laneStates, err := ca.laneState(ctx, pchState, ch) if err != nil { return nil, err } @@ -337,7 +337,7 @@ func (ca *channelAccessor) processTask(ctx context.Context, amt types.BigInt) *p // Get the payment channel for the from/to addresses. // Note: It's ok if we get ErrChannelNotTracked. It just means we need to // create a channel. 
- channelInfo, err := ca.store.OutboundActiveByFromTo(ca.from, ca.to) + channelInfo, err := ca.store.OutboundActiveByFromTo(ctx, ca.from, ca.to) if err != nil && err != ErrChannelNotTracked { return &paychFundsRes{err: err} } @@ -393,26 +393,26 @@ func (ca *channelAccessor) createPaych(ctx context.Context, amt types.BigInt) (c mcid := smsg.Cid() // Create a new channel in the store - ci, err := ca.store.CreateChannel(ca.from, ca.to, mcid, amt) + ci, err := ca.store.CreateChannel(ctx, ca.from, ca.to, mcid, amt) if err != nil { log.Errorf("creating channel: %s", err) return cid.Undef, err } // Wait for the channel to be created on chain - go ca.waitForPaychCreateMsg(ci.ChannelID, mcid) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, mcid) return mcid, nil } // waitForPaychCreateMsg waits for mcid to appear on chain and stores the robust address of the // created payment channel -func (ca *channelAccessor) waitForPaychCreateMsg(channelID string, mcid cid.Cid) { - err := ca.waitPaychCreateMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitPaychCreateMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitPaychCreateMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Errorf("wait msg: %v", err) @@ -425,7 +425,7 @@ func (ca *channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Channel creation failed, so remove the channel from the datastore - dserr := ca.store.RemoveChannel(channelID) + dserr := ca.store.RemoveChannel(ctx, channelID) if dserr != nil { log.Errorf("failed to remove channel %s: %s", channelID, dserr) } @@ -449,7 +449,7 @@ func (ca 
*channelAccessor) waitPaychCreateMsg(channelID string, mcid cid.Cid) er defer ca.lk.Unlock() // Store robust address of channel - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Channel = &decodedReturn.RobustAddress channelInfo.Amount = channelInfo.PendingAmount channelInfo.PendingAmount = big.NewInt(0) @@ -475,30 +475,30 @@ func (ca *channelAccessor) addFunds(ctx context.Context, channelInfo *ChannelInf mcid := smsg.Cid() // Store the add funds message CID on the channel - ca.mutateChannelInfo(channelInfo.ChannelID, func(ci *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelInfo.ChannelID, func(ci *ChannelInfo) { ci.PendingAmount = amt ci.AddFundsMsg = &mcid }) // Store a reference from the message CID to the channel, so that we can // look up the channel from the message CID - err = ca.store.SaveNewMessage(channelInfo.ChannelID, mcid) + err = ca.store.SaveNewMessage(ctx, channelInfo.ChannelID, mcid) if err != nil { log.Errorf("saving add funds message CID %s: %s", mcid, err) } - go ca.waitForAddFundsMsg(channelInfo.ChannelID, mcid) + go ca.waitForAddFundsMsg(ctx, channelInfo.ChannelID, mcid) return &mcid, nil } // waitForAddFundsMsg waits for mcid to appear on chain and returns error, if any -func (ca *channelAccessor) waitForAddFundsMsg(channelID string, mcid cid.Cid) { - err := ca.waitAddFundsMsg(channelID, mcid) - ca.msgWaitComplete(mcid, err) +func (ca *channelAccessor) waitForAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) { + err := ca.waitAddFundsMsg(ctx, channelID, mcid) + ca.msgWaitComplete(ctx, mcid, err) } -func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error { +func (ca *channelAccessor) waitAddFundsMsg(ctx context.Context, channelID string, mcid cid.Cid) error { mwait, err := ca.api.StateWaitMsg(ca.chctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { log.Error(err) @@ -512,7 
+512,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error ca.lk.Lock() defer ca.lk.Unlock() - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil }) @@ -524,7 +524,7 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error defer ca.lk.Unlock() // Store updated amount - ca.mutateChannelInfo(channelID, func(channelInfo *ChannelInfo) { + ca.mutateChannelInfo(ctx, channelID, func(channelInfo *ChannelInfo) { channelInfo.Amount = types.BigAdd(channelInfo.Amount, channelInfo.PendingAmount) channelInfo.PendingAmount = big.NewInt(0) channelInfo.AddFundsMsg = nil @@ -534,8 +534,8 @@ func (ca *channelAccessor) waitAddFundsMsg(channelID string, mcid cid.Cid) error } // Change the state of the channel in the store -func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*ChannelInfo)) { - channelInfo, err := ca.store.ByChannelID(channelID) +func (ca *channelAccessor) mutateChannelInfo(ctx context.Context, channelID string, mutate func(*ChannelInfo)) { + channelInfo, err := ca.store.ByChannelID(ctx, channelID) // If there's an error reading or writing to the store just log an error. // For now we're assuming it's unlikely to happen in practice. @@ -549,7 +549,7 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan mutate(channelInfo) - err = ca.store.putChannelInfo(channelInfo) + err = ca.store.putChannelInfo(ctx, channelInfo) if err != nil { log.Errorf("Error writing channel info to store: %s", err) } @@ -560,8 +560,8 @@ func (ca *channelAccessor) mutateChannelInfo(channelID string, mutate func(*Chan // messages. // Outstanding messages can occur if a create / add funds message was sent and // then the system was shut down or crashed before the result was received. 
-func (pm *Manager) restartPending() error { - cis, err := pm.store.WithPendingAddFunds() +func (pm *Manager) restartPending(ctx context.Context) error { + cis, err := pm.store.WithPendingAddFunds(ctx) if err != nil { return err } @@ -575,16 +575,16 @@ func (pm *Manager) restartPending() error { if err != nil { return xerrors.Errorf("error initializing payment channel manager %s -> %s: %s", ci.Control, ci.Target, err) } - go ca.waitForPaychCreateMsg(ci.ChannelID, *ci.CreateMsg) + go ca.waitForPaychCreateMsg(ctx, ci.ChannelID, *ci.CreateMsg) return nil }) } else if ci.AddFundsMsg != nil { group.Go(func() error { - ca, err := pm.accessorByAddress(*ci.Channel) + ca, err := pm.accessorByAddress(ctx, *ci.Channel) if err != nil { return xerrors.Errorf("error initializing payment channel manager %s: %s", ci.Channel, err) } - go ca.waitForAddFundsMsg(ci.ChannelID, *ci.AddFundsMsg) + go ca.waitForAddFundsMsg(ctx, ci.ChannelID, *ci.AddFundsMsg) return nil }) } @@ -598,7 +598,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Lock() // First check if the message has completed - msgInfo, err := ca.store.GetMessage(mcid) + msgInfo, err := ca.store.GetMessage(ctx, mcid) if err != nil { ca.lk.Unlock() @@ -617,7 +617,7 @@ func (ca *channelAccessor) getPaychWaitReady(ctx context.Context, mcid cid.Cid) ca.lk.Unlock() // Get the channel address - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { return address.Undef, err } @@ -660,7 +660,7 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on res := onMsgRes{err: err} if res.err == nil { // Get the channel associated with the message cid - ci, err := ca.store.ByMessageCid(mcid) + ci, err := ca.store.ByMessageCid(ctx, mcid) if err != nil { res.err = err } else { @@ -689,6 +689,6 @@ func (ca *channelAccessor) msgPromise(ctx context.Context, mcid cid.Cid) chan on return promise } -func (ca *channelAccessor) 
availableFunds(channelID string) (*api.ChannelAvailableFunds, error) { - return ca.processQueue(channelID) +func (ca *channelAccessor) availableFunds(ctx context.Context, channelID string) (*api.ChannelAvailableFunds, error) { + return ca.processQueue(ctx, channelID) } diff --git a/paychmgr/store.go b/paychmgr/store.go index 343149f93..62849e6be 100644 --- a/paychmgr/store.go +++ b/paychmgr/store.go @@ -2,6 +2,7 @@ package paychmgr import ( "bytes" + "context" "errors" "fmt" @@ -157,26 +158,26 @@ func (ci *ChannelInfo) wasVoucherSubmitted(sv *paych.SignedVoucher) (bool, error // TrackChannel stores a channel, returning an error if the channel was already // being tracked -func (ps *Store) TrackChannel(ci *ChannelInfo) (*ChannelInfo, error) { - _, err := ps.ByAddress(*ci.Channel) +func (ps *Store) TrackChannel(ctx context.Context, ci *ChannelInfo) (*ChannelInfo, error) { + _, err := ps.ByAddress(ctx, *ci.Channel) switch err { default: return nil, err case nil: return nil, fmt.Errorf("already tracking channel: %s", ci.Channel) case ErrChannelNotTracked: - err = ps.putChannelInfo(ci) + err = ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } - return ps.ByAddress(*ci.Channel) + return ps.ByAddress(ctx, *ci.Channel) } } // ListChannels returns the addresses of all channels that have been created -func (ps *Store) ListChannels() ([]address.Address, error) { - cis, err := ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) ListChannels(ctx context.Context) ([]address.Address, error) { + cis, err := ps.findChans(ctx, func(ci *ChannelInfo) bool { return ci.Channel != nil }, 0) if err != nil { @@ -193,8 +194,8 @@ func (ps *Store) ListChannels() ([]address.Address, error) { // findChan finds a single channel using the given filter. 
// If there isn't a channel that matches the filter, returns ErrChannelNotTracked -func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { - cis, err := ps.findChans(filter, 1) +func (ps *Store) findChan(ctx context.Context, filter func(ci *ChannelInfo) bool) (*ChannelInfo, error) { + cis, err := ps.findChans(ctx, filter, 1) if err != nil { return nil, err } @@ -208,8 +209,8 @@ func (ps *Store) findChan(filter func(ci *ChannelInfo) bool) (*ChannelInfo, erro // findChans loops over all channels, only including those that pass the filter. // max is the maximum number of channels to return. Set to zero to return unlimited channels. -func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { - res, err := ps.ds.Query(dsq.Query{Prefix: dsKeyChannelInfo}) +func (ps *Store) findChans(ctx context.Context, filter func(*ChannelInfo) bool, max int) ([]ChannelInfo, error) { + res, err := ps.ds.Query(ctx, dsq.Query{Prefix: dsKeyChannelInfo}) if err != nil { return nil, err } @@ -251,8 +252,8 @@ func (ps *Store) findChans(filter func(*ChannelInfo) bool, max int) ([]ChannelIn } // AllocateLane allocates a new lane for the given channel -func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) AllocateLane(ctx context.Context, ch address.Address) (uint64, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return 0, err } @@ -260,12 +261,12 @@ func (ps *Store) AllocateLane(ch address.Address) (uint64, error) { out := ci.NextLane ci.NextLane++ - return out, ps.putChannelInfo(ci) + return out, ps.putChannelInfo(ctx, ci) } // VouchersForPaych gets the vouchers for the given channel -func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { - ci, err := ps.ByAddress(ch) +func (ps *Store) VouchersForPaych(ctx context.Context, ch address.Address) ([]*VoucherInfo, error) { + ci, err := ps.ByAddress(ctx, ch) if err != nil { return nil, err 
} @@ -273,17 +274,17 @@ func (ps *Store) VouchersForPaych(ch address.Address) ([]*VoucherInfo, error) { return ci.Vouchers, nil } -func (ps *Store) MarkVoucherSubmitted(ci *ChannelInfo, sv *paych.SignedVoucher) error { +func (ps *Store) MarkVoucherSubmitted(ctx context.Context, ci *ChannelInfo, sv *paych.SignedVoucher) error { err := ci.markVoucherSubmitted(sv) if err != nil { return err } - return ps.putChannelInfo(ci) + return ps.putChannelInfo(ctx, ci) } // ByAddress gets the channel that matches the given address -func (ps *Store) ByAddress(addr address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) ByAddress(ctx context.Context, addr address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.Channel != nil && *ci.Channel == addr }) } @@ -307,7 +308,7 @@ func dskeyForMsg(mcid cid.Cid) datastore.Key { } // SaveNewMessage is called when a message is sent -func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { +func (ps *Store) SaveNewMessage(ctx context.Context, channelID string, mcid cid.Cid) error { k := dskeyForMsg(mcid) b, err := cborrpc.Dump(&MsgInfo{ChannelID: channelID, MsgCid: mcid}) @@ -315,12 +316,12 @@ func (ps *Store) SaveNewMessage(channelID string, mcid cid.Cid) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // SaveMessageResult is called when the result of a message is received -func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) SaveMessageResult(ctx context.Context, mcid cid.Cid, msgErr error) error { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return err } @@ -336,17 +337,17 @@ func (ps *Store) SaveMessageResult(mcid cid.Cid, msgErr error) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // ByMessageCid gets the channel associated with a message -func (ps *Store) ByMessageCid(mcid 
cid.Cid) (*ChannelInfo, error) { - minfo, err := ps.GetMessage(mcid) +func (ps *Store) ByMessageCid(ctx context.Context, mcid cid.Cid) (*ChannelInfo, error) { + minfo, err := ps.GetMessage(ctx, mcid) if err != nil { return nil, err } - ci, err := ps.findChan(func(ci *ChannelInfo) bool { + ci, err := ps.findChan(ctx, func(ci *ChannelInfo) bool { return ci.ChannelID == minfo.ChannelID }) if err != nil { @@ -357,10 +358,10 @@ func (ps *Store) ByMessageCid(mcid cid.Cid) (*ChannelInfo, error) { } // GetMessage gets the message info for a given message CID -func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { +func (ps *Store) GetMessage(ctx context.Context, mcid cid.Cid) (*MsgInfo, error) { k := dskeyForMsg(mcid) - val, err := ps.ds.Get(k) + val, err := ps.ds.Get(ctx, k) if err != nil { return nil, err } @@ -375,8 +376,8 @@ func (ps *Store) GetMessage(mcid cid.Cid) (*MsgInfo, error) { // OutboundActiveByFromTo looks for outbound channels that have not been // settled, with the given from / to addresses -func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address) (*ChannelInfo, error) { - return ps.findChan(func(ci *ChannelInfo) bool { +func (ps *Store) OutboundActiveByFromTo(ctx context.Context, from address.Address, to address.Address) (*ChannelInfo, error) { + return ps.findChan(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -390,8 +391,8 @@ func (ps *Store) OutboundActiveByFromTo(from address.Address, to address.Address // WithPendingAddFunds is used on startup to find channels for which a // create channel or add funds message has been sent, but lotus shut down // before the response was received. 
-func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { - return ps.findChans(func(ci *ChannelInfo) bool { +func (ps *Store) WithPendingAddFunds(ctx context.Context) ([]ChannelInfo, error) { + return ps.findChans(ctx, func(ci *ChannelInfo) bool { if ci.Direction != DirOutbound { return false } @@ -400,10 +401,10 @@ func (ps *Store) WithPendingAddFunds() ([]ChannelInfo, error) { } // ByChannelID gets channel info by channel ID -func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { +func (ps *Store) ByChannelID(ctx context.Context, channelID string) (*ChannelInfo, error) { var stored ChannelInfo - res, err := ps.ds.Get(dskeyForChannel(channelID)) + res, err := ps.ds.Get(ctx, dskeyForChannel(channelID)) if err != nil { if err == datastore.ErrNotFound { return nil, ErrChannelNotTracked @@ -415,7 +416,7 @@ func (ps *Store) ByChannelID(channelID string) (*ChannelInfo, error) { } // CreateChannel creates an outbound channel for the given from / to -func (ps *Store) CreateChannel(from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { +func (ps *Store) CreateChannel(ctx context.Context, from address.Address, to address.Address, createMsgCid cid.Cid, amt types.BigInt) (*ChannelInfo, error) { ci := &ChannelInfo{ Direction: DirOutbound, NextLane: 0, @@ -426,13 +427,13 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // Save the new channel - err := ps.putChannelInfo(ci) + err := ps.putChannelInfo(ctx, ci) if err != nil { return nil, err } // Save a reference to the create message - err = ps.SaveNewMessage(ci.ChannelID, createMsgCid) + err = ps.SaveNewMessage(ctx, ci.ChannelID, createMsgCid) if err != nil { return nil, err } @@ -441,8 +442,8 @@ func (ps *Store) CreateChannel(from address.Address, to address.Address, createM } // RemoveChannel removes the channel with the given channel ID -func (ps *Store) RemoveChannel(channelID string) error { - return 
ps.ds.Delete(dskeyForChannel(channelID)) +func (ps *Store) RemoveChannel(ctx context.Context, channelID string) error { + return ps.ds.Delete(ctx, dskeyForChannel(channelID)) } // The datastore key used to identify the channel info @@ -451,7 +452,7 @@ func dskeyForChannel(channelID string) datastore.Key { } // putChannelInfo stores the channel info in the datastore -func (ps *Store) putChannelInfo(ci *ChannelInfo) error { +func (ps *Store) putChannelInfo(ctx context.Context, ci *ChannelInfo) error { if len(ci.ChannelID) == 0 { ci.ChannelID = uuid.New().String() } @@ -462,7 +463,7 @@ func (ps *Store) putChannelInfo(ci *ChannelInfo) error { return err } - return ps.ds.Put(k, b) + return ps.ds.Put(ctx, k, b) } // TODO: This is a hack to get around not being able to CBOR marshall a nil diff --git a/paychmgr/store_test.go b/paychmgr/store_test.go index 1ec8895fa..563b82978 100644 --- a/paychmgr/store_test.go +++ b/paychmgr/store_test.go @@ -1,6 +1,7 @@ package paychmgr import ( + "context" "testing" "github.com/filecoin-project/go-address" @@ -12,8 +13,10 @@ import ( ) func TestStore(t *testing.T) { + ctx := context.Background() + store := NewStore(ds_sync.MutexWrap(ds.NewMapDatastore())) - addrs, err := store.ListChannels() + addrs, err := store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 0) @@ -38,19 +41,19 @@ func TestStore(t *testing.T) { } // Track the channel - _, err = store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.NoError(t, err) // Tracking same channel again should error - _, err = store.TrackChannel(ci) + _, err = store.TrackChannel(ctx, ci) require.Error(t, err) // Track another channel - _, err = store.TrackChannel(ci2) + _, err = store.TrackChannel(ctx, ci2) require.NoError(t, err) // List channels should include all channels - addrs, err = store.ListChannels() + addrs, err = store.ListChannels(ctx) require.NoError(t, err) require.Len(t, addrs, 2) t0100, err := address.NewIDAddress(100) @@ -61,25 +64,25 @@ func 
TestStore(t *testing.T) { require.Contains(t, addrs, t0200) // Request vouchers for channel - vouchers, err := store.VouchersForPaych(*ci.Channel) + vouchers, err := store.VouchersForPaych(ctx, *ci.Channel) require.NoError(t, err) require.Len(t, vouchers, 1) // Requesting voucher for non-existent channel should error - _, err = store.VouchersForPaych(tutils.NewIDAddr(t, 300)) + _, err = store.VouchersForPaych(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) // Allocate lane for channel - lane, err := store.AllocateLane(*ci.Channel) + lane, err := store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(0)) // Allocate next lane for channel - lane, err = store.AllocateLane(*ci.Channel) + lane, err = store.AllocateLane(ctx, *ci.Channel) require.NoError(t, err) require.Equal(t, lane, uint64(1)) // Allocate next lane for non-existent channel should error - _, err = store.AllocateLane(tutils.NewIDAddr(t, 300)) + _, err = store.AllocateLane(ctx, tutils.NewIDAddr(t, 300)) require.Equal(t, err, ErrChannelNotTracked) } diff --git a/scripts/add-dom-power.sh b/scripts/add-dom-power.sh new file mode 100755 index 000000000..cefdf5670 --- /dev/null +++ b/scripts/add-dom-power.sh @@ -0,0 +1 @@ +EUDICO_PATH=$PWD/data/alice ./eudico send --from t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba --method 2 --params-json "{\"Miners\":[\"12D3KooWSpyoi7KghH98SWDfDFMyAwuvtP8MWWGDcC1e1uHWzjSm\"]}" t065 0 \ No newline at end of file diff --git a/scripts/add-initial-mpower.sh b/scripts/add-initial-mpower.sh new file mode 100755 index 000000000..6097b0d05 --- /dev/null +++ b/scripts/add-initial-mpower.sh @@ -0,0 +1 @@ +EUDICO_PATH=$PWD/data/alice ./eudico send --from t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba --method 2 --params-json "{\"Miners\":[\"12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4\", \"12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t\", \"12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6\"]}" t065 0 \ No newline 
at end of file diff --git a/scripts/archive-branches.sh b/scripts/archive-branches.sh index 98fdfaeb8..7eb680641 100755 --- a/scripts/archive-branches.sh +++ b/scripts/archive-branches.sh @@ -9,10 +9,12 @@ api_repo="repos/$org/$repo" exclusions=( 'master' + 'main' + 'releases' ) gh_api_next() { - links=$(grep '^Link:' | sed -e 's/Link: //' -e 's/, /\n/g') + links=$(grep '^link:' | sed -e 's/link: //' -e 's/, /\n/g') echo "$links" | grep '; rel="next"' >/dev/null || return link=$(echo "$links" | grep '; rel="next"' | sed -e 's/^.*//') @@ -43,7 +45,7 @@ active_branches() { git remote add archived "git@github.com:$arch_repo.git" || true -branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13)" +branches_to_move="$(cat <(active_branches) <(pr_branches) <((IFS=$'\n'; echo "${exclusions[*]}")) | sort -u | comm - <(origin_refs | sort) -13 | grep -v -e '^release/' -e '^ntwk-')" echo "================" printf "%s\n" "$branches_to_move" diff --git a/scripts/build-bundle.sh b/scripts/build-bundle.sh index fe1c88611..550c80554 100755 --- a/scripts/build-bundle.sh +++ b/scripts/build-bundle.sh @@ -49,7 +49,4 @@ do ipfs add -q "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz" > "lotus_${CIRCLE_TAG}_${ARCH}-amd64.tar.gz.cid" done -cp "../appimage/Lotus-${CIRCLE_TAG}-x86_64.AppImage" . 
-sha512sum "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.sha512" -ipfs add -q "Lotus-${CIRCLE_TAG}-x86_64.AppImage" > "Lotus-${CIRCLE_TAG}-x86_64.AppImage.cid" -popd +popd \ No newline at end of file diff --git a/scripts/data-permissions.sh b/scripts/data-permissions.sh index bcb9ad01c..7afef350d 100755 --- a/scripts/data-permissions.sh +++ b/scripts/data-permissions.sh @@ -4,4 +4,10 @@ chmod 600 data/charlie/keystore/* chmod 600 data/dom/keystore/* chmod +x ./scripts/restart_bitcoin.sh chmod +x ./scripts/delete_eudico_data.sh -chmod +x ./scripts/restart-demo.sh \ No newline at end of file +chmod +x ./scripts/restart-demo.sh +chmod +x ./scripts/add-initial-mpower.sh +chmod +x ./scripts/add-dom-power.sh +chmod +x ./scripts/remove-dom-mpower.sh +chmod +x ./scripts/remove-charlie.sh +chmod +x ./scripts/genesis-newstate.sh +chmod +x ./scripts/restart-testnet-demo.sh diff --git a/scripts/delete_eudico_data.sh b/scripts/delete_eudico_data.sh old mode 100644 new mode 100755 index 684bfaa45..382a28869 --- a/scripts/delete_eudico_data.sh +++ b/scripts/delete_eudico_data.sh @@ -1,2 +1,2 @@ rm -rf data/ -cp -R ../fil-taproot-data/data ./ \ No newline at end of file +cp -R ../fil-testnet-data/data ./ diff --git a/scripts/docker-lotus-miner-entrypoint.sh b/scripts/docker-lotus-miner-entrypoint.sh index 8cdbaecce..a8f2a5540 100755 --- a/scripts/docker-lotus-miner-entrypoint.sh +++ b/scripts/docker-lotus-miner-entrypoint.sh @@ -1,19 +1,24 @@ #!/usr/bin/env bash if [ ! -z $DOCKER_LOTUS_MINER_INIT ]; then - GATE="$LOTUS_PATH"/date_initialized + GATE="${LOTUS_MINER_PATH}/date_initialized" # Don't init if already initialized. - if [ -f "$GATE" ]; then + if [ ! -f "${GATE}" ]; then + echo starting init + eval "/usr/local/bin/lotus-miner init ${DOCKER_LOTUS_MINER_INIT_ARGS}" + if [ $? == 0 ] + then + echo lotus-miner init successful + date > "$GATE" + else + echo lotus-miner init unsuccessful + exit 1 + fi + else echo lotus-miner already initialized. 
- exit 0 fi - echo starting init - /usr/local/bin/lotus-miner init - - # Block future inits - date > "$GATE" fi exec /usr/local/bin/lotus-miner $@ diff --git a/scripts/generate-bitcoin-blocks.sh b/scripts/generate-bitcoin-blocks.sh index 2c3066a92..cc031818b 100755 --- a/scripts/generate-bitcoin-blocks.sh +++ b/scripts/generate-bitcoin-blocks.sh @@ -3,4 +3,6 @@ while sleep 1; do curl -u satoshi:amiens -X POST \ 127.0.0.1:18443 \ -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"generatetoaddress\", \"params\": [1, \"bcrt1qgp62tlj8hwd7lpp0thz0ujjvgxsjug5hr4l8xj\"]}" \ - -H 'Content-Type:application/json'; done \ No newline at end of file + -H 'Content-Type:application/json'; done + + diff --git a/scripts/generate-lotus-cli.py b/scripts/generate-lotus-cli.py index 8018962e9..7999603b2 100644 --- a/scripts/generate-lotus-cli.py +++ b/scripts/generate-lotus-cli.py @@ -31,12 +31,11 @@ def get_cmd_recursively(cur_cmd): if cmd_flag is True and line == '': cmd_flag = False if cmd_flag is True and line[-1] != ':' and 'help, h' not in line: - gap_pos = 0 + gap_pos = None sub_cmd = line if ' ' in line: gap_pos = sub_cmd.index(' ') - if gap_pos: - sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] + sub_cmd = cur_cmd + ' ' + sub_cmd[:gap_pos] get_cmd_recursively(sub_cmd) except Exception as e: print('Fail to deal with "%s" with error:\n%s' % (line, e)) @@ -46,6 +45,12 @@ def get_cmd_recursively(cur_cmd): if __name__ == "__main__": + # When --help is generated one needs to make sure none of the + # urfave-cli `EnvVars:` defaults get triggered + # Unset everything we can find via: grep -ho 'EnvVars:.*' -r * | sort -u + for e in [ "LOTUS_PATH", "LOTUS_MARKETS_PATH", "LOTUS_MINER_PATH", "LOTUS_STORAGE_PATH", "LOTUS_WORKER_PATH", "WORKER_PATH", "LOTUS_PANIC_REPORT_PATH", "WALLET_PATH" ]: + os.environ.pop(e, None) + os.putenv("LOTUS_VERSION_IGNORE_COMMIT", "1") generate_lotus_cli('lotus') generate_lotus_cli('lotus-miner') diff --git a/scripts/genesis-newstate.sh 
b/scripts/genesis-newstate.sh new file mode 100755 index 000000000..c141cb479 --- /dev/null +++ b/scripts/genesis-newstate.sh @@ -0,0 +1,4 @@ +./scripts/delete_eudico_data.sh +export LOTUS_SKIP_GENESIS_CHECK=_yes_ +./eudico delegated genesis t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba gen.gen +./scripts/data-permissions.sh diff --git a/scripts/publish-release.sh b/scripts/publish-release.sh index 22572de60..ad2a52dcf 100755 --- a/scripts/publish-release.sh +++ b/scripts/publish-release.sh @@ -68,9 +68,6 @@ artifacts=( "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz" "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz.cid" "lotus_${CIRCLE_TAG}_darwin-amd64.tar.gz.sha512" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage.cid" - "Lotus-${CIRCLE_TAG}-x86_64.AppImage.sha512" ) for RELEASE_FILE in "${artifacts[@]}" diff --git a/scripts/regtest.sh b/scripts/regtest.sh new file mode 100755 index 000000000..1180d671e --- /dev/null +++ b/scripts/regtest.sh @@ -0,0 +1,69 @@ +rm -rf data/ +cp -R ../fil-taproot-data/data ./ +./scripts/restart_bitcoin.sh +make eudico +./scripts/data-permissions.sh +#! 
/bin/bash + +# create Bitcoin wallet +curl -u satoshi:amiens -X POST \ + 127.0.0.1:18443 \ + -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"createwallet\", \"params\": [\"wow\"]}" \ + -H 'Content-Type:application/json' + +# create a new address with getnewadress +ADDRESS=$(curl -u satoshi:amiens -X POST \ + 127.0.0.1:18443 \ + -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"getnewaddress\", \"params\": [\"wow\"]}" \ + -H 'Content-Type:application/json' | jq -r '.result') + +echo "$ADDRESS" + +# create 150 Bitcoin blocks with the coinbase rewards that goes to our own address +# (note: according to Bitcoin's rules, we need to wait before being able to access the coinbase rewards) +curl -u satoshi:amiens -X POST \ + 127.0.0.1:18443 \ + -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"generatetoaddress\", \"params\": [150, \"$ADDRESS\"]}" \ + -H 'Content-Type:application/json' +# Note: after this we do not mine Bitcoin blocks anymore. +# To create more Bitcoin blocks, we need to run another script: generate-bitcoin-blocks.sh in +# a new window. + +# We now create the transaction that funds the initial public key. +# This initial key is constructed based on the keys of Alice, Bob, Charlie +# and a commitment to the Eudico genesis block. +# If the hash of the genesis block changes this key needs to be re-generated. +# (This hash is defined in eudico delegated consensus: ./eudico delegated genesis t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba gen.gen) +# In order to compute the initial key, we can uncomment the following lines in the +# checkpointing/sub.go file: +# address, _ := pubkeyToTapprootAddress(c.pubkey) +# fmt.Println(address) +# This address changes when we change either one of the participants keys (i.e., Alice Bob or Charlie) or +# or eudico genesis block (this should not happen very often). +# Note if we change this address, we need to re-start Bitcoin regtest from scratch. 
+# Ideally we would like to use the transaction id instead of address in order to retrieve the first checkpoint. +# 50 is the amount sent (50 bitcoins) +# curl -u satoshi:amiens -X POST \ +# 127.0.0.1:18443 \ +# -d "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendtoaddress\", \"params\": [\"bcrt1pqxuadpegfl0037fkr9rhms8wlavvjggcqrlyeaj9qlkydydh3c2qy26th5\", 50]}" \ +# -H 'Content-Type:application/json' + + +tmux \ + new-session 'EUDICO_PATH=$PWD/data/alice ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ + split-window -h 'EUDICO_PATH=$PWD/data/bob ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ + split-window 'EUDICO_PATH=$PWD/data/bob ./eudico wait-api; EUDICO_PATH=$PWD/data/bob ./eudico log set-level error; EUDICO_PATH=$PWD/data/bob ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4; sleep 3' \; \ + split-window -h 'EUDICO_PATH=$PWD/data/charlie ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ + split-window 'EUDICO_PATH=$PWD/data/charlie ./eudico wait-api; EUDICO_PATH=$PWD/data/charlie ./eudico log set-level error; EUDICO_PATH=$PWD/data/charlie ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t; sleep 3' \; \ + select-pane -t 0 \; \ + split-window -v 'EUDICO_PATH=$PWD/data/alice ./eudico wait-api; EUDICO_PATH=$PWD/data/alice ./eudico log set-level error; EUDICO_PATH=$PWD/data/alice ./eudico wallet import --as-default --format=json-lotus kek.key; EUDICO_PATH=$PWD/data/alice ./eudico delegated miner; sleep infinity' \; + +# tmux \ +# new-session 'EUDICO_PATH=$PWD/data/alice ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window -h 'EUDICO_PATH=$PWD/data/bob ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window 'EUDICO_PATH=$PWD/data/bob ./eudico wait-api; 
EUDICO_PATH=$PWD/data/bob ./eudico ; EUDICO_PATH=$PWD/data/bob ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4; sleep 3' \; \ +# split-window -h 'EUDICO_PATH=$PWD/data/charlie ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window 'EUDICO_PATH=$PWD/data/charlie ./eudico wait-api; EUDICO_PATH=$PWD/data/charlie ./eudico ; EUDICO_PATH=$PWD/data/charlie ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t; sleep 3' \; \ +# select-pane -t 0 \; \ +# split-window -v 'EUDICO_PATH=$PWD/data/alice ./eudico wait-api; EUDICO_PATH=$PWD/data/alice ./eudico ; EUDICO_PATH=$PWD/data/alice ./eudico wallet import --as-default --format=json-lotus kek.key; EUDICO_PATH=$PWD/data/alice ./eudico delegated miner; sleep infinity' \; + diff --git a/scripts/remove-charlie.sh b/scripts/remove-charlie.sh new file mode 100755 index 000000000..63a4eff7f --- /dev/null +++ b/scripts/remove-charlie.sh @@ -0,0 +1 @@ +EUDICO_PATH=$PWD/data/alice ./eudico send --from t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba --method 3 --params-json "{\"Miners\":[\"12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6\"]}" t065 0 diff --git a/scripts/remove-dom-mpower.sh b/scripts/remove-dom-mpower.sh new file mode 100755 index 000000000..e105684d0 --- /dev/null +++ b/scripts/remove-dom-mpower.sh @@ -0,0 +1 @@ +EUDICO_PATH=$PWD/data/alice ./eudico send --from t1d2xrzcslx7xlbbylc5c3d5lvandqw4iwl6epxba --method 3 --params-json "{\"Miners\":[\"12D3KooWSpyoi7KghH98SWDfDFMyAwuvtP8MWWGDcC1e1uHWzjSm\"]}" t065 0 \ No newline at end of file diff --git a/scripts/restart-demo.sh b/scripts/restart-demo.sh old mode 100644 new mode 100755 index 5b6d149cd..61163c576 --- a/scripts/restart-demo.sh +++ b/scripts/restart-demo.sh @@ -1,4 +1,4 @@ -./scripts/restart_bitcoin.sh +#./scripts/restart_bitcoin.sh make eudico 
./scripts/data-permissions.sh ./scripts/taproot.sh diff --git a/scripts/restart-testnet-demo.sh b/scripts/restart-testnet-demo.sh new file mode 100755 index 000000000..9eba8b985 --- /dev/null +++ b/scripts/restart-testnet-demo.sh @@ -0,0 +1,4 @@ +./scripts/genesis-newstate.sh +./scripts/delete_eudico_data.sh +./scripts/restart-demo.sh + diff --git a/scripts/restart_bitcoin.sh b/scripts/restart_bitcoin.sh old mode 100644 new mode 100755 diff --git a/scripts/taproot.sh b/scripts/taproot.sh index 18fa8ada8..7de77a0cf 100755 --- a/scripts/taproot.sh +++ b/scripts/taproot.sh @@ -1,25 +1,30 @@ #! /bin/bash -# create Bitcoin wallet -curl -u satoshi:amiens -X POST \ - 127.0.0.1:18443 \ - -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"createwallet\", \"params\": [\"wow\"]}" \ - -H 'Content-Type:application/json' +# the creation of the wallet must be done priorly as the address needs +# to be funded using the bitcoin faucet +## create Bitcoin wallet +#curl -u satoshi:amiens -X POST \ +# 127.0.0.1:18443 \ +# -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"createwallet\", \"params\": [\"wow\"]}" \ +# -H 'Content-Type:application/json' +# +## create a new address with getnewadress +#ADDRESS=$(curl -u satoshi:amiens -X POST \ +# 127.0.0.1:18443 \ +# -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"getnewaddress\", \"params\": [\"wow\"]}" \ +# -H 'Content-Type:application/json' | jq -r '.result') +# -# create a new address with getnewadress -ADDRESS=$(curl -u satoshi:amiens -X POST \ - 127.0.0.1:18443 \ - -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"getnewaddress\", \"params\": [\"wow\"]}" \ - -H 'Content-Type:application/json' | jq -r '.result') - -echo "$ADDRESS" +# manually paste the address +#ADDRESS= "tb1qfc3stujw72xjusugh2wm3g9wmqdm6hwnxzwkx5" +#echo "$ADDRESS" # create 150 Bitcoin blocks with the coinbase rewards that goes to our own address # (note: according to Bitcoin's rules, we need to wait before being able to access the coinbase rewards) -curl -u 
satoshi:amiens -X POST \ - 127.0.0.1:18443 \ - -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"generatetoaddress\", \"params\": [150, \"$ADDRESS\"]}" \ - -H 'Content-Type:application/json' +#curl -u satoshi:amiens -X POST \ +# 127.0.0.1:18443 \ +# -d "{\"jsonrpc\":\"2.0\",\"id\":\"0\",\"method\":\"generatetoaddress\", \"params\": [150, \"$ADDRESS\"]}" \ +# -H 'Content-Type:application/json' # Note: after this we do not mine Bitcoin blocks anymore. # To create more Bitcoin blocks, we need to run another script: generate-bitcoin-blocks.sh in # a new window. @@ -38,10 +43,10 @@ curl -u satoshi:amiens -X POST \ # Note if we change this address, we need to re-start Bitcoin regtest from scratch. # Ideally we would like to use the transaction id instead of address in order to retrieve the first checkpoint. # 50 is the amount sent (50 bitcoins) -curl -u satoshi:amiens -X POST \ - 127.0.0.1:18443 \ - -d "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendtoaddress\", \"params\": [\"bcrt1pmx76wklv5f2qavaea6leepnnyse3m9xu4apfnrsex705hcse828sr0t3wm\", 50]}" \ - -H 'Content-Type:application/json' +# curl -u satoshi:amiens -X POST \ +# 127.0.0.1:18443 \ +# -d "{\"jsonrpc\": \"1.0\", \"id\":\"wow\", \"method\": \"sendtoaddress\", \"params\": [\"bcrt1pqxuadpegfl0037fkr9rhms8wlavvjggcqrlyeaj9qlkydydh3c2qy26th5\", 50]}" \ +# -H 'Content-Type:application/json' tmux \ @@ -52,3 +57,12 @@ tmux \ split-window 'EUDICO_PATH=$PWD/data/charlie ./eudico wait-api; EUDICO_PATH=$PWD/data/charlie ./eudico log set-level error; EUDICO_PATH=$PWD/data/charlie ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t; sleep 3' \; \ select-pane -t 0 \; \ split-window -v 'EUDICO_PATH=$PWD/data/alice ./eudico wait-api; EUDICO_PATH=$PWD/data/alice ./eudico log set-level error; EUDICO_PATH=$PWD/data/alice ./eudico wallet import --as-default --format=json-lotus kek.key; 
EUDICO_PATH=$PWD/data/alice ./eudico delegated miner; sleep infinity' \; + +# tmux \ +# new-session 'EUDICO_PATH=$PWD/data/alice ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window -h 'EUDICO_PATH=$PWD/data/bob ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window 'EUDICO_PATH=$PWD/data/bob ./eudico wait-api; EUDICO_PATH=$PWD/data/bob ./eudico ; EUDICO_PATH=$PWD/data/bob ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4; sleep 3' \; \ +# split-window -h 'EUDICO_PATH=$PWD/data/charlie ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window 'EUDICO_PATH=$PWD/data/charlie ./eudico wait-api; EUDICO_PATH=$PWD/data/charlie ./eudico ; EUDICO_PATH=$PWD/data/charlie ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t; sleep 3' \; \ +# select-pane -t 0 \; \ +# split-window -v 'EUDICO_PATH=$PWD/data/alice ./eudico wait-api; EUDICO_PATH=$PWD/data/alice ./eudico ; EUDICO_PATH=$PWD/data/alice ./eudico wallet import --as-default --format=json-lotus kek.key; EUDICO_PATH=$PWD/data/alice ./eudico delegated miner; sleep infinity' \; diff --git a/scripts/verification.sh b/scripts/verification.sh index 1911b91b3..42a763074 100755 --- a/scripts/verification.sh +++ b/scripts/verification.sh @@ -1,5 +1,8 @@ #! 
/bin/bash +# tmux \ +# new-session 'EUDICO_PATH=$PWD/data/dom ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ +# split-window 'EUDICO_PATH=$PWD/data/dom ./eudico wait-api; EUDICO_PATH=$PWD/data/dom ./eudico log set-level error; EUDICO_PATH=$PWD/data/dom ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t /ip4/127.0.0.1/tcp/3002/p2p/12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6; sleep 3' \; \ tmux \ - new-session 'EUDICO_PATH=$PWD/data/dom ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ - split-window 'EUDICO_PATH=$PWD/data/dom ./eudico wait-api; EUDICO_PATH=$PWD/data/dom ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t /ip4/127.0.0.1/tcp/3002/p2p/12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6; sleep 3' \; \ + new-session 'EUDICO_PATH=$PWD/data/dom ./eudico wait-api; EUDICO_PATH=$PWD/data/dom ./eudico log set-level error; EUDICO_PATH=$PWD/data/dom ./eudico net connect /ip4/127.0.0.1/tcp/3000/p2p/12D3KooWMBbLLKTM9Voo89TXLd98w4MjkJUych6QvECptousGtR4 /ip4/127.0.0.1/tcp/3001/p2p/12D3KooWNTyoBdMB9bpSkf7PVWR863ejGVPq9ssaaAipNvhPeQ4t /ip4/127.0.0.1/tcp/3002/p2p/12D3KooWF1aFCGUtsGEaqNks3QADLUDZxW7ot7jZPSoDiAFKJuM6; sleep 3' \; \ + split-window 'EUDICO_PATH=$PWD/data/dom ./eudico delegated daemon --genesis=gen.gen; sleep infinity' \; \ diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 472621c2a..25b84058d 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -17,7 +17,6 @@ description: | https://github.com/filecoin-project/lotus -grade: devel confinement: strict parts: diff --git a/storage/adapter_storage_miner.go b/storage/adapter_storage_miner.go index 0b4b17f96..01ff9d8d3 100644 --- a/storage/adapter_storage_miner.go +++ 
b/storage/adapter_storage_miner.go @@ -112,6 +112,15 @@ func (s SealingAPIAdapter) StateMinerSectorAllocated(ctx context.Context, maddr return s.delegate.StateMinerSectorAllocated(ctx, maddr, sid, tsk) } +func (s SealingAPIAdapter) StateMinerActiveSectors(ctx context.Context, maddr address.Address, tok sealing.TipSetToken) ([]*miner.SectorOnChainInfo, error) { + tsk, err := types.TipSetKeyFromBytes(tok) + if err != nil { + return nil, xerrors.Errorf("faile dto unmarshal TipSetToken to TipSetKey: %w", err) + } + + return s.delegate.StateMinerActiveSectors(ctx, maddr, tsk) +} + func (s SealingAPIAdapter) StateWaitMsg(ctx context.Context, mcid cid.Cid) (sealing.MsgLookup, error) { wmsg, err := s.delegate.StateWaitMsg(ctx, mcid, build.MessageConfidence, api.LookbackNoLimit, true) if err != nil { diff --git a/storage/miner.go b/storage/miner.go index 0b1f66840..c52b786ee 100644 --- a/storage/miner.go +++ b/storage/miner.go @@ -86,6 +86,7 @@ type fullNodeFilteredAPI interface { StateSectorPartition(ctx context.Context, maddr address.Address, sectorNumber abi.SectorNumber, tok types.TipSetKey) (*miner.SectorLocation, error) StateMinerInfo(context.Context, address.Address, types.TipSetKey) (miner.MinerInfo, error) StateMinerAvailableBalance(ctx context.Context, maddr address.Address, tok types.TipSetKey) (types.BigInt, error) + StateMinerActiveSectors(context.Context, address.Address, types.TipSetKey) ([]*miner.SectorOnChainInfo, error) StateMinerDeadlines(context.Context, address.Address, types.TipSetKey) ([]api.Deadline, error) StateMinerPartitions(context.Context, address.Address, uint64, types.TipSetKey) ([]api.Partition, error) StateMinerProvingDeadline(context.Context, address.Address, types.TipSetKey) (*dline.Info, error) @@ -282,7 +283,7 @@ func (wpp *StorageWpp) GenerateCandidates(ctx context.Context, randomness abi.Po return cds, nil } -func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.SectorInfo, rand abi.PoStRandomness) 
([]builtin.PoStProof, error) { +func (wpp *StorageWpp) ComputeProof(ctx context.Context, ssi []builtin.ExtendedSectorInfo, rand abi.PoStRandomness, currEpoch abi.ChainEpoch, nv network.Version) ([]builtin.PoStProof, error) { if build.InsecurePoStValidation { return []builtin.PoStProof{{ProofBytes: []byte("valid proof")}}, nil } diff --git a/storage/miner_sealing.go b/storage/miner_sealing.go index 01b9546a6..d8ef26835 100644 --- a/storage/miner_sealing.go +++ b/storage/miner_sealing.go @@ -71,8 +71,15 @@ func (m *Miner) CommitPending(ctx context.Context) ([]abi.SectorID, error) { return m.sealing.CommitPending(ctx) } -func (m *Miner) MarkForUpgrade(id abi.SectorNumber) error { - return m.sealing.MarkForUpgrade(id) +func (m *Miner) SectorMatchPendingPiecesToOpenSectors(ctx context.Context) error { + return m.sealing.MatchPendingPiecesToOpenSectors(ctx) +} + +func (m *Miner) MarkForUpgrade(ctx context.Context, id abi.SectorNumber, snap bool) error { + if snap { + return m.sealing.MarkForSnapUpgrade(ctx, id) + } + return m.sealing.MarkForUpgrade(ctx, id) } func (m *Miner) IsMarkedForUpgrade(id abi.SectorNumber) bool { diff --git a/storage/sectorblocks/blocks.go b/storage/sectorblocks/blocks.go index ad4ffc0db..231809a9f 100644 --- a/storage/sectorblocks/blocks.go +++ b/storage/sectorblocks/blocks.go @@ -68,11 +68,11 @@ func NewSectorBlocks(sb SectorBuilder, ds dtypes.MetadataDS) *SectorBlocks { return sbc } -func (st *SectorBlocks) writeRef(dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { +func (st *SectorBlocks) writeRef(ctx context.Context, dealID abi.DealID, sectorID abi.SectorNumber, offset abi.PaddedPieceSize, size abi.UnpaddedPieceSize) error { st.keyLk.Lock() // TODO: make this multithreaded defer st.keyLk.Unlock() - v, err := st.keys.Get(DealIDToDsKey(dealID)) + v, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = nil } @@ -97,7 +97,7 @@ func (st *SectorBlocks) 
writeRef(dealID abi.DealID, sectorID abi.SectorNumber, o if err != nil { return xerrors.Errorf("serializing refs: %w", err) } - return st.keys.Put(DealIDToDsKey(dealID), newRef) // TODO: batch somehow + return st.keys.Put(ctx, DealIDToDsKey(dealID), newRef) // TODO: batch somehow } func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize, r io.Reader, d api.PieceDealInfo) (abi.SectorNumber, abi.PaddedPieceSize, error) { @@ -107,7 +107,7 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize } // TODO: DealID has very low finality here - err = st.writeRef(d.DealID, so.Sector, so.Offset, size) + err = st.writeRef(ctx, d.DealID, so.Sector, so.Offset, size) if err != nil { return 0, 0, xerrors.Errorf("writeRef: %w", err) } @@ -115,8 +115,8 @@ func (st *SectorBlocks) AddPiece(ctx context.Context, size abi.UnpaddedPieceSize return so.Sector, so.Offset, nil } -func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { - res, err := st.keys.Query(query.Query{}) +func (st *SectorBlocks) List(ctx context.Context) (map[uint64][]api.SealedRef, error) { + res, err := st.keys.Query(ctx, query.Query{}) if err != nil { return nil, err } @@ -144,8 +144,8 @@ func (st *SectorBlocks) List() (map[uint64][]api.SealedRef, error) { return out, nil } -func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors - ent, err := st.keys.Get(DealIDToDsKey(dealID)) +func (st *SectorBlocks) GetRefs(ctx context.Context, dealID abi.DealID) ([]api.SealedRef, error) { // TODO: track local sectors + ent, err := st.keys.Get(ctx, DealIDToDsKey(dealID)) if err == datastore.ErrNotFound { err = ErrNotFound } @@ -161,8 +161,8 @@ func (st *SectorBlocks) GetRefs(dealID abi.DealID) ([]api.SealedRef, error) { // return refs.Refs, nil } -func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { - refs, err := st.GetRefs(dealID) +func (st *SectorBlocks) GetSize(ctx context.Context, dealID 
abi.DealID) (uint64, error) { + refs, err := st.GetRefs(ctx, dealID) if err != nil { return 0, err } @@ -170,7 +170,7 @@ func (st *SectorBlocks) GetSize(dealID abi.DealID) (uint64, error) { return uint64(refs[0].Size), nil } -func (st *SectorBlocks) Has(dealID abi.DealID) (bool, error) { +func (st *SectorBlocks) Has(ctx context.Context, dealID abi.DealID) (bool, error) { // TODO: ensure sector is still there - return st.keys.Has(DealIDToDsKey(dealID)) + return st.keys.Has(ctx, DealIDToDsKey(dealID)) } diff --git a/storage/wdpost_changehandler.go b/storage/wdpost_changehandler.go index 7b80f2744..9540182b5 100644 --- a/storage/wdpost_changehandler.go +++ b/storage/wdpost_changehandler.go @@ -15,7 +15,7 @@ import ( const ( SubmitConfidence = 4 - ChallengeConfidence = 10 + ChallengeConfidence = 1 ) type CompleteGeneratePoSTCb func(posts []miner.SubmitWindowedPoStParams, err error) diff --git a/storage/wdpost_changehandler_test.go b/storage/wdpost_changehandler_test.go index a2283cb7c..2fcbe770e 100644 --- a/storage/wdpost_changehandler_test.go +++ b/storage/wdpost_changehandler_test.go @@ -117,7 +117,7 @@ func (m *mockAPI) startGeneratePoST( completeGeneratePoST CompleteGeneratePoSTCb, ) context.CancelFunc { ctx, cancel := context.WithCancel(ctx) - + log.Errorf("mock posting\n") m.statesLk.Lock() defer m.statesLk.Unlock() m.postStates[deadline.Open] = postStatusProving diff --git a/storage/wdpost_run.go b/storage/wdpost_run.go index 038ed3ac7..6a86656c7 100644 --- a/storage/wdpost_run.go +++ b/storage/wdpost_run.go @@ -19,8 +19,8 @@ import ( "go.opencensus.io/trace" "golang.org/x/xerrors" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/specs-actors/v3/actors/runtime/proof" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" @@ -181,9 +181,10 @@ func (s *WindowPoStScheduler) runSubmitPoST( 
post.ChainCommitRand = commRand // Submit PoST - sm, submitErr := s.submitPoStMessage(ctx, post) - if submitErr != nil { - log.Errorf("submit window post failed: %+v", submitErr) + sm, err := s.submitPoStMessage(ctx, post) + if err != nil { + log.Errorf("submit window post failed: %+v", err) + submitErr = err } else { s.recordProofsEvent(post.Partitions, sm.Cid()) } @@ -567,7 +568,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t for retries := 0; ; retries++ { skipCount := uint64(0) var partitions []miner.PoStPartition - var sinfos []proof2.SectorInfo + var xsinfos []proof7.ExtendedSectorInfo for partIdx, partition := range batch { // TODO: Can do this in parallel toProve, err := bitfield.SubtractBitField(partition.LiveSectors, partition.FaultySectors) @@ -610,14 +611,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t continue } - sinfos = append(sinfos, ssi...) + xsinfos = append(xsinfos, ssi...) partitions = append(partitions, miner.PoStPartition{ Index: uint64(batchPartitionStartIdx + partIdx), Skipped: skipped, }) } - if len(sinfos) == 0 { + if len(xsinfos) == 0 { // nothing to prove for this batch break } @@ -636,14 +637,22 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t return nil, err } - postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), sinfos, append(abi.PoStRandomness{}, rand...)) + defer func() { + if r := recover(); r != nil { + log.Errorf("recover: %s", r) + } + }() + postOut, ps, err := s.prover.GenerateWindowPoSt(ctx, abi.ActorID(mid), xsinfos, append(abi.PoStRandomness{}, rand...)) elapsed := time.Since(tsStart) - log.Infow("computing window post", "batch", batchIdx, "elapsed", elapsed) - + if err != nil { + log.Errorf("error generating window post: %s", err) + } if err == nil { + // If we proved nothing, something is very wrong. 
if len(postOut) == 0 { + log.Errorf("len(postOut) == 0") return nil, xerrors.Errorf("received no proofs back from generate window post") } @@ -664,6 +673,14 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t } // If we generated an incorrect proof, try again. + sinfos := make([]proof7.SectorInfo, len(xsinfos)) + for i, xsi := range xsinfos { + sinfos[i] = proof7.SectorInfo{ + SealProof: xsi.SealProof, + SectorNumber: xsi.SectorNumber, + SealedCID: xsi.SealedCID, + } + } if correct, err := s.verifier.VerifyWindowPoSt(ctx, proof.WindowPoStVerifyInfo{ Randomness: abi.PoStRandomness(checkRand), Proofs: postOut, @@ -686,7 +703,7 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t } // Proof generation failed, so retry - + log.Debugf("Proof generation failed, retry") if len(ps) == 0 { // If we didn't skip any new sectors, we failed // for some other reason and we need to abort. @@ -714,10 +731,8 @@ func (s *WindowPoStScheduler) runPoStCycle(ctx context.Context, di dline.Info, t if !somethingToProve { continue } - posts = append(posts, params) } - return posts, nil } @@ -766,7 +781,7 @@ func (s *WindowPoStScheduler) batchPartitions(partitions []api.Partition, nv net return batches, nil } -func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof2.SectorInfo, error) { +func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, allSectors bitfield.BitField, ts *types.TipSet) ([]proof7.ExtendedSectorInfo, error) { sset, err := s.api.StateMinerSectors(ctx, s.actor, &goodSectors, ts.Key()) if err != nil { return nil, err @@ -776,22 +791,24 @@ func (s *WindowPoStScheduler) sectorsForProof(ctx context.Context, goodSectors, return nil, nil } - substitute := proof2.SectorInfo{ + substitute := proof7.ExtendedSectorInfo{ SectorNumber: sset[0].SectorNumber, SealedCID: sset[0].SealedCID, SealProof: sset[0].SealProof, + 
SectorKey: sset[0].SectorKeyCID, } - sectorByID := make(map[uint64]proof2.SectorInfo, len(sset)) + sectorByID := make(map[uint64]proof7.ExtendedSectorInfo, len(sset)) for _, sector := range sset { - sectorByID[uint64(sector.SectorNumber)] = proof2.SectorInfo{ + sectorByID[uint64(sector.SectorNumber)] = proof7.ExtendedSectorInfo{ SectorNumber: sector.SectorNumber, SealedCID: sector.SealedCID, SealProof: sector.SealProof, + SectorKey: sector.SectorKeyCID, } } - proofSectors := make([]proof2.SectorInfo, 0, len(sset)) + proofSectors := make([]proof7.ExtendedSectorInfo, 0, len(sset)) if err := allSectors.ForEach(func(sectorNo uint64) error { if info, found := sectorByID[sectorNo]; found { proofSectors = append(proofSectors, info) diff --git a/storage/wdpost_run_test.go b/storage/wdpost_run_test.go index 78d9431d4..f3ea5836b 100644 --- a/storage/wdpost_run_test.go +++ b/storage/wdpost_run_test.go @@ -5,6 +5,8 @@ import ( "context" "testing" + proof7 "github.com/filecoin-project/specs-actors/v7/actors/runtime/proof" + builtin5 "github.com/filecoin-project/specs-actors/v5/actors/builtin" miner5 "github.com/filecoin-project/specs-actors/v5/actors/builtin/miner" @@ -22,12 +24,6 @@ import ( "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/dline" "github.com/filecoin-project/go-state-types/network" - builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" - miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" - proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" - tutils "github.com/filecoin-project/specs-actors/v2/support/testing" - proof5 "github.com/filecoin-project/specs-actors/v5/actors/runtime/proof" - "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" @@ -35,6 +31,10 @@ import ( "github.com/filecoin-project/lotus/chain/types" 
"github.com/filecoin-project/lotus/extern/sector-storage/storiface" "github.com/filecoin-project/lotus/journal" + builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" + miner2 "github.com/filecoin-project/specs-actors/v2/actors/builtin/miner" + proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" + tutils "github.com/filecoin-project/specs-actors/v2/support/testing" ) type mockStorageMinerAPI struct { @@ -116,11 +116,11 @@ func (m *mockStorageMinerAPI) GasEstimateFeeCap(context.Context, *types.Message, type mockProver struct { } -func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof2.SectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { +func (m *mockProver) GenerateWinningPoSt(context.Context, abi.ActorID, []proof7.ExtendedSectorInfo, abi.PoStRandomness) ([]proof2.PoStProof, error) { panic("implement me") } -func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof2.SectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { +func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, sis []proof7.ExtendedSectorInfo, pr abi.PoStRandomness) ([]proof2.PoStProof, []abi.SectorID, error) { return []proof2.PoStProof{ { PoStProof: abi.RegisteredPoStProof_StackedDrgWindow2KiBV1, @@ -132,7 +132,7 @@ func (m *mockProver) GenerateWindowPoSt(ctx context.Context, aid abi.ActorID, si type mockVerif struct { } -func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof2.WinningPoStVerifyInfo) (bool, error) { +func (m mockVerif) VerifyWinningPoSt(ctx context.Context, info proof7.WinningPoStVerifyInfo) (bool, error) { panic("implement me") } @@ -149,7 +149,11 @@ func (m mockVerif) VerifyWindowPoSt(ctx context.Context, info proof2.WindowPoStV return true, nil } -func (m mockVerif) VerifyAggregateSeals(aggregate proof5.AggregateSealVerifyProofAndInfos) (bool, error) { +func (m mockVerif) VerifyAggregateSeals(aggregate 
proof7.AggregateSealVerifyProofAndInfos) (bool, error) { + panic("implement me") +} + +func (m mockVerif) VerifyReplicaUpdate(update proof7.ReplicaUpdateInfo) (bool, error) { panic("implement me") } diff --git a/storage/wdpost_sched.go b/storage/wdpost_sched.go index 88357c5b3..53801e362 100644 --- a/storage/wdpost_sched.go +++ b/storage/wdpost_sched.go @@ -115,6 +115,7 @@ func (s *WindowPoStScheduler) Run(ctx context.Context) { } gotCur = false + log.Info("restarting window post scheduler") } select { diff --git a/testplans/lotus-soup/go.mod b/testplans/lotus-soup/go.mod index d4f62f068..6b4be1d97 100644 --- a/testplans/lotus-soup/go.mod +++ b/testplans/lotus-soup/go.mod @@ -3,35 +3,35 @@ module github.com/filecoin-project/lotus/testplans/lotus-soup go 1.16 require ( - contrib.go.opencensus.io/exporter/prometheus v0.1.0 + contrib.go.opencensus.io/exporter/prometheus v0.4.0 github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe github.com/davecgh/go-spew v1.1.1 - github.com/drand/drand v1.2.1 - github.com/filecoin-project/go-address v0.0.5 - github.com/filecoin-project/go-data-transfer v1.10.1 - github.com/filecoin-project/go-fil-markets v1.12.0 - github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec - github.com/filecoin-project/go-state-types v0.1.1-0.20210915140513-d354ccf10379 - github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b - github.com/filecoin-project/lotus v1.5.0 + github.com/drand/drand v1.3.0 + github.com/filecoin-project/go-address v0.0.6 + github.com/filecoin-project/go-data-transfer v1.12.1 + github.com/filecoin-project/go-fil-markets v1.14.1 + github.com/filecoin-project/go-jsonrpc v0.1.5 + github.com/filecoin-project/go-state-types v0.1.3 + github.com/filecoin-project/go-storedcounter v0.1.0 + github.com/filecoin-project/lotus v0.0.0-00010101000000-000000000000 github.com/filecoin-project/specs-actors v0.9.14 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 
github.com/hashicorp/go-multierror v1.1.1 github.com/influxdata/influxdb v1.9.4 // indirect github.com/ipfs/go-cid v0.1.0 - github.com/ipfs/go-datastore v0.4.6 - github.com/ipfs/go-ipfs-files v0.0.8 + github.com/ipfs/go-datastore v0.5.1 + github.com/ipfs/go-ipfs-files v0.0.9 github.com/ipfs/go-ipld-format v0.2.0 - github.com/ipfs/go-log/v2 v2.3.0 - github.com/ipfs/go-merkledag v0.3.2 + github.com/ipfs/go-log/v2 v2.4.0 + github.com/ipfs/go-merkledag v0.5.1 github.com/ipfs/go-unixfs v0.2.6 - github.com/ipld/go-car v0.3.1-null-padded-files + github.com/ipld/go-car v0.3.3 github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c - github.com/libp2p/go-libp2p v0.15.0 - github.com/libp2p/go-libp2p-core v0.9.0 + github.com/libp2p/go-libp2p v0.17.0 + github.com/libp2p/go-libp2p-core v0.13.0 github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 - github.com/multiformats/go-multiaddr v0.4.0 + github.com/multiformats/go-multiaddr v0.4.1 github.com/testground/sdk-go v0.2.6 go.opencensus.io v0.23.0 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c diff --git a/testplans/lotus-soup/go.sum b/testplans/lotus-soup/go.sum index b6246d634..e6e4149c8 100644 --- a/testplans/lotus-soup/go.sum +++ b/testplans/lotus-soup/go.sum @@ -37,9 +37,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE= -contrib.go.opencensus.io/exporter/jaeger v0.1.0/go.mod h1:VYianECmuFPwU37O699Vc1GOcy+y8kOsfaxHRImmjbA= -contrib.go.opencensus.io/exporter/prometheus v0.1.0 h1:SByaIoWwNgMdPSgl5sMqM2KDE5H/ukPWBRo314xiDvg= -contrib.go.opencensus.io/exporter/prometheus v0.1.0/go.mod h1:cGFniUXGZlKRjzOyuZJ6mgB+PgBcCIa79kEKR8YCW+A= +contrib.go.opencensus.io/exporter/prometheus v0.4.0 
h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= +contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -78,8 +77,9 @@ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6L github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= @@ -88,12 +88,13 @@ github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= -github.com/GeertJohan/go.rice v1.0.0 
h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= -github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= +github.com/GeertJohan/go.rice v1.0.2 h1:PtRw+Tg3oa3HYwiDBZyvOJ8LdIyf6lAovJJtr7YOAYk= +github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee h1:8doiS7ib3zi6/K172oDhSKU0dJ/miJramo9NITOMyZQ= github.com/Gurpartap/async v0.0.0-20180927173644-4f7f499dd9ee/go.mod h1:W0GbEAA4uFNYOGG2cJpmFJ04E6SD1NLELPYZB57/7AY= -github.com/HdrHistogram/hdrhistogram-go v1.1.0 h1:6dpdDPTRoo78HxAJ6T1HfMiKSnqhgRRqzCuPshRkQ7I= github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= github.com/Kubuxu/imtui v0.0.0-20210401140320-41663d68d0fa h1:1PPxEyGdIGVkX/kqMvLJ95a1dGS1Sz7tpNEgehEYYt0= @@ -113,8 +114,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/SAP/go-hdb v0.14.1/go.mod h1:7fdQLVC2lER3urZLjZCm0AuMQfApof92n3aylBPEkMo= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= 
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/Stebalien/go-bitfield v0.0.1 h1:X3kbSSPUaJK60wV2hjOPZwmpljr6VGCqdq4cBLhbQBo= github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= @@ -146,7 +147,9 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -159,12 +162,15 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.29.16/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.32.11/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod 
h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aws/aws-sdk-go-v2 v1.3.2/go.mod h1:7OaACgj2SX3XGWnrIjGlJM22h6yD6MEWKvm7levnnM8= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= github.com/aws/aws-sdk-go-v2/config v1.1.5/go.mod h1:P3F1hku7qzC81txjwXnwOM6Ex6ezkU6+/557Teyb64E= github.com/aws/aws-sdk-go-v2/credentials v1.1.5/go.mod h1:Ir1R6tPiR1/2y1hes8yOijFMz54hzSmgcmCDo6F45Qc= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.0.6/go.mod h1:0+fWMitrmIpENiY8/1DyhdYPUCAPvd9UNz9mtCsEoLQ= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.1.2/go.mod h1:Azf567f5wBUfUbwpyJJnLM/geFFIzEulGR30L+nQZOE= +github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.0.4/go.mod h1:BCfU3Uo2fhKcMZFp9zU5QQGQxqWCOYmZ/27Dju3S/do= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.0.6/go.mod h1:L0KWr0ASo83PRZu9NaZaDsw3koS6PspKv137DMDZjHo= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.2.2/go.mod h1:nnutjMLuna0s3GVY/MAkpLX03thyNER06gXvnMAPj5g= @@ -172,12 +178,13 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.5.0/go.mod h1:uwA7gs93Qcss43astPUb1eq github.com/aws/aws-sdk-go-v2/service/sso v1.1.5/go.mod h1:bpGz0tidC4y39sZkQSkpO/J0tzWCMXHbw6FZ0j1GkWM= github.com/aws/aws-sdk-go-v2/service/sts v1.2.2/go.mod h1:ssRzzJ2RZOVuKj2Vx1YE7ypfil/BIlgmQnCSW4DistU= github.com/aws/smithy-go v1.3.1/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.1/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.2/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock 
v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/benbjohnson/clock v1.2.0 h1:9Re3G2TWxkE06LdMWMpcY6KV81GLXMGiYpPYUPkFAws= +github.com/benbjohnson/clock v1.2.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI= github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -212,22 +219,23 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129 h1:gfAMKE626QEuKG3si0pdTRcr/YEbBoxY+3GOH3gWvl4= -github.com/buger/goterm v0.0.0-20200322175922-2f3e71b85129/go.mod h1:u9UyCz2eTrSGy6fbupqJ54eY5c4IC8gREQ1053dK12U= +github.com/buger/goterm v1.0.3 h1:7V/HeAQHrzPk/U4BvyH2g9u+xbUW9nr4yRPyG59W4fM= +github.com/buger/goterm v1.0.3/go.mod h1:HiFWV3xnkolgrBV3mY8m0X0Pumt4zg4QhbdOzQtB8tE= github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34= github.com/cactus/go-statsd-client/statsd v0.0.0-20191106001114-12b4e2b38748/go.mod h1:l/bIBLeOl9eX+wxJAzxS4TveKRtAqlyDpHjhkfO0MEI= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff 
v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE= github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= @@ -236,20 +244,17 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5O github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.2.0 h1:Fv93L3KKckEcEHR3oApXVzyBTDA8WAm6VXhPE00N3f8= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= 
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/pebble v0.0.0-20200916222308-4e219a90ba5b/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/pebble v0.0.0-20201001221639-879f3bfeef07/go.mod h1:hU7vhtrqonEphNF+xt8/lHdaBprxmV1h8BOGrd9XwmQ= -github.com/cockroachdb/redact v0.0.0-20200622112456-cd282804bbd3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= @@ -269,8 +274,9 @@ github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7 
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= @@ -307,13 +313,12 @@ github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6ps github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= -github.com/dgraph-io/badger/v2 v2.0.3/go.mod h1:3KY8+bsP8wI0OEnQJAKpd4wIJW/Mm32yw2j/9FUVnIM= -github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= -github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= -github.com/dgraph-io/ristretto v0.0.2-0.20200115201040-8f368f2f2ab3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/badger/v2 v2.2007.3 h1:Sl9tQWz92WCbVSe8pj04Tkqlm2boW+KAxd+XSs58SQI= +github.com/dgraph-io/badger/v2 v2.2007.3/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto 
v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdojIDUqktM6+xJAfScFBsVpXZmqC9dsgJmeA= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ= github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -327,12 +332,13 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/drand/bls12-381 v0.3.2/go.mod h1:dtcLgPtYT38L3NO6mPDYH0nbpc5tjPassDqiniuAt4Y= -github.com/drand/drand v1.2.1 h1:KB7z+69YbnQ5z22AH/LMi0ObDR8DzYmrkS6vZXTR9jI= -github.com/drand/drand v1.2.1/go.mod h1:j0P7RGmVaY7E/OuO2yQOcQj7OgeZCuhgu2gdv0JAm+g= +github.com/drand/drand v1.3.0 h1:k/w/PtHzmlU6OmfoAqgirWyrJ4FZH8ESlJrsKF20UkM= +github.com/drand/drand v1.3.0/go.mod h1:D6kAVlxufq1gi71YCGfzN455JrXF4Q272ZJEG975fzo= github.com/drand/kyber v1.0.1-0.20200110225416-8de27ed8c0e2/go.mod h1:UpXoA0Upd1N9l4TvRPHr1qAUBBERj6JQ/mnKI3BPEmw= github.com/drand/kyber v1.0.2/go.mod h1:x6KOpK7avKj0GJ4emhXFP5n7M7W7ChAPmnQh/OL6vRw= -github.com/drand/kyber v1.1.4 h1:YvKM03QWGvLrdTnYmxxP5iURAX+Gdb6qRDUOgg8i60Q= github.com/drand/kyber v1.1.4/go.mod h1:9+IgTq7kadePhZg7eRwSD7+bA+bmvqRK+8DtmoV5a3U= +github.com/drand/kyber v1.1.7 
h1:YnOshFoGYSOdhf4K8BiDw4XL/l6caL92vsodAsVQbJI= +github.com/drand/kyber v1.1.7/go.mod h1:UkHLsI4W6+jT5PvNxmc0cvQAgppjTUpX+XCsN9TXmRo= github.com/drand/kyber-bls12381 v0.2.0/go.mod h1:zQip/bHdeEB6HFZSU3v+d3cQE0GaBVQw9aR2E7AdoeI= github.com/drand/kyber-bls12381 v0.2.1 h1:/d5/YAdaCmHpYjF1NZevOEcKGaq6LBbyvkCTIdGqDjs= github.com/drand/kyber-bls12381 v0.2.1/go.mod h1:JwWn4nHO9Mp4F5qCie5sVIPQZ0X6cw8XAeMRvc/GXBE= @@ -344,12 +350,13 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= -github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-sysinfo v1.7.0 h1:4vVvcfi255+8+TyQ7TYUTEK3A+G8v5FLE+ZKYL1z1Dg= +github.com/elastic/go-sysinfo v1.7.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0 h1:qLURgZFkkrYyTTkvYpsZIgf83AUsdIHfvlJaqaZ7aSY= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= -github.com/elastic/gosigar v0.12.0 h1:AsdhYCJlTudhfOYQyFNgx+fIVTfrDO0V1ST0vHgiapU= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/gosigar v0.14.1 h1:T0aQ7n/n2ZA9W7DmAnj60v+qzqKERdBgJBO1CG2W6rc= +github.com/elastic/gosigar v0.14.1/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -358,6 +365,7 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/etclabscore/go-jsonschema-walk v0.0.6/go.mod h1:VdfDY72AFAiUhy0ZXEaWSpveGjMT5JcDIm903NGqFwQ= @@ -367,44 +375,49 @@ github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5 h1:BBso6MBKW github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.8.0/go.mod h1:3l45GVGkyrnYNl9HoIjnp2NnNWvh6hLAqD8yTfGjnw8= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/filecoin-project/dagstore v0.4.2/go.mod h1:WY5OoLfnwISCk6eASSF927KKPqLPIlTwmG1qHpA08KY= -github.com/filecoin-project/dagstore v0.4.3 h1:yeFl6+2BRY1gOVp/hrZuFa24s7LY0Qqkqx/Gh8lidZs= -github.com/filecoin-project/dagstore v0.4.3/go.mod h1:dm/91AO5UaDd3bABFjg/5fmRH99vvpS7g1mykqvz6KQ= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= +github.com/fatih/color v1.13.0 
h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/filecoin-project/dagstore v0.4.3-0.20211211192320-72b849e131d2/go.mod h1:tlV8C11UljvFq3WWlMh2oMViEaVaPb6uT8eL/YQgDfk= +github.com/filecoin-project/dagstore v0.4.4 h1:luolWahhzp3ulRsapGKE7raoLE3n2cFkQUJjPyqUmF4= +github.com/filecoin-project/dagstore v0.4.4/go.mod h1:7BlOvaTJrFJ1Qemt5jHlLJ4VhDIuSIzGS0IwO/0AXPA= github.com/filecoin-project/go-address v0.0.3/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= -github.com/filecoin-project/go-address v0.0.5 h1:SSaFT/5aLfPXycUlFyemoHYhRgdyXClXCyDdNJKPlDM= github.com/filecoin-project/go-address v0.0.5/go.mod h1:jr8JxKsYx+lQlQZmF5i2U0Z+cGQ59wMIps/8YW/lDj8= +github.com/filecoin-project/go-address v0.0.6 h1:DWQtj38ax+ogHwyH3VULRIoT8E6loyXqsk/p81xoY7M= +github.com/filecoin-project/go-address v0.0.6/go.mod h1:7B0/5DA13n6nHkB8bbGx1gWzG/dbTsZ0fgOJVGsM3TE= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0 h1:t6qDiuGYYngDqaLc2ZUvdtAg4UNxPeOYaXhBWSNsVaM= github.com/filecoin-project/go-amt-ipld/v2 v2.1.0/go.mod h1:nfFPoGyX0CU9SkXX8EoCcSuHN1XcbN0c6KBh7yvP5fs= github.com/filecoin-project/go-amt-ipld/v3 v3.0.0/go.mod h1:Qa95YNAbtoVCTSVtX38aAC1ptBnJfPma1R/zZsKmx4o= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0 h1:ZNJ9tEG5bE72vBWYiuh5bkxJVM3ViHNOmQ7qew9n6RE= github.com/filecoin-project/go-amt-ipld/v3 v3.1.0/go.mod h1:UjM2QhDFrrjD5s1CdnkJkat4ga+LqZBZgTMniypABRo= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0 h1:XM81BJ4/6h3FV0WfFjh74cIDIgqMbJsMBLM0fIuLUUk= +github.com/filecoin-project/go-amt-ipld/v4 v4.0.0/go.mod h1:gF053YQ4BIpzTNDoEwHZas7U3oAwncDVGvOHyY8oDpE= github.com/filecoin-project/go-bitfield v0.2.0/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.3/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= github.com/filecoin-project/go-bitfield v0.2.4 h1:uZ7MeE+XfM5lqrHJZ93OnhQKc/rveW8p9au0C68JPgk= 
github.com/filecoin-project/go-bitfield v0.2.4/go.mod h1:CNl9WG8hgR5mttCnUErjcQjGvuiZjRqK9rHVBsQF4oM= -github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2 h1:av5fw6wmm58FYMgJeoB/lK9XXrgdugYiTqkdxjTy9k8= github.com/filecoin-project/go-cbor-util v0.0.0-20191219014500-08c40a1e63a2/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7 h1:U9Z+76pHCKBmtdxFV7JFZJj7OVm12I6dEKwtMVbq5p0= -github.com/filecoin-project/go-commp-utils v0.1.1-0.20210427191551-70bf140d31c7/go.mod h1:6s95K91mCyHY51RPWECZieD3SGWTqIFLf1mPOes9l5U= -github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03 h1:2pMXdBnCiXjfCYx/hLqFxccPoqsSveQFxVLvNxy9bus= +github.com/filecoin-project/go-cbor-util v0.0.1 h1:E1LYZYTtjfAQwCReho0VXvbu8t3CYAVPiMx8EiV/VAs= +github.com/filecoin-project/go-cbor-util v0.0.1/go.mod h1:pqTiPHobNkOVM5thSRsHYjyQfq7O5QSCMhvuu9JoDlg= +github.com/filecoin-project/go-commp-utils v0.1.3 h1:rTxbkNXZU7FLgdkBk8RsQIEOuPONHykEoX3xGk41Fkw= +github.com/filecoin-project/go-commp-utils v0.1.3/go.mod h1:3ENlD1pZySaUout0p9ANQrY3fDFoXdqyX04J+dWpK30= github.com/filecoin-project/go-crypto v0.0.0-20191218222705-effae4ea9f03/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= -github.com/filecoin-project/go-data-transfer v1.0.1/go.mod h1:UxvfUAY9v3ub0a21BSK9u3pB2aq30Y0KMsG+w9/ysyo= -github.com/filecoin-project/go-data-transfer v1.10.0/go.mod h1:uQtqy6vUAY5v70ZHdkF5mJ8CjVtjj/JA3aOoaqzWTVw= -github.com/filecoin-project/go-data-transfer v1.10.1 h1:YQNLwhizxkdfFxegAyrnn3l7WjgMjqDlqFzr18iWiYI= -github.com/filecoin-project/go-data-transfer v1.10.1/go.mod h1:CSDMCrPK2lVGodNB1wPEogjFvM9nVGyiL1GNbBRTSdw= -github.com/filecoin-project/go-ds-versioning v0.1.0 h1:y/X6UksYTsK8TLCI7rttCKEvl8btmWxyFMEeeWGUxIQ= -github.com/filecoin-project/go-ds-versioning v0.1.0/go.mod h1:mp16rb4i2QPmxBnmanUx8i/XANp+PFCCJWiAb+VW4/s= +github.com/filecoin-project/go-crypto v0.0.1 
h1:AcvpSGGCgjaY8y1az6AMfKQWreF/pWO2JJGLl6gCq6o= +github.com/filecoin-project/go-crypto v0.0.1/go.mod h1:+viYnvGtUTgJRdy6oaeF4MTFKAfatX071MPDPBL11EQ= +github.com/filecoin-project/go-data-transfer v1.12.0/go.mod h1:tDrD2jLU2TpVhd+5B8iqBp0fQRV4lP80WZccKXugjYc= +github.com/filecoin-project/go-data-transfer v1.12.1 h1:gAznAZKySVs2FS6T/vDq7R3f0DewLnxeROe0oOE6bZU= +github.com/filecoin-project/go-data-transfer v1.12.1/go.mod h1:j3HL645YiQFxcM+q7uPlGApILSqeweDABNgZQP7pDYU= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff h1:2bG2ggVZ/rInd/YqUfRj4A5siGuYOPxxuD4I8nYLJF0= +github.com/filecoin-project/go-ds-versioning v0.0.0-20211206185234-508abd7c2aff/go.mod h1:C9/l9PnB1+mwPa26BBVpCjG/XQCB0yj/q5CK2J8X1I4= github.com/filecoin-project/go-fil-commcid v0.0.0-20200716160307-8f644712406f/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.0.0-20201016201715-d41df56b4f6a/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commcid v0.1.0 h1:3R4ds1A9r6cr8mvZBfMYxTS88OqLYEo6roi+GiIeOh8= github.com/filecoin-project/go-fil-commcid v0.1.0/go.mod h1:Eaox7Hvus1JgPrL5+M3+h7aSPHc0cVqpSxA+TxIEpZQ= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0 h1:imrrpZWEHRnNqqv0tN7LXep5bFEVOVmQWHJvl2mgsGo= github.com/filecoin-project/go-fil-commp-hashhash v0.1.0/go.mod h1:73S8WSEWh9vr0fDJVnKADhfIv/d6dCbAGaAGWbdJEI8= -github.com/filecoin-project/go-fil-markets v1.0.5-0.20201113164554-c5eba40d5335/go.mod h1:AJySOJC00JRWEZzRG2KsfUnqEf5ITXxeX09BE9N4f9c= -github.com/filecoin-project/go-fil-markets v1.12.0 h1:RpU5bLaMADVrU4CgLxKMGHC2ZUocNV35uINxogQCf00= -github.com/filecoin-project/go-fil-markets v1.12.0/go.mod h1:XuuZFaFujI47nrgfQJiq7jWB+6rRya6nm7Sj6uXQ80U= +github.com/filecoin-project/go-fil-markets v1.14.1 h1:Bx+TSbkAN8K97Hpjgu+MpeRFbXIKH/fNpNp1ZGAEH3I= +github.com/filecoin-project/go-fil-markets v1.14.1/go.mod h1:vXOHH3q2+zLk929W+lIq3etuDFTyJJ8nG2DwGHG2R1E= 
github.com/filecoin-project/go-hamt-ipld v0.1.5 h1:uoXrKbCQZ49OHpsTCkrThPNelC4W3LPEk0OrS/ytIBM= github.com/filecoin-project/go-hamt-ipld v0.1.5/go.mod h1:6Is+ONR5Cd5R6XZoCse1CWaXZc0Hdb/JeX+EQCQzX24= github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0 h1:b3UDemBYN2HNfk3KOXNuxgTTxlWi3xVvbQP0IT38fvM= @@ -412,53 +425,56 @@ github.com/filecoin-project/go-hamt-ipld/v2 v2.0.0/go.mod h1:7aWZdaQ1b16BVoQUYR+ github.com/filecoin-project/go-hamt-ipld/v3 v3.0.1/go.mod h1:gXpNmr3oQx8l3o7qkGyDjJjYSRX7hp/FGOStdqrWyDI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0 h1:rVVNq0x6RGQIzCo1iiJlGFm9AGIZzeifggxtKMU7zmI= github.com/filecoin-project/go-hamt-ipld/v3 v3.1.0/go.mod h1:bxmzgT8tmeVQA1/gvBwFmYdT8SOFUwB3ovSUfG1Ux0g= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec h1:rGI5I7fdU4viManxmDdbk5deZO7afe6L1Wc04dAmlOM= -github.com/filecoin-project/go-jsonrpc v0.1.4-0.20210217175800-45ea43ac2bec/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= -github.com/filecoin-project/go-multistore v0.0.3/go.mod h1:kaNqCC4IhU4B1uyr7YWFHd23TL4KM32aChS0jNkyUvQ= +github.com/filecoin-project/go-jsonrpc v0.1.5 h1:ckxqZ09ivBAVf5CSmxxrqqNHC7PJm3GYGtYKiNQ+vGk= +github.com/filecoin-project/go-jsonrpc v0.1.5/go.mod h1:XBBpuKIMaXIIzeqzO1iucq4GvbF8CxmXRFoezRh+Cx4= github.com/filecoin-project/go-padreader v0.0.0-20200903213702-ed5fae088b20/go.mod h1:mPn+LRRd5gEKNAtc+r3ScpW2JRU/pj4NBKdADYWHiak= -github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1 h1:0BogtftbcgyBx4lP2JWM00ZK7/pXmgnrDqKp9aLTgVs= github.com/filecoin-project/go-padreader v0.0.0-20210723183308-812a16dc01b1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= -github.com/filecoin-project/go-paramfetch v0.0.2 h1:a6W3Ij6CKhwHYYlx+5mqvBIyw4CabZH2ojdEaoAZ6/g= -github.com/filecoin-project/go-paramfetch v0.0.2/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= +github.com/filecoin-project/go-padreader v0.0.1 h1:8h2tVy5HpoNbr2gBRr+WD6zV6VD6XHig+ynSGJg8ZOs= +github.com/filecoin-project/go-padreader 
v0.0.1/go.mod h1:VYVPJqwpsfmtoHnAmPx6MUwmrK6HIcDqZJiuZhtmfLQ= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53 h1:+nripp+UI/rhl01w9Gs4V0XDGaVPYPMGU/D/gNVLue0= +github.com/filecoin-project/go-paramfetch v0.0.3-0.20220111000201-e42866db1a53/go.mod h1:1FH85P8U+DUEmWk1Jkw3Bw7FrwTVUNHk/95PSPG+dts= github.com/filecoin-project/go-state-types v0.0.0-20200903145444-247639ffa6ad/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= -github.com/filecoin-project/go-state-types v0.0.0-20200904021452-1883f36ca2f4/go.mod h1:IQ0MBPnonv35CJHtWSN3YY1Hz2gkPru1Q9qoaYLxx9I= github.com/filecoin-project/go-state-types v0.0.0-20200928172055-2df22083d8ab/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.0.0-20201102161440-c8033295a1fc/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.0/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-state-types v0.1.1-0.20210506134452-99b279731c48/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= -github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e h1:XAgb6HmgXaGRklNjhZoNMSIYriKLqjWXIqYMotg6iSs= github.com/filecoin-project/go-state-types v0.1.1-0.20210810190654-139e0e79e69e/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.1/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= +github.com/filecoin-project/go-state-types v0.1.3 h1:rzIJyQo5HO2ptc8Jcu8P0qTutnI7NWwTle54eAHoNO0= +github.com/filecoin-project/go-state-types v0.1.3/go.mod h1:ezYnPf0bNkTsDibL/psSz5dy4B5awOJ/E7P2Saeep8g= github.com/filecoin-project/go-statemachine v0.0.0-20200925024713-05bd7c71fbfe/go.mod h1:FGwQgZAt2Gh5mjlwJUlVB62JeYdo+if0xWxSEfBD9ig= github.com/filecoin-project/go-statemachine v1.0.1 h1:LQ60+JDVjMdLxXmVFM2jjontzOYnfVE7u02CXV3WKSw= github.com/filecoin-project/go-statemachine v1.0.1/go.mod 
h1:jZdXXiHa61n4NmgWFG4w8tnqgvZVHYbJ3yW7+y8bF54= github.com/filecoin-project/go-statestore v0.1.0/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-statestore v0.1.1 h1:ufMFq00VqnT2CAuDpcGnwLnCX1I/c3OROw/kXVNSTZk= -github.com/filecoin-project/go-statestore v0.1.1/go.mod h1:LFc9hD+fRxPqiHiaqUEZOinUJB4WARkRfNl10O7kTnI= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b h1:fkRZSPrYpk42PV3/lIXiL0LHetxde7vyYYvSsttQtfg= -github.com/filecoin-project/go-storedcounter v0.0.0-20200421200003-1c99c62e8a5b/go.mod h1:Q0GQOBtKf1oE10eSXSlhN45kDBdGvEcVOqMiffqX+N8= -github.com/filecoin-project/specs-actors v0.9.4/go.mod h1:BStZQzx5x7TmCkLv0Bpa07U6cPKol6fd3w9KjMPZ6Z4= -github.com/filecoin-project/specs-actors v0.9.12/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= +github.com/filecoin-project/go-statestore v0.2.0 h1:cRRO0aPLrxKQCZ2UOQbzFGn4WDNdofHZoGPjfNaAo5Q= +github.com/filecoin-project/go-statestore v0.2.0/go.mod h1:8sjBYbS35HwPzct7iT4lIXjLlYyPor80aU7t7a/Kspo= +github.com/filecoin-project/go-storedcounter v0.1.0 h1:Mui6wSUBC+cQGHbDUBcO7rfh5zQkWJM/CpAZa/uOuus= +github.com/filecoin-project/go-storedcounter v0.1.0/go.mod h1:4ceukaXi4vFURIoxYMfKzaRF5Xv/Pinh2oTnoxpv+z8= github.com/filecoin-project/specs-actors v0.9.13/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= github.com/filecoin-project/specs-actors v0.9.14 h1:68PVstg2UB3ZsMLF+DKFTAs/YKsqhKWynkr0IqmVRQY= github.com/filecoin-project/specs-actors v0.9.14/go.mod h1:TS1AW/7LbG+615j4NsjMK1qlpAwaFsG9w0V2tg2gSao= -github.com/filecoin-project/specs-actors/v2 v2.0.1/go.mod h1:v2NZVYinNIKA9acEMBm5wWXxqv5+frFEbekBFemYghY= github.com/filecoin-project/specs-actors/v2 v2.3.5-0.20210114162132-5b58b773f4fb/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= -github.com/filecoin-project/specs-actors/v2 v2.3.5 h1:PbT4tPlSXZ8sRgajhb4D8AOEmiaaZ+jg6tc6BBv8VQc= -github.com/filecoin-project/specs-actors/v2 v2.3.5/go.mod h1:LljnY2Mn2homxZsmokJZCpRuhOPxfXhvcek5gWkmqAc= 
+github.com/filecoin-project/specs-actors/v2 v2.3.6 h1:UxnWTfQd7JsOae39/aHCK0m1IBjdcyymCJfqxuSkn+g= +github.com/filecoin-project/specs-actors/v2 v2.3.6/go.mod h1:DJMpxVRXvev9t8P0XWA26RmTzN+MHiL9IlItVLT0zUc= github.com/filecoin-project/specs-actors/v3 v3.1.0/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v3 v3.1.1 h1:BE8fsns1GnEOxt1DTE5LxBK2FThXtWmCChgcJoHTg0E= github.com/filecoin-project/specs-actors/v3 v3.1.1/go.mod h1:mpynccOLlIRy0QnR008BwYBwT9fen+sPR13MA1VmMww= github.com/filecoin-project/specs-actors/v4 v4.0.0/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= github.com/filecoin-project/specs-actors/v4 v4.0.1 h1:AiWrtvJZ63MHGe6rn7tPu4nSUY8bA1KDNszqJaD5+Fg= github.com/filecoin-project/specs-actors/v4 v4.0.1/go.mod h1:TkHXf/l7Wyw4ZejyXIPS2rK8bBO0rdwhTZyQQgaglng= -github.com/filecoin-project/specs-actors/v5 v5.0.0-20210512015452-4fe3889fff57/go.mod h1:283yBMMUSDB2abcjP/hhrwTkhb9h3sfM6KGrep/ZlBI= github.com/filecoin-project/specs-actors/v5 v5.0.4 h1:OY7BdxJWlUfUFXWV/kpNBYGXNPasDIedf42T3sGx08s= github.com/filecoin-project/specs-actors/v5 v5.0.4/go.mod h1:5BAKRAMsOOlD8+qCw4UvT/lTLInCJ3JwOWZbX8Ipwq4= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506 h1:Ur/l2+6qN+lQiqjozWWc5p9UDaAMDZKTlDS98oRnlIw= -github.com/filecoin-project/specs-storage v0.1.1-0.20201105051918-5188d9774506/go.mod h1:nJRRM7Aa9XVvygr3W9k6xGF46RWzr2zxF/iGoAIfA/g= +github.com/filecoin-project/specs-actors/v6 v6.0.0/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/filecoin-project/specs-actors/v6 v6.0.1 h1:laxvHNsvrq83Y9n+W7znVCePi3oLyRf0Rkl4jFO8Wew= +github.com/filecoin-project/specs-actors/v6 v6.0.1/go.mod h1:V1AYfi5GkHXipx1mnVivoICZh3wtwPxDVuds+fbfQtk= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211117170924-fd07a4c7dff9/go.mod h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-20211222192039-c83bea50c402/go.mod 
h1:p6LIOFezA1rgRLMewbvdi3Pp6SAu+q9FtJ9CAleSjrE= +github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1 h1:FuDaXIbcw2hRsFI8SDTmsGGCE+NumpF6aiBoU/2X5W4= +github.com/filecoin-project/specs-actors/v7 v7.0.0-rc1/go.mod h1:TA5FwCna+Yi36POaT7SLKXsgEDvJwc0V/L6ZsO19B9M= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9 h1:oUYOvF7EvdXS0Zmk9mNkaB6Bu0l+WXBYPzVodKMiLug= +github.com/filecoin-project/specs-storage v0.1.1-0.20211228030229-6d460d25a0c9/go.mod h1:Tb88Zq+IBJbvAn3mS89GYj3jdRThBTE/771HCVZdRJU= github.com/filecoin-project/test-vectors/schema v0.0.5/go.mod h1:iQ9QXLpYWL3m7warwvK1JC/pTri8mnfEmKygNDqqY6E= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/flynn/noise v0.0.0-20180327030543-2492fe189ae6/go.mod h1:1i71OnUq3iUe1ma7Lr6yG6/rjvM3emb6yoL7xLFzcVQ= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= @@ -467,21 +483,21 @@ github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15/go.mod h1:tPg4cp github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.0 
h1:+cqqvzZV87b4adx/5ayVOaYZ2CrvM4ejQvUdBzPPUss= +github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1 h1:EzDjxMg43q1tA2c0MV3tNbaontnHLplHyFF6M5KiVP0= -github.com/gbrlsnchs/jwt/v3 v3.0.0-beta.1/go.mod h1:0eHX/BVySxPc6SE2mZRoppGq7qcEagxdmQnA3dzork8= +github.com/gbrlsnchs/jwt/v3 v3.0.1 h1:lbUmgAKpxnClrKloyIwpxm4OuWeDl5wLk52G91ODPw4= +github.com/gbrlsnchs/jwt/v3 v3.0.1/go.mod h1:AncDcjXz18xetI3A6STfXq2w+LuTx8pQ8bGEwRN8zVM= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.2.0 h1:vSyEgKwraXPSOkvCk7IwOSyX+Pv3V2cV9CikJMXg4U4= github.com/gdamore/tcell/v2 v2.2.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU= github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= @@ -498,17 +514,26 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0 h1:7i2K3eKTos3Vc0enKCfnVcgHh2olr/MyfboYq7cAcFw= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.1 h1:DX7uPQ4WgAWfoh+NGGlbJQswnYIVvz0SRlLS3rPZQDA= +github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.0 h1:j4LrlVXgrbIWO83mmQUnK0Hi+YnbD+vzrE1z/EphbFE= +github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= 
+github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY= +github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -577,10 +602,10 @@ github.com/go-redis/redis/v7 v7.4.0/go.mod h1:JDNMw23GTyLNC4GZu9njt15ctBQVn7xjRf github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= @@ -607,8 +632,9 @@ github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/V github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968 
h1:s+PDl6lozQ+dEUtUtQnO7+A2iPG3sK1pI4liU+jxn90= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4 h1:9349emZab16e7zQvpmsbtjc18ykshndd8y2PG3sgJbA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= @@ -627,16 +653,20 @@ github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRs github.com/gogo/status v1.1.0 h1:+eIkrewn5q6b30y+g/BJINVVdi2xH7je5MPJ3ZPK3JA= github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/geo v0.0.0-20190916061304-5b978397cfec/go.mod h1:QZ0nwyI2jOfgRAoBvP+ab5aRr7c9x7lhGEJrKvBwjWI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -668,8 +698,8 @@ github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf h1:gFVkHXmVAhEbxZVDln5V9GKrLaluNoFHDbrZwAWZgws= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/lint-1 
v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -685,8 +715,9 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -744,7 +775,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= 
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -759,16 +789,21 @@ github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e h1:3YKHER4n github.com/hannahhoward/go-pubsub v0.0.0-20200423002714-8d62886cc36e/go.mod h1:I8h3MITA53gN9OnWGCgaMa0JWVRdXthWw4M3CPM54OY= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU= +github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.12.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= 
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= @@ -796,12 +831,15 @@ github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= @@ -810,6 +848,10 @@ github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0 github.com/iancoleman/orderedmap v0.1.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod 
h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94 h1:9tcYMdi+7Rb1y0E9Del1DRHui7Ne3za5lLw6CjMJv/M= +github.com/icza/backscanner v0.0.0-20210726202459-ac2ffc679f94/go.mod h1:GYeBD1CF7AqnKZK+UCytLcY3G+UKo0ByXX/3xfdNyqQ= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6 h1:8UsGZ2rr2ksmEru6lToqnXgA8Mz1DP11X4zSJ159C3k= +github.com/icza/mighty v0.0.0-20180919140131-cfd07d671de6/go.mod h1:xQig96I1VNBDIWGCdTt54nHt6EeI639SmHycLYL7FkA= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -821,8 +863,9 @@ github.com/influxdata/influxdb v1.9.4 h1:hZMq5fd4enVnruYHd7qCHsqG7kWQ/msA6x+kCvG github.com/influxdata/influxdb v1.9.4/go.mod h1:dR0WCHqaHPpJLaqWnRSl/QHsbXJR+QpofbZXyTc8ccw= github.com/influxdata/influxdb-client-go/v2 v2.3.1-0.20210518120617-5d1fff431040/go.mod h1:vLNHdxTJkIf2mSLvGrpj8TCcISApPoXkaxP8g9uRlW8= github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3 h1:k3/6a1Shi7GGCp9QpyYuXsMM6ncTOjCzOE9Fd6CDA+Q= github.com/influxdata/influxdb1-client v0.0.0-20200515024757-02f0bf5dbca3/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab h1:HqW4xhhynfjrtEiiSGcQUd6vrK23iMam1FO8rI7mwig= +github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= github.com/influxdata/influxql v1.1.0/go.mod h1:KpVI7okXjK6PRi3Z5B+mtKZli+R1DnZgb3N+tzevNgo= github.com/influxdata/influxql 
v1.1.1-0.20210223160523-b6ab99450c93/go.mod h1:gHp9y86a/pxhjJ+zMjNXiQAA197Xk9wLxaz+fGG+kWk= github.com/influxdata/line-protocol v0.0.0-20180522152040-32c6aa80de5e/go.mod h1:4kt73NQhadE3daL3WhR5EJ/J2ocX0PZzwxQ0gXJ7oFE= @@ -839,19 +882,16 @@ github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyq github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.1.8/go.mod h1:TOWoxllhccevbWFUR2N7B1MTSVVge1s6XSMiCSA4MzM= -github.com/ipfs/go-bitswap v0.3.4 h1:AhJhRrG8xkxh6x87b4wWs+4U4y3DVB3doI8yFNqgQME= -github.com/ipfs/go-bitswap v0.3.4/go.mod h1:4T7fvNv/LmOys+21tnLzGKncMeeXUYUd1nUiJ2teMvI= +github.com/ipfs/go-bitswap v0.5.1 h1:721YAEDBnLIrvcIMkCHCdqp34hA8jwL9yKMkyJpSpco= +github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= github.com/ipfs/go-block-format v0.0.2/go.mod h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= github.com/ipfs/go-block-format v0.0.3 h1:r8t66QstRp/pd/or4dpnbVfXT5Gt7lOqRvC+/dDTpMc= github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.1.3/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.4-0.20200624145336-a978cec6e834/go.mod h1:OTZhFpkgY48kNzbgyvcexW9cHrpjBYIjSR0KoDOFOLU= -github.com/ipfs/go-blockservice v0.1.5 h1:euqZu96CCbToPyYVwVshu8ENURi8BhFd7FUFfTLi+fQ= -github.com/ipfs/go-blockservice v0.1.5/go.mod h1:yLk8lBJCBRWRqerqCSVi3cE/Dncdt3vGC/PJMVKhLTY= 
+github.com/ipfs/go-blockservice v0.2.1 h1:NJ4j/cwEfIg60rzAWcCIxRtOwbf6ZPK49MewNxObCPQ= +github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= @@ -861,7 +901,6 @@ github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67Fexh github.com/ipfs/go-cid v0.0.6-0.20200501230655-7c82f3b81c00/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.8-0.20210716091050-de6c03deae1c/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cid v0.1.0 h1:YN33LQulcRHjfom/i25yoOZR4Telp1Hr/2RU3d0PnC0= github.com/ipfs/go-cid v0.1.0/go.mod h1:rH5/Xv83Rfy8Rw6xG+id3DYAMUVmem1MowoKwdXmN2o= github.com/ipfs/go-cidutil v0.0.2 h1:CNOboQf1t7Qp0nuNh8QMmhJs0+Q//bRL1axtCnIB1Yo= @@ -870,15 +909,15 @@ github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAK github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.0/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.2/go.mod 
h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.4.6 h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc= -github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= +github.com/ipfs/go-datastore v0.4.7-0.20211013204805-28a3721c2e66/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= +github.com/ipfs/go-datastore v0.5.1 h1:WkRhLuISI+XPD0uk3OskB0fYFSyqK8Ob5ZYew9Qa1nQ= +github.com/ipfs/go-datastore v0.5.1/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= @@ -886,39 +925,33 @@ github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaH github.com/ipfs/go-ds-badger v0.0.7/go.mod h1:qt0/fWzZDoPW6jpQeqUjR5kBfhDNB65jd9YlmAvpQBk= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= -github.com/ipfs/go-ds-badger v0.2.6/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger v0.2.7 h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I= github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= -github.com/ipfs/go-ds-badger2 v0.1.0/go.mod h1:pbR1p817OZbdId9EvLOhKBgUVTM3BMCSTan78lDDVaw= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e h1:Xi1nil8K2lBOorBS6Ys7+hmUCzH8fr3U9ipdL/IrcEI= -github.com/ipfs/go-ds-badger2 v0.1.1-0.20200708190120-187fc06f714e/go.mod 
h1:lJnws7amT9Ehqzta0gwMrRsURU04caT0iRPr1W8AsOU= +github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= +github.com/ipfs/go-ds-badger v0.3.0/go.mod h1:1ke6mXNqeV8K3y5Ak2bAA0osoTfmxUdupVCGm4QUIek= +github.com/ipfs/go-ds-badger2 v0.1.2 h1:sQc2q1gaXrv8YFNeUtxil0neuyDf9hnVHfLsi7lpXfE= +github.com/ipfs/go-ds-badger2 v0.1.2/go.mod h1:3FtQmDv6fMubygEfU43bsFelYpIiXX/XEYA54l9eCwg= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.1.0/go.mod h1:hqAW8y4bwX5LWcCtku2rFNX3vjDZCy5LZCg+cSZvYb8= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-leveldb v0.4.2 h1:QmQoAJ9WkPMUfBLnu1sBVy0xWWlJPg0m4kRAiJL9iaw= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ds-measure v0.1.0 h1:vE4TyY4aeLeVgnnPBC5QzKIjKrqzha0NCujTfgvVbVQ= -github.com/ipfs/go-ds-measure v0.1.0/go.mod h1:1nDiFrhLlwArTME1Ees2XaBOl49OoCgd2A3f8EchMSY= -github.com/ipfs/go-ds-pebble v0.0.2-0.20200921225637-ce220f8ac459/go.mod h1:oh4liWHulKcDKVhCska5NLelE3MatWl+1FwSz3tY91g= -github.com/ipfs/go-filestore v1.0.0 h1:QR7ekKH+q2AGiWDc7W2Q0qHuYSRZGUJqUn0GsegEPb0= -github.com/ipfs/go-filestore v1.0.0/go.mod h1:/XOCuNtIe2f1YPbiXdYvD0BKLA0JR1MgPiFOdcuu9SM= +github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= +github.com/ipfs/go-ds-leveldb v0.5.0/go.mod h1:d3XG9RUDzQ6V4SHi8+Xgj9j1XuEk1z82lquxrVbml/Q= +github.com/ipfs/go-ds-measure v0.2.0 h1:sG4goQe0KDTccHMyT45CY1XyUbxe5VwTKpg2LjApYyQ= +github.com/ipfs/go-ds-measure v0.2.0/go.mod h1:SEUD/rE2PwRa4IQEC5FuNAmjJCyYObZr9UvVh8V3JxE= +github.com/ipfs/go-filestore v1.1.0 h1:Pu4tLBi1bucu6/HU9llaOmb9yLFk/sgP+pW764zNDoE= +github.com/ipfs/go-filestore v1.1.0/go.mod h1:6e1/5Y6NvLuCRdmda/KA4GUhXJQ3Uat6vcWm2DJfxc8= github.com/ipfs/go-fs-lock v0.0.6 h1:sn3TWwNVQqSeNjlWy6zQ1uUGAZrV3hPOyEA6y1/N2a0= github.com/ipfs/go-fs-lock v0.0.6/go.mod 
h1:OTR+Rj9sHiRubJh3dRhD15Juhd/+w6VPOY28L7zESmM= -github.com/ipfs/go-graphsync v0.1.0/go.mod h1:jMXfqIEDFukLPZHqDPp8tJMbHO9Rmeb9CEGevngQbmE= -github.com/ipfs/go-graphsync v0.4.2/go.mod h1:/VmbZTUdUMTbNkgzAiCEucIIAU3BkLE2cZrDCVUhyi0= -github.com/ipfs/go-graphsync v0.4.3/go.mod h1:mPOwDYv128gf8gxPFgXnz4fNrSYPsWyqisJ7ych+XDY= -github.com/ipfs/go-graphsync v0.9.0/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= -github.com/ipfs/go-graphsync v0.9.1 h1:jo7ZaAZ3lal89RhKxKoRkPzIO8lmOY6KUWA1mDRZ2+U= -github.com/ipfs/go-graphsync v0.9.1/go.mod h1:J62ahWT9JbPsFL2UWsUM5rOu0lZJ0LOIH1chHdxGGcw= -github.com/ipfs/go-hamt-ipld v0.1.1/go.mod h1:1EZCr2v0jlCnhpa+aZ0JZYp8Tt2w16+JJOAVz17YcDk= +github.com/ipfs/go-graphsync v0.11.0/go.mod h1:wC+c8vGVjAHthsVIl8LKr37cUra2GOaMYcQNNmMxDqE= +github.com/ipfs/go-graphsync v0.11.5 h1:WA5hVxGBtcal6L6nqubKiqRolaZxbexOK3GumGFJRR4= +github.com/ipfs/go-graphsync v0.11.5/go.mod h1:+/sZqRwRCQRrV7NCzgBtufmr5QGpUE98XSa7NlsztmM= github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.1.4/go.mod h1:Jxm3XMVjh6R17WvxFEiyKBLUGr86HgIYJW/D/MwqeYQ= -github.com/ipfs/go-ipfs-blockstore v1.0.0/go.mod h1:knLVdhVU9L7CC4T+T4nvGdeUIPAXlnd9zmXfp+9MIjU= -github.com/ipfs/go-ipfs-blockstore v1.0.1/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.3/go.mod h1:MGNZlHNEnR4KGgPHM3/k8lBySIOK2Ve+0KjZubKlaOE= -github.com/ipfs/go-ipfs-blockstore v1.0.4 h1:DZdeya9Vu4ttvlGheQPGrj6kWehXnYZRFCp9EsZQ1hI= -github.com/ipfs/go-ipfs-blockstore v1.0.4/go.mod h1:uL7/gTJ8QIZ3MtA3dWf+s1a0U3fJy2fcEZAsovpRp+w= +github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= +github.com/ipfs/go-ipfs-blockstore v1.1.0/go.mod h1:5QDUApRqpgPcfGstCxYeMnjt/DYQtXXdJVCvxHHuWVk= +github.com/ipfs/go-ipfs-blockstore v1.1.1/go.mod 
h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= +github.com/ipfs/go-ipfs-blockstore v1.1.2 h1:WCXoZcMYnvOTmlpX+RSSnhVN0uCmbWTeepTGX5lgiXw= +github.com/ipfs/go-ipfs-blockstore v1.1.2/go.mod h1:w51tNR9y5+QXB0wkNcHt4O2aSZjTdqaEWaQdSxEyUOY= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= @@ -933,16 +966,20 @@ github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1I github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.0.0 h1:bEQ8hMGs80h0sR8O4tfDgV6B01aaF9qeTrujrTLYV3g= github.com/ipfs/go-ipfs-ds-help v1.0.0/go.mod h1:ujAbkeIgkKAWtxxNkoZHWLCyk5JpPoKnGyCcsoF6ueE= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1 h1:LJXIo9W7CAmugqI+uofioIpRb6rY30GUu7G6LUfpMvM= +github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= +github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1 h1:P56jYKZF7lDDOLx5SotVh5KFxoY6C81I1NSHW1FxGew= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0 h1:TiMekCrOGQuWYtZO3mf4YJXDIdNgnKWZ9IE3fGlnWfo= +github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= +github.com/ipfs/go-ipfs-exchange-offline v0.1.1 h1:mEiXWdbMN6C7vtDG21Fphx8TGCbZPpQnz/496w/PL4g= 
+github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= github.com/ipfs/go-ipfs-files v0.0.4/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.8 h1:8o0oFJkJ8UkO/ABl8T6ac6tKF3+NIpj67aAB6ZpusRg= github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= +github.com/ipfs/go-ipfs-files v0.0.9 h1:OFyOfmuVDu9c5YtjSDORmwXzE6fmZikzZpzsnNkgFEg= +github.com/ipfs/go-ipfs-files v0.0.9/go.mod h1:aFv2uQ/qxWpL/6lidWvnSQmaVqCrf0TBGoUr+C1Fo84= github.com/ipfs/go-ipfs-http-client v0.0.6 h1:k2QllZyP7Fz5hMgsX5hvHfn1WPG9Ngdy5WknQ7JNhBM= github.com/ipfs/go-ipfs-http-client v0.0.6/go.mod h1:8e2dQbntMZKxLfny+tyXJ7bJHZFERp/2vyzZdvkeLMc= github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= @@ -951,21 +988,26 @@ github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7 github.com/ipfs/go-ipfs-pq v0.0.2 h1:e1vOOW6MuOwG2lqxcLA+wEn93i/9laCY8sXAw76jFOY= github.com/ipfs/go-ipfs-pq v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0 h1:gAJTT1cEeeLj6/DlLX6t+NxD9fQe2ymTO6qWRDI/HQQ= github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= +github.com/ipfs/go-ipfs-routing v0.2.1 h1:E+whHWhJkdN9YeoHZNj5itzc+OR292AJ2uE9FFiW0BY= +github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= github.com/ipfs/go-ipld-cbor v0.0.2/go.mod 
h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5-0.20200204214505-252690b78669/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.5 h1:ovz4CHKogtG2KB/h1zUp5U0c/IzZrL435rCh5+K/5G8= github.com/ipfs/go-ipld-cbor v0.0.5/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= +github.com/ipfs/go-ipld-cbor v0.0.6-0.20211211231443-5d9b9e1f6fa8/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= +github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= +github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= github.com/ipfs/go-ipld-format v0.2.0 h1:xGlJKkArkmBvowr+GMCX0FEZtkro71K1AwiKnL37mwA= github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= +github.com/ipfs/go-ipld-legacy v0.1.0/go.mod h1:86f5P/srAmh9GcIcWQR9lfFLZPrIyyXQeVlOWeeWEuI= +github.com/ipfs/go-ipld-legacy v0.1.1 h1:BvD8PEuqwBHLTKqlGFTHSwrwFOMkVESEvwIYwR2cdcc= +github.com/ipfs/go-ipld-legacy v0.1.1/go.mod h1:8AyKFCjgRPsQFf15ZQgDB8Din4DML/fOmKZkkFkrIEg= github.com/ipfs/go-ipns v0.1.2 h1:O/s/0ht+4Jl9+VoxoUo0zaHjnZUS+aBQIKTuzdZ/ucI= github.com/ipfs/go-ipns v0.1.2/go.mod h1:ioQ0j02o6jdIVW+bmi18f4k2gRf0AV3kZ9KeHYHICnQ= github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= @@ -985,14 +1027,15 @@ github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHn github.com/ipfs/go-log/v2 v2.1.2-0.20200626104915-0016c0b4b3e4/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.2/go.mod 
h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= -github.com/ipfs/go-log/v2 v2.3.0 h1:31Re/cPqFHpsRHgyVwjWADPoF0otB1WrjTy8ZFYwEZU= github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= +github.com/ipfs/go-log/v2 v2.4.0 h1:iR/2o9PGWanVJrBgIH5Ff8mPGOwpqLaPIAFqSnsdlzk= +github.com/ipfs/go-log/v2 v2.4.0/go.mod h1:nPZnh7Cj7lwS3LpRU5Mwr2ol1c2gXIEXuF6aywqrtmo= github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= github.com/ipfs/go-merkledag v0.2.4/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.1/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.3.2 h1:MRqj40QkrWkvPswXs4EfSslhZ4RVPRbxwX11js0t1xY= github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= +github.com/ipfs/go-merkledag v0.5.1 h1:tr17GPP5XtPhvPPiWtu20tSGZiZDuTaJRXBLcr79Umk= +github.com/ipfs/go-merkledag v0.5.1/go.mod h1:cLMZXx8J08idkp5+id62iVftUQV+HlYJ3PIhDfZsjA4= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= github.com/ipfs/go-metrics-prometheus v0.0.2/go.mod h1:ELLU99AQQNi+zX6GCGm2lAgnzdSH3u5UVlCdqSXnEks= @@ -1000,9 +1043,9 @@ github.com/ipfs/go-path v0.0.7 h1:H06hKMquQ0aYtHiHryOMLpQC1qC3QwXwkahcEVD51Ho= github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.1.1/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.2.0 
h1:2cSr7exUGKYyDeUyQ7P/nHPs9P7Ht/B+ROrpN1EJOjc= -github.com/ipfs/go-peertaskqueue v0.2.0/go.mod h1:5/eNrBEbtSKWCG+kQK8K8fGNixoYUnr+P7jivavs9lY= +github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= +github.com/ipfs/go-peertaskqueue v0.7.1 h1:7PLjon3RZwRQMgOTvYccZ+mjzkmds/7YzSWKFlBAypE= +github.com/ipfs/go-peertaskqueue v0.7.1/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-todocounter v0.0.1/go.mod h1:l5aErvQc8qKE2r7NDMjmq5UNAvuZy0rC8BHOplkWvZ4= github.com/ipfs/go-unixfs v0.2.2-0.20190827150610-868af2e9e5cb/go.mod h1:IwAAgul1UQIcNZzKPYZWOCijryFBeCV79cNubPzol+k= github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= @@ -1017,30 +1060,30 @@ github.com/ipfs/iptb v1.4.0/go.mod h1:1rzHpCYtNp87/+hTxG5TfCVn/yMY3dKnLn8tBiMfdm github.com/ipfs/iptb-plugins v0.3.0 h1:C1rpq1o5lUZtaAOkLIox5akh6ba4uk/3RwWc6ttVxw0= github.com/ipfs/iptb-plugins v0.3.0/go.mod h1:5QtOvckeIw4bY86gSH4fgh3p3gCSMn3FmIKr4gaBncA= github.com/ipld/go-car v0.1.0/go.mod h1:RCWzaUh2i4mOEkB3W45Vc+9jnS/M6Qay5ooytiBHl3g= -github.com/ipld/go-car v0.1.1-0.20200923150018-8cdef32e2da4/go.mod h1:xrMEcuSq+D1vEwl+YAXsg/JfA98XGpXDwnkIL4Aimqw= -github.com/ipld/go-car v0.1.1-0.20201119040415-11b6074b6d4d/go.mod h1:2Gys8L8MJ6zkh1gktTSXreY63t4UbyvNp5JaudTyxHQ= -github.com/ipld/go-car v0.3.1-0.20210601190600-f512dac51e8e/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= -github.com/ipld/go-car v0.3.1-null-padded-files h1:FMD0Ce4tAM9P5aq7yklw2jnVK3ZuoJ4xK6vkL9VLmxs= -github.com/ipld/go-car v0.3.1-null-padded-files/go.mod h1:wUxBdwOLA9/0HZBi3fnTBzla0MuwlqgJLyrhOg1XaKI= -github.com/ipld/go-car/v2 v2.0.0-beta1.0.20210721090610-5a9d1b217d25/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.2/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= -github.com/ipld/go-car/v2 v2.0.3-0.20210811121346-c514a30114d7 h1:6Z0beJSZNsRY+7udoqUl4gQ/tqtrPuRvDySrlsvbqZA= -github.com/ipld/go-car/v2 
v2.0.3-0.20210811121346-c514a30114d7/go.mod h1:I2ACeeg6XNBe5pdh5TaR7Ambhfa7If9KXxmXgZsYENU= +github.com/ipld/go-car v0.3.3-0.20211210032800-e6f244225a16/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car v0.3.3 h1:D6y+jvg9h2ZSv7GLUMWUwg5VTLy1E7Ak+uQw5orOg3I= +github.com/ipld/go-car v0.3.3/go.mod h1:/wkKF4908ULT4dFIFIUZYcfjAnj+KFnJvlh8Hsz1FbQ= +github.com/ipld/go-car/v2 v2.1.1-0.20211211000942-be2525f6bf2d/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= +github.com/ipld/go-car/v2 v2.1.1 h1:saaKz4nC0AdfCGHLYKeXLGn8ivoPC54fyS55uyOLKwA= +github.com/ipld/go-car/v2 v2.1.1/go.mod h1:+2Yvf0Z3wzkv7NeI69i8tuZ+ft7jyjPYIWZzeVNeFcI= github.com/ipld/go-codec-dagpb v1.2.0/go.mod h1:6nBN7X7h8EOsEejZGqC7tej5drsdBAXbMHyBT+Fne5s= github.com/ipld/go-codec-dagpb v1.3.0 h1:czTcaoAuNNyIYWs6Qe01DJ+sEX7B+1Z0LcXjSatMGe8= github.com/ipld/go-codec-dagpb v1.3.0/go.mod h1:ga4JTU3abYApDC3pZ00BC2RSvC3qfBb9MSJkMLSwnhA= github.com/ipld/go-ipld-prime v0.0.2-0.20191108012745-28a82f04c785/go.mod h1:bDDSvVz7vaK12FNvMeRYnpRFkSUPNQOiCYQezMD/P3w= -github.com/ipld/go-ipld-prime v0.0.2-0.20200428162820-8b59dc292b8e/go.mod h1:uVIwe/u0H4VdKv3kaN1ck7uCb6yD9cFLS9/ELyXbsw8= -github.com/ipld/go-ipld-prime v0.5.1-0.20200828233916-988837377a7f/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= -github.com/ipld/go-ipld-prime v0.5.1-0.20201021195245-109253e8a018/go.mod h1:0xEgdD6MKbZ1vF0GC+YcR/C4SQCAlRuOjIJ2i0HxqzM= github.com/ipld/go-ipld-prime v0.9.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.9.1-0.20210324083106-dc342a9917db/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= +github.com/ipld/go-ipld-prime v0.10.0/go.mod h1:KvBLMr4PX1gWptgkzRjVZCrLmSGcZCb/jioOQwCqZN8= github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= -github.com/ipld/go-ipld-prime v0.12.0 h1:JapyKWTsJgmhrPI7hfx4V798c/RClr85sXfBZnH1VIw= -github.com/ipld/go-ipld-prime v0.12.0/go.mod 
h1:hy8b93WleDMRKumOJnTIrr0MbbFbx9GD6Kzxa53Xppc= +github.com/ipld/go-ipld-prime v0.12.3/go.mod h1:PaeLYq8k6dJLmDUSLrzkEpoGV4PEfe/1OtFN/eALOc8= +github.com/ipld/go-ipld-prime v0.14.0/go.mod h1:9ASQLwUFLptCov6lIYc70GRB4V7UTyLD0IJtrDJe6ZM= +github.com/ipld/go-ipld-prime v0.14.3-0.20211207234443-319145880958/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= +github.com/ipld/go-ipld-prime v0.14.3 h1:cGUmxSws2IHurn00/iLMDapeXsnf9+FyAtYVy8G/JsQ= +github.com/ipld/go-ipld-prime v0.14.3/go.mod h1:QcE4Y9n/ZZr8Ijg5bGPT0GqYWgZ1704nH0RDcQtgTP0= github.com/ipld/go-ipld-prime-proto v0.0.0-20191113031812-e32bd156a1e5/go.mod h1:gcvzoEDBjwycpXt3LBE061wT9f46szXGHAmj9uoP6fU= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200428191222-c1ffdadc01e1/go.mod h1:OAV6xBmuTLsPZ+epzKkPB1e25FHk/vCtyatkdHcArLs= -github.com/ipld/go-ipld-prime-proto v0.0.0-20200922192210-9a2bfd4440a6/go.mod h1:3pHYooM9Ea65jewRwrb2u5uHZCNkNTe9ABsVB+SrkH0= -github.com/ipld/go-ipld-prime-proto v0.1.0/go.mod h1:11zp8f3sHVgIqtb/c9Kr5ZGqpnCLF1IVTNOez9TopzE= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73 h1:TsyATB2ZRRQGTwafJdgEUQkmjOExRV0DNokcihZxbnQ= +github.com/ipld/go-ipld-prime/storage/bsadapter v0.0.0-20211210234204-ce2a1c70cd73/go.mod h1:2PJ0JgxyB08t0b2WKrcuqI3di0V+5n6RS/LTUJhkoxY= +github.com/ipld/go-ipld-selector-text-lite v0.0.1 h1:lNqFsQpBHc3p5xHob2KvEg/iM5dIFn6iw4L/Hh+kS1Y= +github.com/ipld/go-ipld-selector-text-lite v0.0.1/go.mod h1:U2CQmFb+uWzfIEF3I1arrDa5rwtj00PrpiwwCO+k1RM= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52 h1:QG4CGBqCeuBo6aZlGAamSkxWdgWfZGeE49eUOWJPA4c= github.com/ipsn/go-secp256k1 v0.0.0-20180726113642-9d62b9f0bc52/go.mod h1:fdg+/X9Gg4AsAIzWpEHwnqd+QY3b7lajxyjE1m4hkq4= github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= @@ -1048,7 +1091,6 @@ github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+ github.com/jackpal/go-nat-pmp v1.0.2 
h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= -github.com/jbenet/go-cienv v0.1.0 h1:Vc/s0QbQtoxX8MwwSLWWh+xNNZvM3Lw7NsTcHrvvhMc= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c h1:uUx61FiAa1GI6ZmVd2wf2vULeQZIKG66eybjNXKYCz4= github.com/jbenet/go-random v0.0.0-20190219211222-123a90aedc0c/go.mod h1:sdx1xVM9UuLw1tXnhJWN3piypTUO3vCIHYmG15KE/dU= @@ -1071,8 +1113,9 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1 h1:qBCV/RLV02TSfQa7tFmxTihnG+u+7JXByOkhlkR5rmQ= github.com/jonboulle/clockwork v0.1.1-0.20190114141812-62fb9bc030d1/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -1086,6 +1129,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o= @@ -1115,13 +1159,14 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.6/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= 
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= @@ -1138,8 +1183,9 @@ github.com/kpacha/opencensus-influxdb v0.0.0-20181102202715-663e2683a27c/go.mod github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -1163,8 +1209,9 @@ github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40J github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1 h1:ft6/POSK7F+vl/2qzegnHDaXFU0iWB4yVTYrioC6Zy0= github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= +github.com/libp2p/go-conn-security-multistream v0.3.0 h1:9UCIKlBL1hC9u7nkMXpD1nkc/T53PKMAn3/k9ivBAVc= +github.com/libp2p/go-conn-security-multistream v0.3.0/go.mod 
h1:EEP47t4fw/bTelVmEzIDqSe69hO/ip52xBEhZMLWAHM= github.com/libp2p/go-eventbus v0.0.2/go.mod h1:Hr/yGlwxA/stuLnpMiu82lpNKpvRy3EaJxPu40XYOwk= github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= github.com/libp2p/go-eventbus v0.2.1 h1:VanAdErQnpTioN2TowqNcOijf6YwhuODe4pPKSDpxGc= @@ -1178,21 +1225,20 @@ github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68 github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= github.com/libp2p/go-libp2p v0.3.1/go.mod h1:e6bwxbdYH1HqWTz8faTChKGR0BjPc8p+6SyP8GTTR7Y= github.com/libp2p/go-libp2p v0.4.0/go.mod h1:9EsEIf9p2UDuwtPd0DwJsAl0qXVxgAnuDGRvHbfATfI= -github.com/libp2p/go-libp2p v0.6.0/go.mod h1:mfKWI7Soz3ABX+XEBR61lGbg+ewyMtJHVt043oWeqwg= github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= github.com/libp2p/go-libp2p v0.8.3/go.mod h1:EsH1A+8yoWK+L4iKcbPYu6MPluZ+CHWI9El8cTaefiM= -github.com/libp2p/go-libp2p v0.9.2/go.mod h1:cunHNLDVus66Ct9iXXcjKRLdmHdFdHVe1TAnbubJQqQ= github.com/libp2p/go-libp2p v0.10.0/go.mod h1:yBJNpb+mGJdgrwbKAKrhPU0u3ogyNFTfjJ6bdM+Q/G8= -github.com/libp2p/go-libp2p v0.13.0/go.mod h1:pM0beYdACRfHO1WcJlp65WXyG2A6NqYM+t2DTVAJxMo= -github.com/libp2p/go-libp2p v0.14.0/go.mod h1:dsQrWLAoIn+GkHPN/U+yypizkHiB9tnv79Os+kSgQ4Q= +github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= -github.com/libp2p/go-libp2p v0.15.0 h1:jbMbdmtizfpvl1+oQuGJzfGhttAtuxUCavF3enwFncg= -github.com/libp2p/go-libp2p v0.15.0/go.mod h1:8Ljmwon0cZZYKrOCjFeLwQEK8bqR42dOheUZ1kSKhP0= -github.com/libp2p/go-libp2p-asn-util 
v0.0.0-20200825225859-85005c6cf052 h1:BM7aaOF7RpmNn9+9g6uTjGJ0cTzWr5j9i9IKeun2M8U= +github.com/libp2p/go-libp2p v0.16.0/go.mod h1:ump42BsirwAWxKzsCiFnTtN1Yc+DuPu76fyMX364/O4= +github.com/libp2p/go-libp2p v0.17.0 h1:8l4GV401OSd4dFRyHDtIT/mEzdh/aQGoFC8xshYgm5M= +github.com/libp2p/go-libp2p v0.17.0/go.mod h1:Fkin50rsGdv5mm5BshBUtPRZknt9esfmYXBOYcwOTgw= github.com/libp2p/go-libp2p-asn-util v0.0.0-20200825225859-85005c6cf052/go.mod h1:nRMRTab+kZuk0LnKZpxhOVH/ndsdr2Nr//Zltc/vwgo= +github.com/libp2p/go-libp2p-asn-util v0.1.0 h1:rABPCO77SjdbJ/eJ/ynIo8vWICy1VEnL5JAxJbQLo1E= +github.com/libp2p/go-libp2p-asn-util v0.1.0/go.mod h1:wu+AnM9Ii2KgO5jMmS1rz9dvzTdj8BXqsPR9HR0XB7I= github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= @@ -1200,17 +1246,19 @@ github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQ github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= github.com/libp2p/go-libp2p-autonat v0.2.3/go.mod h1:2U6bNWCNsAG9LEbwccBDQbjzQ8Krdjge1jLTE9rdoMM= -github.com/libp2p/go-libp2p-autonat v0.4.0/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-autonat v0.4.2 h1:YMp7StMi2dof+baaxkbxaizXjY1RPvU71CXfxExzcUU= github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= +github.com/libp2p/go-libp2p-autonat v0.6.0/go.mod h1:bFC6kY8jwzNNWoqc8iGE57vsfwyJ/lP4O4DOV1e0B2o= +github.com/libp2p/go-libp2p-autonat v0.7.0 h1:rCP5s+A2dlhM1Xd66wurE0k7S7pPmM0D+FlqqSBXxks= +github.com/libp2p/go-libp2p-autonat v0.7.0/go.mod h1:uPvPn6J7cN+LCfFwW5tpOYvAz5NvPTc4iBamTV/WDMg= github.com/libp2p/go-libp2p-autonat-svc v0.1.0/go.mod 
h1:fqi8Obl/z3R4PFVLm8xFtZ6PBL9MlV/xumymRFkKq5A= github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= github.com/libp2p/go-libp2p-blankhost v0.1.3/go.mod h1:KML1//wiKR8vuuJO0y3LUd1uLv+tlkGTAr3jC0S5cLg= github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= github.com/libp2p/go-libp2p-blankhost v0.1.6/go.mod h1:jONCAJqEP+Z8T6EQviGL4JsQcLx1LgTGtVqFNY8EMfQ= -github.com/libp2p/go-libp2p-blankhost v0.2.0 h1:3EsGAi0CBGcZ33GwRuXEYJLLPoVWyXJ1bcJzAJjINkk= github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= +github.com/libp2p/go-libp2p-blankhost v0.3.0 h1:kTnLArltMabZlzY63pgGDA4kkUcLkBFSM98zBssn/IY= +github.com/libp2p/go-libp2p-blankhost v0.3.0/go.mod h1:urPC+7U01nCGgJ3ZsV8jdwTp6Ji9ID0dMTvq+aJ+nZU= github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= github.com/libp2p/go-libp2p-circuit v0.1.1/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= @@ -1222,9 +1270,9 @@ github.com/libp2p/go-libp2p-circuit v0.2.3/go.mod h1:nkG3iE01tR3FoQ2nMm06IUrCpCy github.com/libp2p/go-libp2p-circuit v0.4.0 h1:eqQ3sEYkGTtybWgr6JLqJY6QLtPWRErvFjFDfAOO1wc= github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= github.com/libp2p/go-libp2p-connmgr v0.1.1/go.mod h1:wZxh8veAmU5qdrfJ0ZBLcU8oJe9L82ciVP/fl1VHjXk= -github.com/libp2p/go-libp2p-connmgr v0.2.3/go.mod h1:Gqjg29zI8CwXX21zRxy6gOg8VYu3zVerJRt2KyktzH4= -github.com/libp2p/go-libp2p-connmgr v0.2.4 h1:TMS0vc0TCBomtQJyWr7fYxcVYYhx+q/2gF++G5Jkl/w= github.com/libp2p/go-libp2p-connmgr v0.2.4/go.mod h1:YV0b/RIm8NGPnnNWM7hG9Q38OeQiQfKhHCCs1++ufn0= +github.com/libp2p/go-libp2p-connmgr v0.3.0 
h1:yerFXrYa0oxpuVsLlndwm/bLulouHYDcvFrY/4H4fx8= +github.com/libp2p/go-libp2p-connmgr v0.3.0/go.mod h1:RVoyPjJm0J9Vd1m6qUN2Tn7kJm4rL1Ml20pFsFgPGik= github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= @@ -1254,8 +1302,12 @@ github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= +github.com/libp2p/go-libp2p-core v0.10.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.11.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.12.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= +github.com/libp2p/go-libp2p-core v0.13.0 h1:IFG/s8dN6JN2OTrXX9eq2wNU/Zlz2KLdwZUp5FplgXI= +github.com/libp2p/go-libp2p-core v0.13.0/go.mod h1:ECdxehoYosLYHgDDFa2N4yE8Y7aQRAMf0sX9mf2sbGg= github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= @@ -1266,8 +1318,8 @@ github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfx github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= 
github.com/libp2p/go-libp2p-discovery v0.4.0/go.mod h1:bZ0aJSrFc/eX2llP0ryhb1kpgkPyTo23SJ5b7UQCMh4= github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= -github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= -github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= +github.com/libp2p/go-libp2p-discovery v0.6.0 h1:1XdPmhMJr8Tmj/yUfkJMIi8mgwWrLUsCB3bMxdT+DSo= +github.com/libp2p/go-libp2p-discovery v0.6.0/go.mod h1:/u1voHt0tKIe5oIA1RHBKQLVCWPna2dXmPNHc2zR9S8= github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= @@ -1275,8 +1327,8 @@ github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod h1:GarlRLH0LdeWcLnYM github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= github.com/libp2p/go-libp2p-kad-dht v0.2.1/go.mod h1:k7ONOlup7HKzQ68dE6lSnp07cdxdkmnRa+6B4Fh9/w0= -github.com/libp2p/go-libp2p-kad-dht v0.13.0 h1:qBNYzee8BVS6RkD8ukIAGRG6LmVz8+kkeponyI7W+yA= -github.com/libp2p/go-libp2p-kad-dht v0.13.0/go.mod h1:NkGf28RNhPrcsGYWJHm6EH8ULkiJ2qxsWmpE7VTL3LI= +github.com/libp2p/go-libp2p-kad-dht v0.15.0 h1:Ke+Oj78gX5UDXnA6HBdrgvi+fStJxgYTDa51U0TsCLo= +github.com/libp2p/go-libp2p-kad-dht v0.15.0/go.mod h1:rZtPxYu1TnHHz6n1RggdGrxUX/tA1C2/Wiw3ZMUDrU0= github.com/libp2p/go-libp2p-kbucket v0.2.1/go.mod h1:/Rtu8tqbJ4WQ2KTCOMJhggMukOLNLNPY1EtEWWLxUvc= github.com/libp2p/go-libp2p-kbucket v0.3.1/go.mod h1:oyjT5O7tS9CQurok++ERgc46YLwEpuGoFq9ubvoUOio= github.com/libp2p/go-libp2p-kbucket v0.4.7 h1:spZAcgxifvFZHBD8tErvppbnNiKA5uokDu3CV7axu70= @@ -1295,17 +1347,17 @@ 
github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aD github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGSfvTkU= github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= +github.com/libp2p/go-libp2p-nat v0.1.0 h1:vigUi2MEN+fwghe5ijpScxtbbDz+L/6y8XwlzYOJgSY= +github.com/libp2p/go-libp2p-nat v0.1.0/go.mod h1:DQzAG+QbDYjN1/C3B6vXucLtz3u9rEonLVPtZVzQqks= github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.1.1/go.mod h1:QDFLdKX7nluB7DEnlVPbz7xlLHdwHFA9HiohJRr3vwM= github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= -github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= +github.com/libp2p/go-libp2p-noise v0.3.0 h1:NCVH7evhVt9njbTQshzT7N1S3Q6fjj9M11FCgfH5+cA= +github.com/libp2p/go-libp2p-noise v0.3.0/go.mod h1:JNjHbociDJKHD64KTkzGnzqJ0FEV5gHJa6AB00kbCNQ= github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= github.com/libp2p/go-libp2p-peer 
v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= @@ -1322,24 +1374,27 @@ github.com/libp2p/go-libp2p-peerstore v0.2.4/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuD github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= -github.com/libp2p/go-libp2p-peerstore v0.2.9 h1:tVa7siDymmzOl3b3+SxPYpQUCnicmK13y6Re1PqWK+g= -github.com/libp2p/go-libp2p-peerstore v0.2.9/go.mod h1:zhBaLzxiWpNGQ3+uI17G/OIjmOD8GxKyFuHbrZbgs0w= +github.com/libp2p/go-libp2p-peerstore v0.4.0/go.mod h1:rDJUFyzEWPpXpEwywkcTYYzDHlwza8riYMaUzaN6hX0= +github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= +github.com/libp2p/go-libp2p-peerstore v0.6.0/go.mod h1:DGEmKdXrcYpK9Jha3sS7MhqYdInxJy84bIPtSu65bKc= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= github.com/libp2p/go-libp2p-pubsub v0.1.1/go.mod h1:ZwlKzRSe1eGvSIdU5bD7+8RZN/Uzw0t1Bp9R1znpR/Q= -github.com/libp2p/go-libp2p-pubsub v0.3.2-0.20200527132641-c0712c6e92cf/go.mod h1:TxPOBuo1FPdsTjFnv+FGZbNbWYsp74Culx+4ViQpato= github.com/libp2p/go-libp2p-pubsub v0.3.2/go.mod h1:Uss7/Cfz872KggNb+doCVPHeCDmXB7z500m/R8DaAUk= -github.com/libp2p/go-libp2p-pubsub v0.5.4 h1:rHl9/Xok4zX3zgi0pg0XnUj9Xj2OeXO8oTu85q2+YA8= -github.com/libp2p/go-libp2p-pubsub v0.5.4/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-pubsub v0.6.0 h1:98+RXuEWW17U6cAijK1yaTf6mw/B+n5yPA421z+dlo0= +github.com/libp2p/go-libp2p-pubsub v0.6.0/go.mod 
h1:nJv87QM2cU0w45KPR1rZicq+FmFIOD16zmT+ep1nOmg= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6 h1:2lH7rMlvDPSvXeOR+g7FE6aqiEwxtpxWKQL8uigk5fQ= github.com/libp2p/go-libp2p-pubsub-tracer v0.0.0-20200626141350-e730b32bf1e6/go.mod h1:8ZodgKS4qRLayfw9FDKDd9DX4C16/GMofDxSldG8QPI= github.com/libp2p/go-libp2p-quic-transport v0.1.1/go.mod h1:wqG/jzhF3Pu2NrhJEvE+IE0NTHNXslOPn9JQzyCAxzU= github.com/libp2p/go-libp2p-quic-transport v0.5.0/go.mod h1:IEcuC5MLxvZ5KuHKjRu+dr3LjCT1Be3rcD/4d8JrX8M= github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= +github.com/libp2p/go-libp2p-quic-transport v0.13.0/go.mod h1:39/ZWJ1TW/jx1iFkKzzUg00W6tDJh73FC0xYudjr7Hc= +github.com/libp2p/go-libp2p-quic-transport v0.15.0/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= +github.com/libp2p/go-libp2p-quic-transport v0.15.2 h1:wHBEceRy+1/8Ec8dAIyr+/P7L2YefIGprPVy5LrMM+k= +github.com/libp2p/go-libp2p-quic-transport v0.15.2/go.mod h1:wv4uGwjcqe8Mhjj7N/Ic0aKjA+/10UnMlSzLO0yRpYQ= github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= github.com/libp2p/go-libp2p-record v0.1.1/go.mod h1:VRgKajOyMVgP/F0L5g3kH7SVskp17vFi2xheb5uMJtg= @@ -1364,10 +1419,11 @@ github.com/libp2p/go-libp2p-swarm v0.2.4/go.mod h1:/xIpHFPPh3wmSthtxdGbkHZ0OET1h github.com/libp2p/go-libp2p-swarm v0.2.7/go.mod h1:ZSJ0Q+oq/B1JgfPHJAT2HTall+xYRNYp1xs4S2FBWKA= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.0/go.mod 
h1:XVFcO52VoLoo0eitSxNQWYq4D6sydGOweTOAjJNraCw= github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= +github.com/libp2p/go-libp2p-swarm v0.8.0/go.mod h1:sOMp6dPuqco0r0GHTzfVheVBh6UEL0L1lXUZ5ot2Fvc= +github.com/libp2p/go-libp2p-swarm v0.9.0 h1:LdWjHDVjPMYt3NCG2EHcQiIP8XzA8BHhHz8ZLAYol2Y= +github.com/libp2p/go-libp2p-swarm v0.9.0/go.mod h1:2f8d8uxTJmpeqHF/1ujjdXZp+98nNIbujVOMEZxCbZ8= github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -1377,22 +1433,26 @@ github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eq github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= -github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-testing v0.5.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= +github.com/libp2p/go-libp2p-testing v0.6.0 h1:tV/wz6mS1VoAYA/5DGTiyzw9TJ+eXMCMvzU5VPLJSgg= +github.com/libp2p/go-libp2p-testing v0.6.0/go.mod h1:QBk8fqIL1XNcno/l3/hhaIEn4aLRijpYOR+zVjjlh+A= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= 
-github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= +github.com/libp2p/go-libp2p-tls v0.3.0/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= +github.com/libp2p/go-libp2p-tls v0.3.1 h1:lsE2zYte+rZCEOHF72J1Fg3XK3dGQyKvI6i5ehJfEp0= +github.com/libp2p/go-libp2p-tls v0.3.1/go.mod h1:fwF5X6PWGxm6IDRwF3V8AVCCj/hOd5oFlg+wo2FxJDY= github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.0/go.mod h1:J4ko0ObtZSmgn5BX5AmegP+dK3CSnU2lMCKsSq/EY0s= github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= +github.com/libp2p/go-libp2p-transport-upgrader v0.5.0/go.mod h1:Rc+XODlB3yce7dvFV4q/RmyJGsFcCZRkeZMu/Zdg0mo= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0 h1:GfMCU+2aGGEm1zW3UcOz6wYSn8tXQalFfVfcww99i5A= +github.com/libp2p/go-libp2p-transport-upgrader v0.6.0/go.mod h1:1e07y1ZSZdHo9HPbuU8IztM1Cj+DR5twgycb4pnRzRo= github.com/libp2p/go-libp2p-xor v0.0.0-20210714161855-5c005aca55db/go.mod h1:LSTM5yRnjGZbWNTA/hRwq2gGFrvRIbQJscoIL/u6InY= 
github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= @@ -1404,10 +1464,10 @@ github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhL github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.1/go.mod h1:dowuvDu8CRWmr0iqySMiSxK+W0iL5cMVO9S94Y6gkv4= -github.com/libp2p/go-libp2p-yamux v0.5.3/go.mod h1:Vy3TMonBAfTMXHWopsMc8iX/XGRYrRlpUaMzaeuHV/s= -github.com/libp2p/go-libp2p-yamux v0.5.4 h1:/UOPtT/6DHPtr3TtKXBHa6g0Le0szYuI33Xc/Xpd7fQ= github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= +github.com/libp2p/go-libp2p-yamux v0.6.0/go.mod h1:MRhd6mAYnFRnSISp4M8i0ClV/j+mWHo2mYLifWGw33k= +github.com/libp2p/go-libp2p-yamux v0.7.0 h1:bVXHbTj/XH4uBBsPrg26BlDABk5WYRlssY73P0SjhPc= +github.com/libp2p/go-libp2p-yamux v0.7.0/go.mod h1:fMyA0CsPfHkIuBU0wjRGrCjTBFiXTXxG0k5M4ETv+08= github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= @@ -1424,12 +1484,14 @@ github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6 h1:lQ7Uc0kS1wb1EfRxO2Eir/RJoHkHn7t6o+EiwsYIKJA= 
github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= +github.com/libp2p/go-msgio v0.1.0 h1:8Q7g/528ivAlfXTFWvWhVjTE8XG8sDTkRUKPYh9+5Q8= +github.com/libp2p/go-msgio v0.1.0/go.mod h1:eNlv2vy9V2X/kNldcZ+SShFE++o2Yjxwx6RAYsmgJnE= github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5 h1:qxnwkco8RLKqVh1NmjQ+tJ8p8khNLFxuElYG/TwqW4Q= github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= +github.com/libp2p/go-nat v0.1.0 h1:MfVsH6DLcpa04Xr+p8hmVRG4juse0s3J8HyNWYHffXg= +github.com/libp2p/go-nat v0.1.0/go.mod h1:X7teVkwRHNInVNWQiO/tAiAVRwSr5zoRz4YSTC3uRBM= github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= @@ -1442,13 +1504,15 @@ github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO github.com/libp2p/go-openssl v0.0.7 h1:eCAzdLejcNVBzP/iZM9vqHnQm+XyCEbSSIheIPRGNsw= github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyCXYvU= github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= +github.com/libp2p/go-reuseport v0.1.0 h1:0ooKOx2iwyIkf339WCZ2HN3ujTDbkK0PjC7JVoP1AiM= +github.com/libp2p/go-reuseport v0.1.0/go.mod h1:bQVn9hmfcTaoo0c9v5pBhOarsU1eNOBZdaAd2hzXRKU= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= 
github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= +github.com/libp2p/go-reuseport-transport v0.1.0 h1:C3PHeHjmnz8m6f0uydObj02tMEoi7CyD1zuN7xQT8gc= +github.com/libp2p/go-reuseport-transport v0.1.0/go.mod h1:vev0C0uMkzriDY59yFHD9v+ujJvYmDQVLowvAjEOmfw= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= github.com/libp2p/go-sockaddr v0.1.1 h1:yD80l2ZOdGksnOyHrhxDdTDFrf7Oy+v3FMVArIRgZxQ= @@ -1463,11 +1527,11 @@ github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.1/go.mod h1:zskiJ70MEfWz2MKxvFB/Pv+tPIB1PpPUrHIWQ8aFw7M= +github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= -github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= -github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= +github.com/libp2p/go-tcp-transport v0.4.0 h1:VDyg4j6en3OuXf90gfDQh5Sy9KowO9udnd0OU8PP6zg= +github.com/libp2p/go-tcp-transport v0.4.0/go.mod h1:0y52Rwrn4076xdJYu/51/qJIdxz+EWDAOG2S45sV3VI= github.com/libp2p/go-testutil v0.0.1/go.mod 
h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= @@ -1485,28 +1549,30 @@ github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZ github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.6/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI= github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.0.0/go.mod h1:NVWira5+sVUIU6tu1JWvaRn1dRnG+cawOJiflsAM+7U= -github.com/libp2p/go-yamux/v2 v2.1.1/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= -github.com/libp2p/zeroconf/v2 v2.0.0/go.mod h1:J85R/d9joD8u8F9aHM8pBXygtG9W02enEwS+wWeL6yo= +github.com/libp2p/go-yamux/v2 v2.3.0 h1:luRV68GS1vqqr6EFUjtu1kr51d+IbW0gSowu8emYWAI= +github.com/libp2p/go-yamux/v2 v2.3.0/go.mod h1:iTU+lOIn/2h0AgKcL49clNTwfEw+WSfDYrXe05EyKIs= +github.com/libp2p/zeroconf/v2 v2.1.1/go.mod h1:fuJqLnUwZTshS3U/bMRJ3+ow/v9oid1n0DmyYyNO1Xs= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod 
h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lucas-clemente/quic-go v0.11.2/go.mod h1:PpMmPfPKO9nKJ/psF49ESTAGQSdfXxlg1otPbEB2nOw= github.com/lucas-clemente/quic-go v0.16.0/go.mod h1:I0+fcNTdb9eS1ZcjQZbDVPGchJ86chcIxPALn9lEJqE= github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= -github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= +github.com/lucas-clemente/quic-go v0.23.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= +github.com/lucas-clemente/quic-go v0.24.0 h1:ToR7SIIEdrgOhgVTHvPgdVRJfgVy+N0wQAagH7L4d5g= +github.com/lucas-clemente/quic-go v0.24.0/go.mod h1:paZuzjXCE5mj6sikVLMvqXk8lJV2AsqtJ6bDhjEfxx0= github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magefile/mage v1.9.0 h1:t3AU2wNwehMCW97vuqQLtw6puppWXHO+O2MHo5a50XE= +github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1525,12 +1591,12 @@ github.com/marten-seemann/qtls v0.9.1/go.mod h1:T1MmAdDPyISzxlK6kjRr0pcZFBVd1OZb github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= 
github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= -github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= -github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= +github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk= +github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -1540,8 +1606,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= 
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1551,8 +1618,9 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= @@ -1577,7 +1645,6 @@ github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg= github.com/miekg/dns v1.1.43/go.mod 
h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= @@ -1590,6 +1657,8 @@ github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4S github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= @@ -1598,6 +1667,7 @@ github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1608,10 +1678,12 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= 
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= @@ -1620,8 +1692,9 @@ github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjW github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg= -github.com/multiformats/go-base32 v0.0.3 h1:tw5+NhuwaOjJCC5Pp82QuXbrmLzWg7uxlMFp8Nq/kkI= github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= +github.com/multiformats/go-base32 v0.0.4 h1:+qMh4a2f37b4xTNs6mqitDinryCI+tfO2dRVMN9mjSE= +github.com/multiformats/go-base32 v0.0.4/go.mod h1:jNLFzjPZtp3aIARHbJRZIaPuspdH0J6q39uUM5pnABM= github.com/multiformats/go-base36 v0.1.0 h1:JR6TyF7JjGd3m6FbLU2cOxhC0Li8z8dLNGQ89tUg4F4= 
github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= @@ -1635,8 +1708,9 @@ github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.4.0 h1:hL/K4ZJhJ5PTw3nwylq9lGU5yArzcAroZmex1ghSEkQ= github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= +github.com/multiformats/go-multiaddr v0.4.1 h1:Pq37uLx3hsyNlTDir7FZyU8+cFCTqd5y1KiM2IzOutI= +github.com/multiformats/go-multiaddr v0.4.1/go.mod h1:3afI9HfVW8csiF8UZqtpYRiDyew8pRX7qLIGHu9FLuM= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.3/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= @@ -1660,10 +1734,9 @@ github.com/multiformats/go-multibase v0.0.2/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/g github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multicodec v0.2.0/go.mod h1:/y4YVwkfMyry5kFbMTbLJKErhycTIftytRV+llXdyS4= -github.com/multiformats/go-multicodec v0.2.1-0.20210713081508-b421db6850ae/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.2.1-0.20210714093213-b2b5bd6fe68b/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= -github.com/multiformats/go-multicodec v0.3.0 
h1:tstDwfIjiHbnIjeM5Lp+pMrSeN+LCMsEwOrkPmWm03A= github.com/multiformats/go-multicodec v0.3.0/go.mod h1:qGGaQmioCDh+TeFOnxrbU0DaIPw8yFgAZgFG0V7p1qQ= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61 h1:ZrUuMKNgJ52qHPoQ+bx0h0uBfcWmN7Px+4uKSZeesiI= +github.com/multiformats/go-multicodec v0.3.1-0.20210902112759-1539a079fd61/go.mod h1:1Hj/eHRaVWSXiSNNfcEPcwZleTmdNP81xlxDLnWU9GQ= github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= @@ -1671,13 +1744,13 @@ github.com/multiformats/go-multihash v0.0.9/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15 h1:hWOPdrNqDjwHDx82vsYGSDZNyktOJJ2dzZJzFkOV1jM= github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= +github.com/multiformats/go-multihash v0.1.0 h1:CgAgwqk3//SVEw3T+6DqI4mWMyRuDwZtOWcJT0q9+EA= +github.com/multiformats/go-multihash v0.1.0/go.mod h1:RJlXsxt6vHGaia+S8We0ErjhojtKzPP2AH4+kYM7k84= github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.0/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= 
github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= github.com/multiformats/go-multistream v0.2.2 h1:TCYu1BHTDr1F/Qm75qwYISQdzGcRdC21nFgQW7l7GBo= github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= @@ -1692,10 +1765,16 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= @@ -1703,8 +1782,8 @@ github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod 
h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c h1:5bFTChQxSKNwy8ALwOebjekYExl9HTT9urdawqC95tA= github.com/nikkolasg/hexjson v0.0.0-20181101101858-78e39397e00c/go.mod h1:7qN3Y0BvzRUf4LofcoJplQL10lsFDb4PYlePTVwrP28= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= -github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= +github.com/nkovacs/streamquote v1.0.0 h1:PmVIV08Zlx2lZK5fFZlMZ04eHcDTIFJCv/5/0twVUow= +github.com/nkovacs/streamquote v1.0.0/go.mod h1:BN+NaZ2CmdKqUuTUXUEm9j95B2TRbpOWpxbJYzzgUsc= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1758,6 +1837,7 @@ github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTm github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1766,6 +1846,7 @@ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtP github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9 h1:1/WtZae0yGtPq+TI6+Tv1WTxkukpXeMlviSxvL7SRgk= github.com/petar/GoLLRB v0.0.0-20210522233825-ae3b015fd3e9/go.mod h1:x3N5drFsm2uilKKuuYo6LdyD8vZAW55sH/9w+pbo1sw= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -1789,6 +1870,7 @@ github.com/polydawn/refmt v0.0.0-20190809202753-05966cbd336a/go.mod h1:uIp+gprXx github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e h1:ZOcivgkkFRnjfoTcGsDq3UQYiBmekwLA+qg0OjyB/ls= github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= @@ -1801,7 +1883,6 @@ github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeD github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= 
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= @@ -1825,6 +1906,7 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= @@ -1838,17 +1920,18 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.1.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= 
github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY= +github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= +github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/raulk/clock v1.1.0 h1:dpb29+UKMbLqiU/jqIJptgLR1nn23HLgMY0sTCDza5Y= github.com/raulk/clock v1.1.0/go.mod h1:3MpVxdZ/ODBQDxbN+kzshf5OSZwPjtMDx6BBXBmOeY0= -github.com/raulk/go-watchdog v1.0.1 h1:qgm3DIJAeb+2byneLrQJ7kvmDLGxN2vy3apXyGaDKN4= -github.com/raulk/go-watchdog v1.0.1/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= +github.com/raulk/go-watchdog v1.2.0 h1:konN75pw2BMmZ+AfuAm5rtFsWcJpKF3m02rKituuXNo= +github.com/raulk/go-watchdog v1.2.0/go.mod h1:lzSbAl5sh4rtI8tYHU01BWIDzgzqaQLj6RcA1i4mlqI= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1862,6 +1945,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= @@ 
-1957,7 +2042,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/streadway/quantile v0.0.0-20150917103942-b0c588724d25/go.mod h1:lbP8tGiBjZ5YWIc2fzuRpTaz0b/53vT6PEs3QuAWzuU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1993,6 +2080,7 @@ github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixU github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible h1:G4QSBfvPKvg5ZM2j9MrJFdfI5iSljY/WnJqOGFao6HI= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= @@ -2033,7 +2121,6 @@ github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11 
h1:5HZfQkwe0mIf github.com/whyrusleeping/cbor v0.0.0-20171005072247-63513f603b11/go.mod h1:Wlo/SzPmxVp6vXpGt/zaXhHH0fn4IxgqZc82aKg6bpQ= github.com/whyrusleeping/cbor-gen v0.0.0-20191216205031-b047b6acb3c0/go.mod h1:xdlJQaiqipF0HW+Mzpg7XRM3fWbGvfgFlcppuvlkIvY= github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20200402171437-3d27c146c105/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200414195334-429a0b5e922e/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= github.com/whyrusleeping/cbor-gen v0.0.0-20200504204219-64967432584d/go.mod h1:W5MvapuoHRP8rz4vxjwCK1pDqF1aQcWsV5PZ+AHbqdg= github.com/whyrusleeping/cbor-gen v0.0.0-20200710004633-5379fc63235d/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= @@ -2045,6 +2132,7 @@ github.com/whyrusleeping/cbor-gen v0.0.0-20200812213548-958ddffe352c/go.mod h1:f github.com/whyrusleeping/cbor-gen v0.0.0-20200826160007-0b9f6c5fb163/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210118024343-169e9d70c0c2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210219115102-f37d292932f2/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/cbor-gen v0.0.0-20210303213153-67a261a1d291/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8 h1:TEv7MId88TyIqIUL4hbf9otOookIolMxlEbN0ro671Y= github.com/whyrusleeping/cbor-gen v0.0.0-20210713220151-be142a5ae1a8/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= @@ -2086,6 +2174,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zondax/hid v0.9.0 h1:eiT3P6vNxAEVxXMw66eZUAAnU2zD33JBkfG/EnfAKl8= github.com/zondax/hid v0.9.0/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= github.com/zondax/ledger-go v0.12.1 h1:hYRcyznPRJp+5mzF2sazTLP2nGvGjYDD2VzhHhFomLU= @@ -2103,6 +2192,10 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -2117,13 +2210,28 @@ go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= 
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.2.0/go.mod h1:aT17Fk0Z1Nor9e0uisf98LrntPGMnk4frBO9+dkf69I= +go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/bridge/opencensus v0.25.0/go.mod h1:dkZDdaNwLlIutxK2Kc2m3jwW2M1ISaNf8/rOYVwuVHs= +go.opentelemetry.io/otel/exporters/jaeger v1.2.0/go.mod h1:KJLFbEMKTNPIfOxcg/WikIozEoKcPgJRz3Ce1vLlM8E= +go.opentelemetry.io/otel/internal/metric v0.25.0/go.mod h1:Nhuw26QSX7d6n4duoqAFi5KOQR4AuzyMcl5eXOgwxtc= go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/metric v0.25.0/go.mod h1:E884FSpQfnJOMMUaq+05IWlJ4rjZpk2s/F1Ju+TEEm8= go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.2.0 h1:wKN260u4DesJYhyjxDa7LRFkuhH7ncEVKU37LWcyNIo= +go.opentelemetry.io/otel/sdk v1.2.0/go.mod h1:jNN8QtpvbsKhgaC6V5lHiejMoKD+V8uadoSafgHPx1U= +go.opentelemetry.io/otel/sdk/export/metric v0.25.0/go.mod h1:Ej7NOa+WpN49EIcr1HMUYRvxXXCCnQCg2+ovdt2z8Pk= +go.opentelemetry.io/otel/sdk/metric v0.25.0/go.mod h1:G4xzj4LvC6xDDSsVXpvRVclQCbofGGg4ZU2VKKtDRfg= go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.2.0/go.mod h1:N5FLswTubnxKxOJHM7XZC074qpeEdLy3CgAVsdMucK0= +go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= go.opentelemetry.io/proto/otlp v0.7.0/go.mod 
h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -2138,8 +2246,9 @@ go.uber.org/dig v1.10.0/go.mod h1:X34SnWGr8Fyla9zQNO2GSO2D+TIuqB14OS8JhYocIyw= go.uber.org/fx v1.9.0 h1:7OAz8ucp35AU8eydejpYG7QrbE8rLKzGhHbZlJi5LYY= go.uber.org/fx v1.9.0/go.mod h1:mFdUyAUuJ3w4jAckiKSKbldsxy1ojpAMJ+dVZg5Y0Aw= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4= +go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -2155,9 +2264,11 @@ go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= +go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= go4.org 
v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= go4.org v0.0.0-20200411211856-f5505b9728dd h1:BNJlw5kRTzdmyfh5U8F93HA2OwkP7ZGwA51eJ/0wKOU= go4.org v0.0.0-20200411211856-f5505b9728dd/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= @@ -2187,14 +2298,15 @@ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200117160349-530e935923ad/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -2206,12 +2318,16 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e h1:VvfwVmMH40bpMeizC9/K7ipM5Qjucuu16RWfneFPyhQ= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b h1:QAqMVf3pSa6eeTsuklijukjXBlj7Es2QQplab+/RbQ4= +golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2227,7 +2343,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20210615023648-acb5c1269671/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210714144626-1041f73d31d8/go.mod h1:DVyR6MI7P4kEQgvZJSj1fQGrWIi2RzIrfYWycwheUAc= golang.org/x/exp v0.0.0-20210715201039-d37aa40e8013 h1:Jp57DBw4K7mimZNA3F9f7CndVcUt4kJjmyJf2rzJHoI= @@ -2245,8 +2360,9 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile 
v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= @@ -2257,7 +2373,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2311,10 +2426,8 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200519113804-d87ec0cfa476/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -2329,8 +2442,12 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2367,6 +2484,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2399,6 +2517,7 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2454,7 +2573,10 @@ golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210331175145-43e1dd70ce54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2462,9 +2584,12 @@ golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211209171907-798191bca915 h1:P+8mCzuEpyszAT6T42q0sxU+eveBAF/cJ2Kp0x6/8+0= +golang.org/x/sys v0.0.0-20211209171907-798191bca915/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf 
h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= @@ -2486,6 +2611,7 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2518,6 +2644,7 @@ golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2559,15 +2686,15 @@ golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200827010519-17fd2f27a9e3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201112185108-eeaa07dd7696/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1-0.20210225150353-54dc8c5edb56/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2584,7 +2711,6 @@ google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= google.golang.org/api v0.3.1/go.mod 
h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= -google.golang.org/api v0.3.2/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -2645,12 +2771,13 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987 h1:PDIOdWxZ8eRizhKa1AAvY53xsvLB1cWorMjslvY3VA8= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc 
v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= @@ -2678,6 +2805,7 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2754,6 +2882,9 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/blake3 v1.1.6/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= +lukechampine.com/blake3 v1.1.7 h1:GgRMhmdsuK8+ii6UZFDL8Nb+VyMwadAgcJyfYHxG6n0= +lukechampine.com/blake3 v1.1.7/go.mod h1:tkKEOtDkNtklkXtLNEOGNq5tcV90tJiA1vAA12R78LA= modernc.org/cc v1.0.0 h1:nPibNuDEx6tvYrUAtvDTTw98rx5juGsa5zuDnKwEEQQ= modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= @@ -2774,6 +2905,7 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff/v2 
v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/testplans/lotus-soup/rfwp/chain_state.go b/testplans/lotus-soup/rfwp/chain_state.go index d91acdff9..38b8b504e 100644 --- a/testplans/lotus-soup/rfwp/chain_state.go +++ b/testplans/lotus-soup/rfwp/chain_state.go @@ -31,7 +31,7 @@ import ( "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/chain/actors/builtin/miner" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { @@ -40,7 +40,7 @@ func UpdateChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { return err } diff --git a/testplans/lotus-soup/rfwp/html_chain_state.go b/testplans/lotus-soup/rfwp/html_chain_state.go index 7a3d56be4..3c840facd 100644 --- a/testplans/lotus-soup/rfwp/html_chain_state.go +++ b/testplans/lotus-soup/rfwp/html_chain_state.go @@ -11,7 +11,7 @@ import ( "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/cli" - tstats 
"github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" "github.com/ipfs/go-cid" ) @@ -22,8 +22,9 @@ func FetchChainState(t *testkit.TestEnvironment, m *testkit.LotusMiner) error { ctx := context.Background() api := m.FullApi - tipsetsCh, err := tstats.GetTips(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(ctx, &v0api.WrapperV1Full{FullNode: m.FullApi}, abi.ChainEpoch(height), headlag) if err != nil { + return err } diff --git a/testplans/lotus-soup/testkit/deals.go b/testplans/lotus-soup/testkit/deals.go index f0910537d..703e6888a 100644 --- a/testplans/lotus-soup/testkit/deals.go +++ b/testplans/lotus-soup/testkit/deals.go @@ -12,7 +12,7 @@ import ( "github.com/filecoin-project/lotus/chain/types" "github.com/ipfs/go-cid" - tstats "github.com/filecoin-project/lotus/tools/stats" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" ) func StartDeal(ctx context.Context, minerActorAddr address.Address, client api.FullNode, fcid cid.Cid, fastRetrieval bool) *cid.Cid { @@ -46,7 +46,7 @@ func WaitDealSealed(t *TestEnvironment, ctx context.Context, client api.FullNode cctx, cancel := context.WithCancel(ctx) defer cancel() - tipsetsCh, err := tstats.GetTips(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) + tipsetsCh, err := tsync.BufferedTipsetChannel(cctx, &v0api.WrapperV1Full{FullNode: client}, abi.ChainEpoch(height), headlag) if err != nil { panic(err) } diff --git a/testplans/lotus-soup/testkit/node.go b/testplans/lotus-soup/testkit/node.go index e70f58e38..9506c4bf4 100644 --- a/testplans/lotus-soup/testkit/node.go +++ b/testplans/lotus-soup/testkit/node.go @@ -8,8 +8,8 @@ import ( "sort" "time" + "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/lotus/api" - "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/chain/beacon" 
"github.com/filecoin-project/lotus/chain/wallet" "github.com/filecoin-project/lotus/metrics" @@ -17,7 +17,11 @@ import ( "github.com/filecoin-project/lotus/node" "github.com/filecoin-project/lotus/node/modules/dtypes" modtest "github.com/filecoin-project/lotus/node/modules/testing" - tstats "github.com/filecoin-project/lotus/tools/stats" + + tinflux "github.com/filecoin-project/lotus/tools/stats/influx" + tipldstore "github.com/filecoin-project/lotus/tools/stats/ipldstore" + tpoints "github.com/filecoin-project/lotus/tools/stats/points" + tsync "github.com/filecoin-project/lotus/tools/stats/sync" influxdb "github.com/kpacha/opencensus-influxdb" ma "github.com/multiformats/go-multiaddr" @@ -234,7 +238,7 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err influxPass := "" influxDb := "testground" - influx, err := tstats.InfluxClient(influxAddr, influxUser, influxPass) + influxClient, err := tinflux.NewClient(influxAddr, influxUser, influxPass) if err != nil { t.RecordMessage(err.Error()) return err @@ -246,7 +250,38 @@ func collectStats(t *TestEnvironment, ctx context.Context, api api.FullNode) err go func() { time.Sleep(15 * time.Second) t.RecordMessage("calling tstats.Collect") - tstats.Collect(context.Background(), &v0api.WrapperV1Full{FullNode: api}, influx, influxDb, height, headlag) + + store, err := tipldstore.NewApiIpldStore(ctx, api, 1024) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + collector, err := tpoints.NewChainPointCollector(ctx, store, api) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + tipsets, err := tsync.BufferedTipsetChannel(ctx, api, abi.ChainEpoch(height), headlag) + if err != nil { + t.RecordMessage(err.Error()) + return + } + + wq := tinflux.NewWriteQueue(ctx, influxClient) + defer wq.Close() + + for tipset := range tipsets { + if nb, err := collector.Collect(ctx, tipset); err != nil { + t.RecordMessage(err.Error()) + return + } else { + nb.SetDatabase(influxDb) + 
wq.AddBatch(nb) + } + } + }() return nil diff --git a/testplans/lotus-soup/testkit/retrieval.go b/testplans/lotus-soup/testkit/retrieval.go index de3dee6be..67e8d1654 100644 --- a/testplans/lotus-soup/testkit/retrieval.go +++ b/testplans/lotus-soup/testkit/retrieval.go @@ -11,6 +11,7 @@ import ( "time" "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/api/v0api" "github.com/ipfs/go-cid" files "github.com/ipfs/go-ipfs-files" ipld "github.com/ipfs/go-ipld-format" @@ -51,7 +52,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, IsCAR: carExport, } t1 = time.Now() - err = client.ClientRetrieve(ctx, offers[0].Order(caddr), ref) + err = (&v0api.WrapperV1Full{FullNode: client}).ClientRetrieve(ctx, v0api.OfferOrder(offers[0], caddr), ref) if err != nil { return err } @@ -77,7 +78,7 @@ func RetrieveData(t *TestEnvironment, ctx context.Context, client api.FullNode, func ExtractCarData(ctx context.Context, rdata []byte, rpath string) []byte { bserv := dstest.Bserv() - ch, err := car.LoadCar(bserv.Blockstore(), bytes.NewReader(rdata)) + ch, err := car.LoadCar(ctx, bserv.Blockstore(), bytes.NewReader(rdata)) if err != nil { panic(err) } diff --git a/testplans/lotus-soup/testkit/role_miner.go b/testplans/lotus-soup/testkit/role_miner.go index fc821cd4d..7204c71fe 100644 --- a/testplans/lotus-soup/testkit/role_miner.go +++ b/testplans/lotus-soup/testkit/role_miner.go @@ -182,7 +182,7 @@ func PrepareMiner(t *TestEnvironment) (*LotusMiner, error) { return nil, err } - err = ds.Put(datastore.NewKey("miner-address"), minerAddr.Bytes()) + err = ds.Put(context.Background(), datastore.NewKey("miner-address"), minerAddr.Bytes()) if err != nil { return nil, err } diff --git a/testplans/lotus-soup/testkit/role_pubsub_tracer.go b/testplans/lotus-soup/testkit/role_pubsub_tracer.go index 5b13e6b81..401a9824d 100644 --- a/testplans/lotus-soup/testkit/role_pubsub_tracer.go +++ b/testplans/lotus-soup/testkit/role_pubsub_tracer.go @@ 
-30,7 +30,7 @@ func PreparePubsubTracer(t *TestEnvironment) (*PubsubTracer, error) { tracedIP := t.NetClient.MustGetDataNetworkIP().String() tracedAddr := fmt.Sprintf("/ip4/%s/tcp/4001", tracedIP) - host, err := libp2p.New(ctx, + host, err := libp2p.New( libp2p.Identity(privk), libp2p.ListenAddrStrings(tracedAddr), ) diff --git a/tools/packer/repo/config.toml b/tools/packer/repo/config.toml index 900dad218..380d5a28f 100644 --- a/tools/packer/repo/config.toml +++ b/tools/packer/repo/config.toml @@ -21,6 +21,7 @@ ListenAddresses = ["/ip4/0.0.0.0/tcp/5678", "/ip6/::/tcp/5678"] # IpfsMAddr = "" # IpfsUseForRetrieval = false # SimultaneousTransfersForStorage = 20 +# SimultaneousTransfersForStoragePerClient = 0 # SimultaneousTransfersForRetrieval = 20 # [Metrics] diff --git a/tools/stats/collect.go b/tools/stats/collect.go deleted file mode 100644 index e33ec994b..000000000 --- a/tools/stats/collect.go +++ /dev/null @@ -1,63 +0,0 @@ -package stats - -import ( - "context" - "time" - - "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/api/v0api" - client "github.com/influxdata/influxdb1-client/v2" -) - -func Collect(ctx context.Context, api v0api.FullNode, influx client.Client, database string, height int64, headlag int) { - tipsetsCh, err := GetTips(ctx, api, abi.ChainEpoch(height), headlag) - if err != nil { - log.Fatal(err) - } - - wq := NewInfluxWriteQueue(ctx, influx) - defer wq.Close() - - for tipset := range tipsetsCh { - log.Infow("Collect stats", "height", tipset.Height()) - pl := NewPointList() - height := tipset.Height() - - if err := RecordTipsetPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record tipset", "height", height, "error", err) - continue - } - - if err := RecordTipsetMessagesPoints(ctx, api, pl, tipset); err != nil { - log.Warnw("Failed to record messages", "height", height, "error", err) - continue - } - - if err := RecordTipsetStatePoints(ctx, api, pl, tipset); err != nil { - 
log.Warnw("Failed to record state", "height", height, "error", err) - continue - } - - // Instead of having to pass around a bunch of generic stuff we want for each point - // we will just add them at the end. - - tsTimestamp := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - - nb, err := InfluxNewBatch() - if err != nil { - log.Fatal(err) - } - - for _, pt := range pl.Points() { - pt.SetTime(tsTimestamp) - - nb.AddPoint(NewPointFrom(pt)) - } - - nb.SetDatabase(database) - - log.Infow("Adding points", "count", len(nb.Points()), "height", tipset.Height()) - - wq.AddBatch(nb) - } -} diff --git a/tools/stats/head_buffer.go b/tools/stats/head_buffer.go deleted file mode 100644 index 0a7c63e6e..000000000 --- a/tools/stats/head_buffer.go +++ /dev/null @@ -1,47 +0,0 @@ -package stats - -import ( - "container/list" - - "github.com/filecoin-project/lotus/api" -) - -type headBuffer struct { - buffer *list.List - size int -} - -func newHeadBuffer(size int) *headBuffer { - buffer := list.New() - buffer.Init() - - return &headBuffer{ - buffer: buffer, - size: size, - } -} - -func (h *headBuffer) push(hc *api.HeadChange) (rethc *api.HeadChange) { - if h.buffer.Len() == h.size { - var ok bool - - el := h.buffer.Front() - rethc, ok = el.Value.(*api.HeadChange) - if !ok { - panic("Value from list is not the correct type") - } - - h.buffer.Remove(el) - } - - h.buffer.PushBack(hc) - - return -} - -func (h *headBuffer) pop() { - el := h.buffer.Back() - if el != nil { - h.buffer.Remove(el) - } -} diff --git a/tools/stats/head_buffer_test.go b/tools/stats/head_buffer_test.go deleted file mode 100644 index 4059f730e..000000000 --- a/tools/stats/head_buffer_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package stats - -import ( - "testing" - - "github.com/filecoin-project/lotus/api" - "github.com/stretchr/testify/require" -) - -func TestHeadBuffer(t *testing.T) { - - t.Run("Straight push through", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: 
"1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - }) - - t.Run("Reverts", func(t *testing.T) { - hb := newHeadBuffer(5) - require.Nil(t, hb.push(&api.HeadChange{Type: "1"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "2"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "3"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3a"})) - hb.pop() - require.Nil(t, hb.push(&api.HeadChange{Type: "3b"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "4"})) - require.Nil(t, hb.push(&api.HeadChange{Type: "5"})) - - hc := hb.push(&api.HeadChange{Type: "6"}) - require.Equal(t, hc.Type, "1") - hc = hb.push(&api.HeadChange{Type: "7"}) - require.Equal(t, hc.Type, "2") - hc = hb.push(&api.HeadChange{Type: "8"}) - require.Equal(t, hc.Type, "3b") - }) -} diff --git a/tools/stats/headbuffer/head_buffer.go b/tools/stats/headbuffer/head_buffer.go new file mode 100644 index 000000000..5f668ab6e --- /dev/null +++ b/tools/stats/headbuffer/head_buffer.go @@ -0,0 +1,56 @@ +package headbuffer + +import ( + "container/list" + + "github.com/filecoin-project/lotus/api" +) + +type HeadChangeStackBuffer struct { + buffer *list.List + size int +} + +// NewHeadChangeStackBuffer buffer HeadChange events to avoid having to +// deal with revert changes. Initialized size should be the average reorg +// size + 1 +func NewHeadChangeStackBuffer(size int) *HeadChangeStackBuffer { + buffer := list.New() + buffer.Init() + + return &HeadChangeStackBuffer{ + buffer: buffer, + size: size, + } +} + +// Push adds a HeadChange to stack buffer. If the length of +// the stack buffer grows larger than the initizlized size, the +// oldest HeadChange is returned. 
+func (h *HeadChangeStackBuffer) Push(hc *api.HeadChange) (rethc *api.HeadChange) { + if h.buffer.Len() >= h.size { + var ok bool + + el := h.buffer.Front() + rethc, ok = el.Value.(*api.HeadChange) + if !ok { + // This shouldn't be possible, this method is typed and is the only place data + // pushed to the buffer. + panic("A cosmic ray made me do it") + } + + h.buffer.Remove(el) + } + + h.buffer.PushBack(hc) + + return +} + +// Pop removes the last added HeadChange +func (h *HeadChangeStackBuffer) Pop() { + el := h.buffer.Back() + if el != nil { + h.buffer.Remove(el) + } +} diff --git a/tools/stats/headbuffer/head_buffer_test.go b/tools/stats/headbuffer/head_buffer_test.go new file mode 100644 index 000000000..8a748c714 --- /dev/null +++ b/tools/stats/headbuffer/head_buffer_test.go @@ -0,0 +1,42 @@ +package headbuffer + +import ( + "testing" + + "github.com/filecoin-project/lotus/api" + "github.com/stretchr/testify/require" +) + +func TestHeadBuffer(t *testing.T) { + t.Run("Straight Push through", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + }) + + t.Run("Reverts", func(t *testing.T) { + hb := NewHeadChangeStackBuffer(5) + require.Nil(t, hb.Push(&api.HeadChange{Type: "1"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "2"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "3"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3a"})) + hb.Pop() + require.Nil(t, hb.Push(&api.HeadChange{Type: "3b"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "4"})) + require.Nil(t, hb.Push(&api.HeadChange{Type: "5"})) + + hc := hb.Push(&api.HeadChange{Type: "6"}) + require.Equal(t, hc.Type, "1") + hc = 
hb.Push(&api.HeadChange{Type: "7"}) + require.Equal(t, hc.Type, "2") + hc = hb.Push(&api.HeadChange{Type: "8"}) + require.Equal(t, hc.Type, "3b") + }) +} diff --git a/tools/stats/influx/influx.go b/tools/stats/influx/influx.go new file mode 100644 index 000000000..65fb4c0b9 --- /dev/null +++ b/tools/stats/influx/influx.go @@ -0,0 +1,133 @@ +package influx + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/filecoin-project/lotus/build" + + _ "github.com/influxdata/influxdb1-client" + models "github.com/influxdata/influxdb1-client/models" + client "github.com/influxdata/influxdb1-client/v2" +) + +type PointList struct { + points []models.Point +} + +func NewPointList() *PointList { + return &PointList{} +} + +func (pl *PointList) AddPoint(p models.Point) { + pl.points = append(pl.points, p) +} + +func (pl *PointList) Points() []models.Point { + return pl.points +} + +type WriteQueue struct { + ch chan client.BatchPoints +} + +func NewWriteQueue(ctx context.Context, influx client.Client) *WriteQueue { + ch := make(chan client.BatchPoints, 128) + + maxRetries := 10 + + go func() { + main: + for { + select { + case <-ctx.Done(): + return + case batch := <-ch: + for i := 0; i < maxRetries; i++ { + if err := influx.Write(batch); err != nil { + log.Warnw("Failed to write batch", "error", err) + build.Clock.Sleep(3 * time.Second) + continue + } + + continue main + } + + log.Error("dropping batch due to failure to write") + } + } + }() + + return &WriteQueue{ + ch: ch, + } +} + +func (i *WriteQueue) AddBatch(bp client.BatchPoints) { + i.ch <- bp +} + +func (i *WriteQueue) Close() { + close(i.ch) +} + +func NewClient(addr, user, pass string) (client.Client, error) { + return client.NewHTTPClient(client.HTTPConfig{ + Addr: addr, + Username: user, + Password: pass, + }) +} + +func NewBatch() (client.BatchPoints, error) { + return client.NewBatchPoints(client.BatchPointsConfig{}) +} + +func NewPoint(name string, value interface{}) models.Point { + pt, _ 
:= models.NewPoint(name, models.Tags{}, + map[string]interface{}{"value": value}, build.Clock.Now().UTC()) + return pt +} + +func NewPointFrom(p models.Point) *client.Point { + return client.NewPointFrom(p) +} + +func ResetDatabase(influx client.Client, database string) error { + log.Debug("resetting database") + q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") + _, err := influx.Query(q) + if err != nil { + return err + } + log.Infow("database reset", "database", database) + return nil +} + +func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { + log.Debug("retrieving last record height") + q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") + res, err := influx.Query(q) + if err != nil { + return 0, err + } + + if len(res.Results) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + if len(res.Results[0].Series) == 0 { + return 0, fmt.Errorf("No results found for last recorded height") + } + + height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() + if err != nil { + return 0, err + } + + log.Infow("last record height", "height", height) + + return height, nil +} diff --git a/tools/stats/influx/log.go b/tools/stats/influx/log.go new file mode 100644 index 000000000..b3637d6b0 --- /dev/null +++ b/tools/stats/influx/log.go @@ -0,0 +1,7 @@ +package influx + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/influx") diff --git a/tools/stats/ipldstore/ipldstore.go b/tools/stats/ipldstore/ipldstore.go new file mode 100644 index 000000000..9adc599fd --- /dev/null +++ b/tools/stats/ipldstore/ipldstore.go @@ -0,0 +1,92 @@ +package ipldstore + +import ( + "bytes" + "context" + "fmt" + + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru "github.com/hashicorp/golang-lru" + "github.com/ipfs/go-cid" + cbg "github.com/whyrusleeping/cbor-gen" + 
"go.opencensus.io/stats" +) + +type ApiIpldStore struct { + ctx context.Context + api apiIpldStoreApi + cache *lru.TwoQueueCache + cacheSize int +} + +type apiIpldStoreApi interface { + ChainReadObj(context.Context, cid.Cid) ([]byte, error) +} + +func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi, cacheSize int) (*ApiIpldStore, error) { + store := &ApiIpldStore{ + ctx: ctx, + api: api, + cacheSize: cacheSize, + } + + cache, err := lru.New2Q(store.cacheSize) + if err != nil { + return nil, err + } + + store.cache = cache + + return store, nil +} + +func (ht *ApiIpldStore) Context() context.Context { + return ht.ctx +} + +func (ht *ApiIpldStore) read(ctx context.Context, c cid.Cid) ([]byte, error) { + stats.Record(ctx, metrics.IpldStoreCacheMiss.M(1)) + done := metrics.Timer(ctx, metrics.IpldStoreReadDuration) + defer done() + return ht.api.ChainReadObj(ctx, c) +} + +func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { + done := metrics.Timer(ctx, metrics.IpldStoreGetDuration) + defer done() + defer func() { + stats.Record(ctx, metrics.IpldStoreCacheSize.M(int64(ht.cacheSize))) + stats.Record(ctx, metrics.IpldStoreCacheLength.M(int64(ht.cache.Len()))) + }() + + var raw []byte + + if a, ok := ht.cache.Get(c); ok { + stats.Record(ctx, metrics.IpldStoreCacheHit.M(1)) + raw = a.([]byte) + } else { + bs, err := ht.read(ctx, c) + if err != nil { + return err + } + + raw = bs + } + + cu, ok := out.(cbg.CBORUnmarshaler) + if ok { + if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { + return err + } + + ht.cache.Add(c, raw) + return nil + } + + return fmt.Errorf("Object does not implement CBORUnmarshaler") +} + +func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) (cid.Cid, error) { + return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") +} diff --git a/tools/stats/metrics.go b/tools/stats/metrics.go deleted file mode 100644 index ca3f26336..000000000 --- a/tools/stats/metrics.go +++ /dev/null 
@@ -1,418 +0,0 @@ -package stats - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "math" - "math/big" - "strings" - "time" - - "github.com/filecoin-project/go-address" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/actors/builtin/power" - "github.com/filecoin-project/lotus/chain/actors/builtin/reward" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - - "github.com/ipfs/go-cid" - "github.com/multiformats/go-multihash" - "golang.org/x/xerrors" - - cbg "github.com/whyrusleeping/cbor-gen" - - _ "github.com/influxdata/influxdb1-client" - models "github.com/influxdata/influxdb1-client/models" - client "github.com/influxdata/influxdb1-client/v2" - - logging "github.com/ipfs/go-log/v2" -) - -var log = logging.Logger("stats") - -type PointList struct { - points []models.Point -} - -func NewPointList() *PointList { - return &PointList{} -} - -func (pl *PointList) AddPoint(p models.Point) { - pl.points = append(pl.points, p) -} - -func (pl *PointList) Points() []models.Point { - return pl.points -} - -type InfluxWriteQueue struct { - ch chan client.BatchPoints -} - -func NewInfluxWriteQueue(ctx context.Context, influx client.Client) *InfluxWriteQueue { - ch := make(chan client.BatchPoints, 128) - - maxRetries := 10 - - go func() { - main: - for { - select { - case <-ctx.Done(): - return - case batch := <-ch: - for i := 0; i < maxRetries; i++ { - if err := influx.Write(batch); err != nil { - log.Warnw("Failed to write batch", "error", err) - build.Clock.Sleep(15 * time.Second) - continue - } - - continue main - } - - log.Error("Dropping batch due to failure to write") - } - } - }() - - return &InfluxWriteQueue{ - ch: ch, - } -} - -func (i *InfluxWriteQueue) AddBatch(bp client.BatchPoints) { - i.ch <- bp -} - -func (i *InfluxWriteQueue) Close() { - close(i.ch) -} - -func InfluxClient(addr, user, pass string) (client.Client, 
error) { - return client.NewHTTPClient(client.HTTPConfig{ - Addr: addr, - Username: user, - Password: pass, - }) -} - -func InfluxNewBatch() (client.BatchPoints, error) { - return client.NewBatchPoints(client.BatchPointsConfig{}) -} - -func NewPoint(name string, value interface{}) models.Point { - pt, _ := models.NewPoint(name, models.Tags{}, - map[string]interface{}{"value": value}, build.Clock.Now().UTC()) - return pt -} - -func NewPointFrom(p models.Point) *client.Point { - return client.NewPointFrom(p) -} - -func RecordTipsetPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := []string{} - for _, cid := range tipset.Cids() { - cids = append(cids, cid.String()) - } - - p := NewPoint("chain.height", int64(tipset.Height())) - p.AddTag("tipset", strings.Join(cids, " ")) - pl.AddPoint(p) - - p = NewPoint("chain.block_count", len(cids)) - pl.AddPoint(p) - - tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) - p = NewPoint("chain.blocktime", tsTime.Unix()) - pl.AddPoint(p) - - totalGasLimit := int64(0) - totalUniqGasLimit := int64(0) - seen := make(map[cid.Cid]struct{}) - for _, blockheader := range tipset.Blocks() { - bs, err := blockheader.Serialize() - if err != nil { - return err - } - p := NewPoint("chain.election", blockheader.ElectionProof.WinCount) - p.AddTag("miner", blockheader.Miner.String()) - pl.AddPoint(p) - - p = NewPoint("chain.blockheader_size", len(bs)) - pl.AddPoint(p) - - msgs, err := api.ChainGetBlockMessages(ctx, blockheader.Cid()) - if err != nil { - return xerrors.Errorf("ChainGetBlockMessages failed: %w", msgs) - } - for _, m := range msgs.BlsMessages { - c := m.Cid() - totalGasLimit += m.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.GasLimit - seen[c] = struct{}{} - } - } - for _, m := range msgs.SecpkMessages { - c := m.Cid() - totalGasLimit += m.Message.GasLimit - if _, ok := seen[c]; !ok { - totalUniqGasLimit += m.Message.GasLimit - seen[c] = struct{}{} - } - } - } 
- p = NewPoint("chain.gas_limit_total", totalGasLimit) - pl.AddPoint(p) - p = NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) - pl.AddPoint(p) - - { - baseFeeIn := tipset.Blocks()[0].ParentBaseFee - newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) - - baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) - baseFeeFloat, _ := baseFeeRat.Float64() - p = NewPoint("chain.basefee", baseFeeFloat) - pl.AddPoint(p) - - baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) - baseFeeChangeF, _ := baseFeeChange.Float64() - p = NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) - pl.AddPoint(p) - } - { - blks := int64(len(cids)) - p = NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - p = NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) - pl.AddPoint(p) - } - - return nil -} - -type ApiIpldStore struct { - ctx context.Context - api apiIpldStoreApi -} - -type apiIpldStoreApi interface { - ChainReadObj(context.Context, cid.Cid) ([]byte, error) -} - -func NewApiIpldStore(ctx context.Context, api apiIpldStoreApi) *ApiIpldStore { - return &ApiIpldStore{ctx, api} -} - -func (ht *ApiIpldStore) Context() context.Context { - return ht.ctx -} - -func (ht *ApiIpldStore) Get(ctx context.Context, c cid.Cid, out interface{}) error { - raw, err := ht.api.ChainReadObj(ctx, c) - if err != nil { - return err - } - - cu, ok := out.(cbg.CBORUnmarshaler) - if ok { - if err := cu.UnmarshalCBOR(bytes.NewReader(raw)); err != nil { - return err - } - return nil - } - - return fmt.Errorf("Object does not implement CBORUnmarshaler") -} - -func (ht *ApiIpldStore) Put(ctx context.Context, v interface{}) 
(cid.Cid, error) { - return cid.Undef, fmt.Errorf("Put is not implemented on ApiIpldStore") -} - -func RecordTipsetStatePoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - attoFil := types.NewInt(build.FilecoinPrecision).Int - - //TODO: StatePledgeCollateral API is not implemented and is commented out - re-enable this block once the API is implemented again. - //pc, err := api.StatePledgeCollateral(ctx, tipset.Key()) - //if err != nil { - //return err - //} - - //pcFil := new(big.Rat).SetFrac(pc.Int, attoFil) - //pcFilFloat, _ := pcFil.Float64() - //p := NewPoint("chain.pledge_collateral", pcFilFloat) - //pl.AddPoint(p) - - netBal, err := api.WalletBalance(ctx, reward.Address) - if err != nil { - return err - } - - netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) - netBalFilFloat, _ := netBalFil.Float64() - p := NewPoint("network.balance", netBalFilFloat) - pl.AddPoint(p) - - totalPower, err := api.StateMinerPower(ctx, address.Address{}, tipset.Key()) - if err != nil { - return err - } - - // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which is smaller than the Filecoin Mainnet. - // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner - // sizes up to 8192 yobibytes. 
- gibi := types.NewInt(1024 * 1024 * 1024) - p = NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) - pl.AddPoint(p) - - powerActor, err := api.StateGetActor(ctx, power.Address, tipset.Key()) - if err != nil { - return err - } - - powerActorState, err := power.Load(&ApiIpldStore{ctx, api}, powerActor) - if err != nil { - return err - } - - return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { - // BigCmp returns 0 if values are equal - if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { - return nil - } - - p = NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) - p.AddTag("miner", addr.String()) - pl.AddPoint(p) - - return nil - }) -} - -type msgTag struct { - actor string - method uint64 - exitcode uint8 -} - -func RecordTipsetMessagesPoints(ctx context.Context, api v0api.FullNode, pl *PointList, tipset *types.TipSet) error { - cids := tipset.Cids() - if len(cids) == 0 { - return fmt.Errorf("no cids in tipset") - } - - msgs, err := api.ChainGetParentMessages(ctx, cids[0]) - if err != nil { - return err - } - - recp, err := api.ChainGetParentReceipts(ctx, cids[0]) - if err != nil { - return err - } - - msgn := make(map[msgTag][]cid.Cid) - - totalGasUsed := int64(0) - for _, r := range recp { - totalGasUsed += r.GasUsed - } - p := NewPoint("chain.gas_used_total", totalGasUsed) - pl.AddPoint(p) - - for i, msg := range msgs { - // FIXME: use float so this doesn't overflow - // FIXME: this doesn't work as time points get overridden - p := NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) - pl.AddPoint(p) - p = NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) - pl.AddPoint(p) - - bs, err := msg.Message.Serialize() - if err != nil { - return err - } - - p = NewPoint("chain.message_size", len(bs)) - pl.AddPoint(p) - - actor, err := api.StateGetActor(ctx, msg.Message.To, tipset.Key()) - if err != nil { - return err - } - 
- dm, err := multihash.Decode(actor.Code.Hash()) - if err != nil { - continue - } - tag := msgTag{ - actor: string(dm.Digest), - method: uint64(msg.Message.Method), - exitcode: uint8(recp[i].ExitCode), - } - - found := false - for _, c := range msgn[tag] { - if c.Equals(msg.Cid) { - found = true - break - } - } - if !found { - msgn[tag] = append(msgn[tag], msg.Cid) - } - } - - for t, m := range msgn { - p := NewPoint("chain.message_count", len(m)) - p.AddTag("actor", t.actor) - p.AddTag("method", fmt.Sprintf("%d", t.method)) - p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) - pl.AddPoint(p) - - } - - return nil -} - -func ResetDatabase(influx client.Client, database string) error { - log.Info("Resetting database") - q := client.NewQuery(fmt.Sprintf(`DROP DATABASE "%s"; CREATE DATABASE "%s";`, database, database), "", "") - _, err := influx.Query(q) - return err -} - -func GetLastRecordedHeight(influx client.Client, database string) (int64, error) { - log.Info("Retrieving last record height") - q := client.NewQuery(`SELECT "value" FROM "chain.height" ORDER BY time DESC LIMIT 1`, database, "") - res, err := influx.Query(q) - if err != nil { - return 0, err - } - - if len(res.Results) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - if len(res.Results[0].Series) == 0 { - return 0, fmt.Errorf("No results found for last recorded height") - } - - height, err := (res.Results[0].Series[0].Values[0][1].(json.Number)).Int64() - if err != nil { - return 0, err - } - - log.Infow("Last record height", "height", height) - - return height, nil -} diff --git a/tools/stats/metrics/metrics.go b/tools/stats/metrics/metrics.go new file mode 100644 index 000000000..e5178def1 --- /dev/null +++ b/tools/stats/metrics/metrics.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + + "github.com/filecoin-project/lotus/metrics" +) + +var Timer = metrics.Timer +var SinceInMilliseconds = 
metrics.SinceInMilliseconds + +// Distribution +var ( + defaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 32, 64, 128, 256, 500, 1000, 2000, 3000, 5000, 10000, 20000, 30000, 40000, 50000, 60000) +) + +// Global Tags +var () + +// Measures +var ( + TipsetCollectionHeight = stats.Int64("tipset_collection/height", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionHeightExpected = stats.Int64("tipset_collection/height_expected", "Current Height of the node", stats.UnitDimensionless) + TipsetCollectionPoints = stats.Int64("tipset_collection/points", "Counter for total number of points collected", stats.UnitDimensionless) + TipsetCollectionDuration = stats.Float64("tipset_collection/total_ms", "Duration of tipset point collection", stats.UnitMilliseconds) + TipsetCollectionBlockHeaderDuration = stats.Float64("tipset_collection/block_header_ms", "Duration of block header point collection", stats.UnitMilliseconds) + TipsetCollectionMessageDuration = stats.Float64("tipset_collection/message_ms", "Duration of message point collection", stats.UnitMilliseconds) + TipsetCollectionStaterootDuration = stats.Float64("tipset_collection/stateroot_ms", "Duration of stateroot point collection", stats.UnitMilliseconds) + IpldStoreCacheSize = stats.Int64("ipld_store/cache_size", "Initialized size of the object read cache", stats.UnitDimensionless) + IpldStoreCacheLength = stats.Int64("ipld_store/cache_length", "Current length of object read cache", stats.UnitDimensionless) + IpldStoreCacheHit = stats.Int64("ipld_store/cache_hit", "Counter for total cache hits", stats.UnitDimensionless) + IpldStoreCacheMiss = stats.Int64("ipld_store/cache_miss", "Counter for total cache misses", stats.UnitDimensionless) + IpldStoreReadDuration = stats.Float64("ipld_store/read_ms", "Duration of object read request to lotus", stats.UnitMilliseconds) + IpldStoreGetDuration = stats.Float64("ipld_store/get_ms", "Duration 
of object get from store", stats.UnitMilliseconds) + WriteQueueSize = stats.Int64("write_queue/length", "Current length of the write queue", stats.UnitDimensionless) +) + +// Views +var ( + TipsetCollectionHeightView = &view.View{ + Measure: TipsetCollectionHeight, + Aggregation: view.LastValue(), + } + TipsetCollectionHeightExpectedView = &view.View{ + Measure: TipsetCollectionHeightExpected, + Aggregation: view.LastValue(), + } + TipsetCollectionPointsView = &view.View{ + Measure: TipsetCollectionPoints, + Aggregation: view.Sum(), + } + TipsetCollectionDurationView = &view.View{ + Measure: TipsetCollectionDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionBlockHeaderDurationView = &view.View{ + Measure: TipsetCollectionBlockHeaderDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionMessageDurationView = &view.View{ + Measure: TipsetCollectionMessageDuration, + Aggregation: defaultMillisecondsDistribution, + } + TipsetCollectionStaterootDurationView = &view.View{ + Measure: TipsetCollectionStaterootDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreCacheSizeView = &view.View{ + Measure: IpldStoreCacheSize, + Aggregation: view.LastValue(), + } + IpldStoreCacheLengthView = &view.View{ + Measure: IpldStoreCacheLength, + Aggregation: view.LastValue(), + } + IpldStoreCacheHitView = &view.View{ + Measure: IpldStoreCacheHit, + Aggregation: view.Count(), + } + IpldStoreCacheMissView = &view.View{ + Measure: IpldStoreCacheMiss, + Aggregation: view.Count(), + } + IpldStoreReadDurationView = &view.View{ + Measure: IpldStoreReadDuration, + Aggregation: defaultMillisecondsDistribution, + } + IpldStoreGetDurationView = &view.View{ + Measure: IpldStoreGetDuration, + Aggregation: defaultMillisecondsDistribution, + } +) + +// DefaultViews is an array of OpenCensus views for metric gathering purposes +var DefaultViews = []*view.View{ + TipsetCollectionHeightView, + TipsetCollectionHeightExpectedView, + 
TipsetCollectionPointsView, + TipsetCollectionDurationView, + TipsetCollectionBlockHeaderDurationView, + TipsetCollectionMessageDurationView, + TipsetCollectionStaterootDurationView, + IpldStoreCacheSizeView, + IpldStoreCacheLengthView, + IpldStoreCacheHitView, + IpldStoreCacheMissView, + IpldStoreReadDurationView, + IpldStoreGetDurationView, +} diff --git a/tools/stats/points/collect.go b/tools/stats/points/collect.go new file mode 100644 index 000000000..a7c37fcd9 --- /dev/null +++ b/tools/stats/points/collect.go @@ -0,0 +1,363 @@ +package points + +import ( + "context" + "fmt" + "math" + "math/big" + "strings" + "time" + + "github.com/filecoin-project/go-address" + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors/adt" + "github.com/filecoin-project/lotus/chain/actors/builtin/power" + "github.com/filecoin-project/lotus/chain/actors/builtin/reward" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/tools/stats/influx" + "github.com/filecoin-project/lotus/tools/stats/metrics" + + lru "github.com/hashicorp/golang-lru" + client "github.com/influxdata/influxdb1-client/v2" + "github.com/ipfs/go-cid" + "github.com/multiformats/go-multihash" + "go.opencensus.io/stats" + "golang.org/x/xerrors" +) + +type LotusApi interface { + WalletBalance(context.Context, address.Address) (types.BigInt, error) + StateMinerPower(context.Context, address.Address, types.TipSetKey) (*api.MinerPower, error) + StateGetActor(ctx context.Context, actor address.Address, tsk types.TipSetKey) (*types.Actor, error) + ChainGetParentMessages(ctx context.Context, blockCid cid.Cid) ([]api.Message, error) + ChainGetParentReceipts(ctx context.Context, blockCid cid.Cid) ([]*types.MessageReceipt, error) + ChainGetBlockMessages(ctx context.Context, blockCid cid.Cid) (*api.BlockMessages, error) +} + +type ChainPointCollector struct { + ctx 
context.Context + api LotusApi + store adt.Store + actorDigestCache *lru.TwoQueueCache +} + +func NewChainPointCollector(ctx context.Context, store adt.Store, api LotusApi) (*ChainPointCollector, error) { + actorDigestCache, err := lru.New2Q(2 << 15) + if err != nil { + return nil, err + } + + collector := &ChainPointCollector{ + ctx: ctx, + store: store, + actorDigestCache: actorDigestCache, + api: api, + } + + return collector, nil +} + +func (c *ChainPointCollector) actorDigest(ctx context.Context, addr address.Address, tipset *types.TipSet) (string, error) { + if code, ok := c.actorDigestCache.Get(addr); ok { + return code.(string), nil + } + + actor, err := c.api.StateGetActor(ctx, addr, tipset.Key()) + if err != nil { + return "", err + } + + dm, err := multihash.Decode(actor.Code.Hash()) + if err != nil { + return "", err + } + + digest := string(dm.Digest) + c.actorDigestCache.Add(addr, digest) + + return digest, nil +} + +func (c *ChainPointCollector) Collect(ctx context.Context, tipset *types.TipSet) (client.BatchPoints, error) { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionDuration) + defer func() { + log.Infow("record tipset", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + pl := influx.NewPointList() + height := tipset.Height() + + log.Debugw("collecting tipset points", "height", tipset.Height()) + stats.Record(ctx, metrics.TipsetCollectionHeight.M(int64(height))) + + if err := c.collectBlockheaderPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record tipset", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectMessagePoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record messages", "height", height, "error", err, "tipset", tipset.Key()) + } + + if err := c.collectStaterootPoints(ctx, pl, tipset); err != nil { + log.Errorw("failed to record state", "height", height, "error", err, "tipset", tipset.Key()) + } + + tsTimestamp := 
time.Unix(int64(tipset.MinTimestamp()), int64(0)) + + nb, err := influx.NewBatch() + if err != nil { + return nil, err + } + + for _, pt := range pl.Points() { + pt.SetTime(tsTimestamp) + nb.AddPoint(influx.NewPointFrom(pt)) + } + + log.Infow("collected tipset points", "count", len(nb.Points()), "height", tipset.Height()) + + stats.Record(ctx, metrics.TipsetCollectionPoints.M(int64(len(nb.Points())))) + + return nb, nil +} + +func (c *ChainPointCollector) collectBlockheaderPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionBlockHeaderDuration) + defer func() { + log.Infow("collect blockheader points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + cids := []string{} + for _, cid := range tipset.Cids() { + cids = append(cids, cid.String()) + } + + p := influx.NewPoint("chain.height", int64(tipset.Height())) + p.AddTag("tipset", strings.Join(cids, " ")) + pl.AddPoint(p) + + p = influx.NewPoint("chain.block_count", len(cids)) + pl.AddPoint(p) + + tsTime := time.Unix(int64(tipset.MinTimestamp()), int64(0)) + p = influx.NewPoint("chain.blocktime", tsTime.Unix()) + pl.AddPoint(p) + + totalGasLimit := int64(0) + totalUniqGasLimit := int64(0) + seen := make(map[cid.Cid]struct{}) + for _, blockheader := range tipset.Blocks() { + bs, err := blockheader.Serialize() + if err != nil { + return err + } + p := influx.NewPoint("chain.election", blockheader.ElectionProof.WinCount) + p.AddTag("miner", blockheader.Miner.String()) + pl.AddPoint(p) + + p = influx.NewPoint("chain.blockheader_size", len(bs)) + pl.AddPoint(p) + + msgs, err := c.api.ChainGetBlockMessages(ctx, blockheader.Cid()) + if err != nil { + return xerrors.Errorf("ChainGetBlockMessages failed: %w", err) + } + for _, m := range msgs.BlsMessages { + c := m.Cid() + totalGasLimit += m.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.GasLimit + seen[c] = struct{}{} + } + } + for _, m := 
range msgs.SecpkMessages { + c := m.Cid() + totalGasLimit += m.Message.GasLimit + if _, ok := seen[c]; !ok { + totalUniqGasLimit += m.Message.GasLimit + seen[c] = struct{}{} + } + } + } + p = influx.NewPoint("chain.gas_limit_total", totalGasLimit) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_limit_uniq_total", totalUniqGasLimit) + pl.AddPoint(p) + + { + baseFeeIn := tipset.Blocks()[0].ParentBaseFee + newBaseFee := store.ComputeNextBaseFee(baseFeeIn, totalUniqGasLimit, len(tipset.Blocks()), tipset.Height()) + + baseFeeRat := new(big.Rat).SetFrac(newBaseFee.Int, new(big.Int).SetUint64(build.FilecoinPrecision)) + baseFeeFloat, _ := baseFeeRat.Float64() + p = influx.NewPoint("chain.basefee", baseFeeFloat) + pl.AddPoint(p) + + baseFeeChange := new(big.Rat).SetFrac(newBaseFee.Int, baseFeeIn.Int) + baseFeeChangeF, _ := baseFeeChange.Float64() + p = influx.NewPoint("chain.basefee_change_log", math.Log(baseFeeChangeF)/math.Log(1.125)) + pl.AddPoint(p) + } + { + blks := int64(len(cids)) + p = influx.NewPoint("chain.gas_fill_ratio", float64(totalGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_capacity_ratio", float64(totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + p = influx.NewPoint("chain.gas_waste_ratio", float64(totalGasLimit-totalUniqGasLimit)/float64(blks*build.BlockGasTarget)) + pl.AddPoint(p) + } + + return nil +} + +func (c *ChainPointCollector) collectStaterootPoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionStaterootDuration) + defer func() { + log.Infow("collect stateroot points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + attoFil := types.NewInt(build.FilecoinPrecision).Int + + netBal, err := c.api.WalletBalance(ctx, reward.Address) + if err != nil { + return err + } + + netBalFil := new(big.Rat).SetFrac(netBal.Int, attoFil) + netBalFilFloat, _ := 
netBalFil.Float64() + p := influx.NewPoint("network.balance", netBalFilFloat) + pl.AddPoint(p) + + totalPower, err := c.api.StateMinerPower(ctx, address.Address{}, tipset.Key()) + if err != nil { + return err + } + + // We divide the power into gibibytes because 2^63 bytes is 8 exbibytes which is smaller than the Filecoin Mainnet. + // Dividing by a gibibyte gives us more room to work with. This will allow the dashboard to report network and miner + // sizes up to 8192 yobibytes. + gibi := types.NewInt(1024 * 1024 * 1024) + p = influx.NewPoint("chain.power", types.BigDiv(totalPower.TotalPower.QualityAdjPower, gibi).Int64()) + pl.AddPoint(p) + + powerActor, err := c.api.StateGetActor(ctx, power.Address, tipset.Key()) + if err != nil { + return err + } + + powerActorState, err := power.Load(c.store, powerActor) + if err != nil { + return err + } + + return powerActorState.ForEachClaim(func(addr address.Address, claim power.Claim) error { + // BigCmp returns 0 if values are equal + if types.BigCmp(claim.QualityAdjPower, types.NewInt(0)) == 0 { + return nil + } + + p = influx.NewPoint("chain.miner_power", types.BigDiv(claim.QualityAdjPower, gibi).Int64()) + p.AddTag("miner", addr.String()) + pl.AddPoint(p) + + return nil + }) +} + +type msgTag struct { + actor string + method uint64 + exitcode uint8 +} + +func (c *ChainPointCollector) collectMessagePoints(ctx context.Context, pl *influx.PointList, tipset *types.TipSet) error { + start := time.Now() + done := metrics.Timer(ctx, metrics.TipsetCollectionMessageDuration) + defer func() { + log.Infow("collect message points", "elapsed", time.Now().Sub(start).Seconds()) + done() + }() + + cids := tipset.Cids() + if len(cids) == 0 { + return fmt.Errorf("no cids in tipset") + } + + msgs, err := c.api.ChainGetParentMessages(ctx, cids[0]) + if err != nil { + return err + } + + recp, err := c.api.ChainGetParentReceipts(ctx, cids[0]) + if err != nil { + return err + } + + msgn := make(map[msgTag][]cid.Cid) + + totalGasUsed := 
int64(0) + for _, r := range recp { + totalGasUsed += r.GasUsed + } + p := influx.NewPoint("chain.gas_used_total", totalGasUsed) + pl.AddPoint(p) + + for i, msg := range msgs { + digest, err := c.actorDigest(ctx, msg.Message.To, tipset) + if err != nil { + continue + } + + // FIXME: use float so this doesn't overflow + // FIXME: this doesn't work as time points get overridden + p := influx.NewPoint("chain.message_gaspremium", msg.Message.GasPremium.Int64()) + pl.AddPoint(p) + p = influx.NewPoint("chain.message_gasfeecap", msg.Message.GasFeeCap.Int64()) + pl.AddPoint(p) + + bs, err := msg.Message.Serialize() + if err != nil { + return err + } + + p = influx.NewPoint("chain.message_size", len(bs)) + pl.AddPoint(p) + + tag := msgTag{ + actor: digest, + method: uint64(msg.Message.Method), + exitcode: uint8(recp[i].ExitCode), + } + + found := false + for _, c := range msgn[tag] { + if c.Equals(msg.Cid) { + found = true + break + } + } + if !found { + msgn[tag] = append(msgn[tag], msg.Cid) + } + } + + for t, m := range msgn { + p := influx.NewPoint("chain.message_count", len(m)) + p.AddTag("actor", t.actor) + p.AddTag("method", fmt.Sprintf("%d", t.method)) + p.AddTag("exitcode", fmt.Sprintf("%d", t.exitcode)) + pl.AddPoint(p) + + } + + return nil +} diff --git a/tools/stats/points/log.go b/tools/stats/points/log.go new file mode 100644 index 000000000..e0cb795c0 --- /dev/null +++ b/tools/stats/points/log.go @@ -0,0 +1,7 @@ +package points + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/points") diff --git a/tools/stats/rpc.go b/tools/stats/rpc.go deleted file mode 100644 index 4e503cb39..000000000 --- a/tools/stats/rpc.go +++ /dev/null @@ -1,228 +0,0 @@ -package stats - -import ( - "context" - "net/http" - "time" - - "github.com/filecoin-project/go-jsonrpc" - "github.com/filecoin-project/go-state-types/abi" - manet "github.com/multiformats/go-multiaddr/net" - - "golang.org/x/xerrors" - - "github.com/filecoin-project/lotus/api" - 
"github.com/filecoin-project/lotus/api/client" - "github.com/filecoin-project/lotus/api/v0api" - "github.com/filecoin-project/lotus/build" - "github.com/filecoin-project/lotus/chain/store" - "github.com/filecoin-project/lotus/chain/types" - "github.com/filecoin-project/lotus/node/repo" -) - -func getAPI(path string) (string, http.Header, error) { - r, err := repo.NewFS(path) - if err != nil { - return "", nil, err - } - - ma, err := r.APIEndpoint() - if err != nil { - return "", nil, xerrors.Errorf("failed to get api endpoint: %w", err) - } - _, addr, err := manet.DialArgs(ma) - if err != nil { - return "", nil, err - } - var headers http.Header - token, err := r.APIToken() - if err != nil { - log.Warnw("Couldn't load CLI token, capabilities may be limited", "error", err) - } else { - headers = http.Header{} - headers.Add("Authorization", "Bearer "+string(token)) - } - - return "ws://" + addr + "/rpc/v0", headers, nil -} - -func WaitForSyncComplete(ctx context.Context, napi v0api.FullNode) error { -sync_complete: - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - state, err := napi.SyncState(ctx) - if err != nil { - return err - } - - for i, w := range state.ActiveSyncs { - if w.Target == nil { - continue - } - - if w.Stage == api.StageSyncErrored { - log.Errorw( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "error", w.Message, - "stage", w.Stage.String(), - ) - } else { - log.Infow( - "Syncing", - "worker", i, - "base", w.Base.Key(), - "target", w.Target.Key(), - "target_height", w.Target.Height(), - "height", w.Height, - "stage", w.Stage.String(), - ) - } - - if w.Stage == api.StageSyncComplete { - break sync_complete - } - } - } - } - - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-build.Clock.After(5 * time.Second): - head, err := napi.ChainHead(ctx) - if err != nil { - return err - } - - 
timestampDelta := build.Clock.Now().Unix() - int64(head.MinTimestamp()) - - log.Infow( - "Waiting for reasonable head height", - "height", head.Height(), - "timestamp_delta", timestampDelta, - ) - - // If we get within 20 blocks of the current exected block height we - // consider sync complete. Block propagation is not always great but we still - // want to be recording stats as soon as we can - if timestampDelta < int64(build.BlockDelaySecs)*20 { - return nil - } - } - } -} - -func GetTips(ctx context.Context, api v0api.FullNode, lastHeight abi.ChainEpoch, headlag int) (<-chan *types.TipSet, error) { - chmain := make(chan *types.TipSet) - - hb := newHeadBuffer(headlag) - - notif, err := api.ChainNotify(ctx) - if err != nil { - return nil, err - } - - go func() { - defer close(chmain) - - ticker := time.NewTicker(30 * time.Second) - defer ticker.Stop() - - for { - select { - case changes, ok := <-notif: - if !ok { - return - } - for _, change := range changes { - log.Infow("Head event", "height", change.Val.Height(), "type", change.Type) - - switch change.Type { - case store.HCCurrent: - tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) - if err != nil { - log.Info(err) - return - } - - for _, tipset := range tipsets { - chmain <- tipset - } - case store.HCApply: - if out := hb.push(change); out != nil { - chmain <- out.Val - } - case store.HCRevert: - hb.pop() - } - } - case <-ticker.C: - log.Info("Running health check") - - cctx, cancel := context.WithTimeout(ctx, 5*time.Second) - - if _, err := api.ID(cctx); err != nil { - log.Error("Health check failed") - cancel() - return - } - - cancel() - - log.Info("Node online") - case <-ctx.Done(): - return - } - } - }() - - return chmain, nil -} - -func loadTipsets(ctx context.Context, api v0api.FullNode, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { - tipsets := []*types.TipSet{} - for { - if curr.Height() == 0 { - break - } - - if curr.Height() <= lowestHeight { - break - } - 
- log.Infow("Walking back", "height", curr.Height()) - tipsets = append(tipsets, curr) - - tsk := curr.Parents() - prev, err := api.ChainGetTipSet(ctx, tsk) - if err != nil { - return tipsets, err - } - - curr = prev - } - - for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { - tipsets[i], tipsets[j] = tipsets[j], tipsets[i] - } - - return tipsets, nil -} - -func GetFullNodeAPI(ctx context.Context, repo string) (v0api.FullNode, jsonrpc.ClientCloser, error) { - addr, headers, err := getAPI(repo) - if err != nil { - return nil, nil, err - } - - return client.NewFullNodeRPCV0(ctx, addr, headers) -} diff --git a/tools/stats/sync/log.go b/tools/stats/sync/log.go new file mode 100644 index 000000000..1c2233cc8 --- /dev/null +++ b/tools/stats/sync/log.go @@ -0,0 +1,7 @@ +package sync + +import ( + logging "github.com/ipfs/go-log/v2" +) + +var log = logging.Logger("stats/sync") diff --git a/tools/stats/sync/sync.go b/tools/stats/sync/sync.go new file mode 100644 index 000000000..c8db1c543 --- /dev/null +++ b/tools/stats/sync/sync.go @@ -0,0 +1,192 @@ +package sync + +import ( + "context" + "time" + + "github.com/filecoin-project/go-state-types/abi" + + "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/store" + "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/tools/stats/headbuffer" +) + +type SyncWaitApi interface { + SyncState(context.Context) (*api.SyncState, error) + ChainHead(context.Context) (*types.TipSet, error) +} + +// SyncWait returns when ChainHead is within 20 epochs of the expected height +func SyncWait(ctx context.Context, napi SyncWaitApi) error { + for { + state, err := napi.SyncState(ctx) + if err != nil { + return err + } + + if len(state.ActiveSyncs) == 0 { + build.Clock.Sleep(time.Second) + continue + } + + head, err := napi.ChainHead(ctx) + if err != nil { + return err + } + + working := -1 + for i, ss := range state.ActiveSyncs { + 
switch ss.Stage { + case api.StageSyncComplete: + default: + working = i + case api.StageIdle: + // not complete, not actively working + } + } + + if working == -1 { + working = len(state.ActiveSyncs) - 1 + } + + ss := state.ActiveSyncs[working] + + if ss.Base == nil || ss.Target == nil { + log.Infow( + "syncing", + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } else { + log.Infow( + "syncing", + "base", ss.Base.Key(), + "target", ss.Target.Key(), + "target_height", ss.Target.Height(), + "height", ss.Height, + "stage", ss.Stage.String(), + ) + } + + if build.Clock.Now().Unix()-int64(head.MinTimestamp()) < int64(build.BlockDelaySecs)*30 { + break + } + + select { + case <-ctx.Done(): + return ctx.Err() + case <-build.Clock.After(time.Duration(int64(build.BlockDelaySecs) * int64(time.Second))): + } + } + + return nil +} + +type BufferedTipsetChannelApi interface { + ChainNotify(context.Context) (<-chan []*api.HeadChange, error) + Version(context.Context) (api.APIVersion, error) + ChainGetTipSet(context.Context, types.TipSetKey) (*types.TipSet, error) +} + +// BufferedTipsetChannel returns an unbuffered channel of tipsets. Buffering occurs internally to handle revert +// ChainNotify changes. The returned channel can output tipsets at the same height twice if a reorg larger than the +// provided `size` occurs. 
+func BufferedTipsetChannel(ctx context.Context, api BufferedTipsetChannelApi, lastHeight abi.ChainEpoch, size int) (<-chan *types.TipSet, error) { + chmain := make(chan *types.TipSet) + + hb := headbuffer.NewHeadChangeStackBuffer(size) + + notif, err := api.ChainNotify(ctx) + if err != nil { + return nil, err + } + + go func() { + defer close(chmain) + + ticker := time.NewTicker(30 * time.Second) + defer ticker.Stop() + + for { + select { + case changes, ok := <-notif: + if !ok { + return + } + for _, change := range changes { + log.Debugw("head event", "height", change.Val.Height(), "type", change.Type) + + switch change.Type { + case store.HCCurrent: + tipsets, err := loadTipsets(ctx, api, change.Val, lastHeight) + if err != nil { + log.Info(err) + return + } + + for _, tipset := range tipsets { + chmain <- tipset + } + case store.HCApply: + if out := hb.Push(change); out != nil { + chmain <- out.Val + } + case store.HCRevert: + hb.Pop() + } + } + case <-ticker.C: + log.Debug("running health check") + + cctx, cancel := context.WithTimeout(ctx, 5*time.Second) + + if _, err := api.Version(cctx); err != nil { + log.Error("health check failed") + cancel() + return + } + + cancel() + + log.Debug("node online") + case <-ctx.Done(): + return + } + } + }() + + return chmain, nil +} + +func loadTipsets(ctx context.Context, api BufferedTipsetChannelApi, curr *types.TipSet, lowestHeight abi.ChainEpoch) ([]*types.TipSet, error) { + log.Infow("loading tipsets", "to_height", lowestHeight, "from_height", curr.Height()) + tipsets := []*types.TipSet{} + for { + if curr.Height() == 0 { + break + } + + if curr.Height() <= lowestHeight { + break + } + + log.Debugw("walking back", "height", curr.Height()) + tipsets = append(tipsets, curr) + + tsk := curr.Parents() + prev, err := api.ChainGetTipSet(ctx, tsk) + if err != nil { + return tipsets, err + } + + curr = prev + } + + for i, j := 0, len(tipsets)-1; i < j; i, j = i+1, j-1 { + tipsets[i], tipsets[j] = tipsets[j], tipsets[i] + } 
+ + return tipsets, nil +}