diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..a3a4889 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,5 @@ +repos: + - repo: https://github.com/doublify/pre-commit-rust + rev: v1.0 + hooks: + - id: fmt \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 9329289..80690b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -510,7 +510,8 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "563084372d89271122bd743ef0a608179726f5fad0566008ba55bd0f756489b8" dependencies = [ "ark-ff", "ark-serialize", @@ -523,7 +524,7 @@ dependencies = [ [[package]] name = "ark-transcript" version = "0.0.2" -source = "git+https://github.com/w3f/ring-vrf#0fef8266d851932ad25d6b41bc4b34d834d1e11d" +source = "git+https://github.com/w3f/ring-vrf?rev=e9782f9#e9782f938629c90f3adb3fff2358bc8d1386af3e" dependencies = [ "ark-ff", "ark-serialize", @@ -639,9 +640,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.83" +version = "0.1.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" +checksum = "a27b8a3a6e1a44fa4c8baf1f653e4172e81486d4941f2237e20dc2d0cf4ddff1" dependencies = [ "proc-macro2", "quote", @@ -761,9 +762,9 @@ dependencies = [ [[package]] name = "binary-merkle-tree" -version = "15.0.0" +version = "15.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5c0fd4282c30c05647e1052d71bf1a0c8067ab1e9a8fc6d0c292dce0ecb237" +checksum = "336bf780dd7526a9a4bc1521720b25c1994dc132cccd59553431923fa4d1a693" dependencies = [ "hash-db", "log", @@ -960,9 +961,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.7.2" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" [[package]] name = "c2-chacha" @@ -1008,9 +1009,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.21" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" +checksum = "45bcde016d64c21da4be18b655631e5ab6d3107607e71a73a9f53eb48aae23fb" dependencies = [ "jobserver", "libc", @@ -1164,7 +1165,7 @@ dependencies = [ [[package]] name = "common" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#1472ce9cd87cee49c56ce7869a0aba872d837c51" +source = "git+https://github.com/w3f/ring-proof#652286c32f96beb9ce7f5793f5e2c2c923f63b73" dependencies = [ "ark-ec", "ark-ff", @@ -2320,9 +2321,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.33" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "324a1be68054ef05ad64b861cc9eaf1d623d2d8cb25b4bf2cb9cdd902b4bf253" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "libz-sys", @@ -3569,15 +3570,6 @@ dependencies = [ "either", ] -[[package]] -name = "itertools" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" -dependencies = [ - "either", -] - [[package]] name = "itoa" version = "1.0.11" @@ -3690,9 +3682,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.4" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" +checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ "cfg-if", "ecdsa", @@ -3744,9 +3736,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.159" +version = "0.2.158" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" +checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439" [[package]] name = "libm" @@ -4155,7 +4147,7 @@ checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ "bitflags 2.6.0", "libc", - "redox_syscall 0.5.6", + "redox_syscall 0.5.4", ] [[package]] @@ -4365,18 +4357,18 @@ dependencies = [ [[package]] name = "lz4" -version = "1.27.0" +version = "1.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a231296ca742e418c43660cb68e082486ff2538e8db432bc818580f3965025ed" +checksum = "4d1febb2b4a79ddd1980eede06a8f7902197960aa0383ffcfdd62fe723036725" dependencies = [ "lz4-sys", ] [[package]] name = "lz4-sys" -version = "1.11.0" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb44a01837a858d47e5a630d2ccf304c8efcc4b83b8f9f75b7a9ee4fcc6e57d" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", @@ -4760,7 +4752,7 @@ dependencies = [ [[package]] name = "murmur-core" version = "0.1.0" -source = "git+https://github.com/ideal-lab5/murmur.git?branch=dev#990808ddf11054168398663dfe727a42704c15d4" +source = "git+https://github.com/ideal-lab5/murmur.git?branch=dev#5f6cac18d770089129c08d58420f6c4f335a49ce" dependencies = [ "ark-bls12-377", "ark-ff", @@ -4777,12 +4769,13 @@ dependencies = [ "sha3", "totp-rs", "w3f-bls 0.1.4", + "zeroize", ] [[package]] name = "murmur-test-utils" version = "0.1.0" -source = "git+https://github.com/ideal-lab5/murmur.git?branch=dev#990808ddf11054168398663dfe727a42704c15d4" +source = "git+https://github.com/ideal-lab5/murmur.git?branch=dev#5f6cac18d770089129c08d58420f6c4f335a49ce" dependencies = [ "ark-bls12-377", "ark-ff", @@ -5377,13 +5370,13 @@ dependencies = [ [[package]] name = "pallet-murmur" -version = "1.0.0-dev" +version = "0.1.0-dev" dependencies = [ "ark-bls12-381", "ark-serialize", "ark-std", "array-bytes 4.2.0", - "binary-merkle-tree 15.0.0", + "binary-merkle-tree 15.0.1", "ckb-merkle-mountain-range", "etf-crypto-primitives 0.2.4 (git+https://github.com/ideal-lab5/etf-sdk.git?branch=dev)", "frame-benchmarking 28.0.0", @@ -5455,7 +5448,7 @@ dependencies = [ [[package]] name = "pallet-proxy" -version = "28.1.0" +version = "0.1.0-dev" dependencies = [ "frame-benchmarking 28.0.0", "frame-support 28.0.0", @@ -5756,7 +5749,7 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.6", + "redox_syscall 0.5.4", "smallvec", "windows-targets 0.52.6", ] @@ -6006,9 +5999,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.8.0" +version = "1.9.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d30538d42559de6b034bc76fd6dd4c38961b1ee5c6c56e3808c50128fdbc22ce" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -6258,7 +6251,7 @@ checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", "heck 0.5.0", - "itertools 0.12.1", + "itertools 0.11.0", "log", "multimap 0.10.0", "once_cell", @@ -6291,7 +6284,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.11.0", "proc-macro2", "quote", "syn 2.0.77", @@ -6590,9 +6583,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.6" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "355ae415ccd3a04315d3f8246e86d67689ea74d88d915576e1589a351062a13b" +checksum = "0884ad60e090bf1345b93da0a5de8923c93884cd03f40dfcfddd3b4bee661853" dependencies = [ "bitflags 2.6.0", ] @@ -6720,14 +6713,14 @@ dependencies = [ [[package]] name = "ring" version = "0.1.0" -source = "git+https://github.com/w3f/ring-proof#1472ce9cd87cee49c56ce7869a0aba872d837c51" +source = "git+https://github.com/w3f/ring-proof#652286c32f96beb9ce7f5793f5e2c2c923f63b73" dependencies = [ "ark-ec", "ark-ff", "ark-poly", "ark-serialize", "ark-std", - "ark-transcript 0.0.2 (git+https://github.com/w3f/ring-vrf)", + "ark-transcript 0.0.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec", "blake2 0.10.6", "common", @@ -8131,9 +8124,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -8754,7 +8747,7 @@ dependencies = [ [[package]] name = "sp-crypto-ec-utils" version = "0.10.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "ark-bls12-377", "ark-bls12-377-ext", @@ -8849,7 +8842,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "proc-macro2", "quote", @@ -8869,7 +8862,7 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.25.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "environmental", "parity-scale-codec", @@ -9214,7 +9207,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "24.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -9265,7 +9258,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "17.0.0" -source = 
"git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "Inflector", "expander", @@ -9405,7 +9398,7 @@ source = "git+https://github.com/ideal-lab5/polkadot-sdk.git?branch=testing#0bd1 [[package]] name = "sp-std" version = "14.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" [[package]] name = "sp-storage" @@ -9422,7 +9415,7 @@ dependencies = [ [[package]] name = "sp-storage" version = "19.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "impl-serde", "parity-scale-codec", @@ -9469,7 +9462,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "16.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "parity-scale-codec", "tracing", @@ -9628,7 +9621,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "20.0.0" -source = "git+https://github.com/paritytech/polkadot-sdk#c77095f51119d2eccdc54d2f3518bed0ffbd6d53" +source = "git+https://github.com/paritytech/polkadot-sdk#a8d8596fd2dc36aa7c4e1bb63536c30fef2855ea" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -10067,9 +10060,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "tempfile" -version = "3.12.0" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", @@ -10095,18 +10088,18 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.64" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", @@ -10298,9 +10291,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "3b072cee73c449a636ffd6f32bd8de3a9f7119139aff882f44943ce2986dc5cf" dependencies = [ "indexmap 2.5.0", "serde", @@ -10669,9 +10662,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.6" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" 
+checksum = "229730647fbc343e3a80e463c1db7f78f3855d3f3739bee0dda773c9a037c90a" [[package]] name = "universal-hash" @@ -11510,9 +11503,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.20" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] diff --git a/Cargo.toml b/Cargo.toml index 8f009fc..c62af83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,6 +18,7 @@ edition = "2021" authors = ["Ideal Labs "] repository = "https://github.com/ideal-lab5/pallets" homepage = "https://idealabs.network" +license = "Apache-2.0" [workspace.lints.rust] suspicious_double_ref_op = { level = "allow", priority = 2 } @@ -60,5 +61,5 @@ serde-big-array = { version = "0.3.2" } serde_derive = { version = "1.0.117" } serde_json = { version = "1.0.114", default-features = false } serde_yaml = { version = "0.9" } -syn = { versicargon = "2.0.53" } +syn = "2.0.53" thiserror = { version = "1.0.48" } diff --git a/GPL-3.0.md b/GPL-3.0.md new file mode 100644 index 0000000..496acdb --- /dev/null +++ b/GPL-3.0.md @@ -0,0 +1,675 @@ +# GNU GENERAL PUBLIC LICENSE + +Version 3, 29 June 2007 + +Copyright (C) 2007 Free Software Foundation, Inc. + + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +## Preamble + +The GNU General Public License is a free, copyleft license for +software and other kinds of works. + +The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom +to share and change all versions of a program--to make sure it remains +free software for all its users. We, the Free Software Foundation, use +the GNU General Public License for most of our software; it applies +also to any other work released this way by its authors. You can apply +it to your programs, too. + +When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you +have certain responsibilities if you distribute copies of the +software, or if you modify it: responsibilities to respect the freedom +of others. + +For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + +Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. 
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + +Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the +manufacturer can do so. This is fundamentally incompatible with the +aim of protecting users' freedom to change the software. The +systematic pattern of such abuse occurs in the area of products for +individuals to use, which is precisely where it is most unacceptable. +Therefore, we have designed this version of the GPL to prohibit the +practice for those products. If such problems arise substantially in +other domains, we stand ready to extend this provision to those +domains in future versions of the GPL, as needed to protect the +freedom of users. + +Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish +to avoid the special danger that patents applied to a free program +could make it effectively proprietary. To prevent this, the GPL +assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and +modification follow. + +## TERMS AND CONDITIONS + +### 0. Definitions. + +"This License" refers to version 3 of the GNU General Public License. + +"Copyright" also means copyright-like laws that apply to other kinds +of works, such as semiconductor masks. + +"The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + +To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of +an exact copy. The resulting work is called a "modified version" of +the earlier work or a work "based on" the earlier work. + +A "covered work" means either the unmodified Program or a work based +on the Program. + +To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + +To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user +through a computer network, with no transfer of a copy, is not +conveying. + +An interactive user interface displays "Appropriate Legal Notices" to +the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +### 1. Source Code. + +The "source code" for a work means the preferred form of the work for +making modifications to it. "Object code" means any non-source form of +a work. 
+ +A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + +The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + +The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can +regenerate automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same +work. + +### 2. Basic Permissions. + +All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, +without conditions so long as your license otherwise remains in force. +You may convey covered works to others for the sole purpose of having +them make modifications exclusively for you, or provide you with +facilities for running those works, provided that you comply with the +terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for +you must do so exclusively on your behalf, under your direction and +control, on terms that prohibit them from making any copies of your +copyrighted material outside their relationship with you. + +Conveying under any other circumstances is permitted solely under the +conditions stated below. Sublicensing is not allowed; section 10 makes +it unnecessary. + +### 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ +No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + +When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such +circumvention is effected by exercising rights under this License with +respect to the covered work, and you disclaim any intention to limit +operation or modification of the work as a means of enforcing, against +the work's users, your or third parties' legal rights to forbid +circumvention of technological measures. + +### 4. Conveying Verbatim Copies. + +You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + +### 5. Conveying Modified Source Versions. + +You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these +conditions: + +- a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. +- b) The work must carry prominent notices stating that it is + released under this License and any conditions added under + section 7. This requirement modifies the requirement in section 4 + to "keep intact all notices". +- c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. +- d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + +A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + +### 6. Conveying Non-Source Forms. 
+ +You may convey a covered work in object code form under the terms of +sections 4 and 5, provided that you also convey the machine-readable +Corresponding Source under the terms of this License, in one of these +ways: + +- a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. +- b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the Corresponding + Source from a network server at no charge. +- c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. +- d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. +- e) Convey the object code using peer-to-peer transmission, + provided you inform other peers where the object code and + Corresponding Source of the work are being offered to the general + public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + +A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, +family, or household purposes, or (2) anything designed or sold for +incorporation into a dwelling. In determining whether a product is a +consumer product, doubtful cases shall be resolved in favor of +coverage. For a particular product received by a particular user, +"normally used" refers to a typical or common use of that class of +product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected +to use, the product. A product is a consumer product regardless of +whether the product has substantial commercial, industrial or +non-consumer uses, unless such uses represent the only significant +mode of use of the product. 
+ +"Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to +install and execute modified versions of a covered work in that User +Product from a modified version of its Corresponding Source. The +information must suffice to ensure that the continued functioning of +the modified object code is in no case prevented or interfered with +solely because modification has been made. + +If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + +The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or +updates for a work that has been modified or installed by the +recipient, or for the User Product in which it has been modified or +installed. Access to a network may be denied when the modification +itself materially and adversely affects the operation of the network +or violates the rules and protocols for communication across the +network. + +Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + +### 7. Additional Terms. + +"Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders +of that material) supplement the terms of this License with terms: + +- a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or +- b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or +- c) Prohibiting misrepresentation of the origin of that material, + or requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or +- d) Limiting the use for publicity purposes of names of licensors + or authors of the material; or +- e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or +- f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions + of it) with contractual assumptions of liability to the recipient, + for any liability that these contractual assumptions directly + impose on those licensors and authors. + +All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + +If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; the +above requirements apply either way. + +### 8. Termination. + +You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + +However, if you cease all violation of this License, then your license +from a particular copyright holder is reinstated (a) provisionally, +unless and until the copyright holder explicitly and finally +terminates your license, and (b) permanently, if the copyright holder +fails to notify you of the violation by some reasonable means prior to +60 days after the cessation. + +Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + +### 9. Acceptance Not Required for Having Copies. + +You are not required to accept this License in order to receive or run +a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + +### 10. Automatic Licensing of Downstream Recipients. + +Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + +An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + +### 11. Patents. + +A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + +A contributor's "essential patent claims" are all patent claims owned +or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + +In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + +If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + +A patent license is "discriminatory" if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on +the non-exercise of one or more of the rights that are specifically +granted under this License. You may not convey a covered work if you +are a party to an arrangement with a third party that is in the +business of distributing software, under which you make payment to the +third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties +who would receive the covered work from you, a discriminatory patent +license (a) in connection with copies of the covered work conveyed by +you (or copies made from those copies), or (b) primarily for and in +connection with specific products or compilations that contain the +covered work, unless you entered into that arrangement, or that patent +license was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + +### 12. No Surrender of Others' Freedom. + +If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under +this License and any other pertinent obligations, then as a +consequence you may not convey it at all. For example, if you agree to +terms that obligate you to collect a royalty for further conveying +from those to whom you convey the Program, the only way you could +satisfy both those terms and this License would be to refrain entirely +from conveying the Program. + +### 13. Use with the GNU Affero General Public License. 
+ +Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + +### 14. Revised Versions of this License. + +The Free Software Foundation may publish revised and/or new versions +of the GNU General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in +detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies that a certain numbered version of the GNU General Public +License "or any later version" applies to it, you have the option of +following the terms and conditions either of that numbered version or +of any later version published by the Free Software Foundation. If the +Program does not specify a version number of the GNU General Public +License, you may choose any version ever published by the Free +Software Foundation. + +If the Program specifies that a proxy can decide which future versions +of the GNU General Public License can be used, that proxy's public +statement of acceptance of a version permanently authorizes you to +choose that version for the Program. + +Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + +### 15. Disclaimer of Warranty. + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT +WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND +PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR +CORRECTION. + +### 16. Limitation of Liability. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR +CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES +ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT +NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR +LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM +TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +### 17. Interpretation of Sections 15 and 16. + +If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ +END OF TERMS AND CONDITIONS + +## How to Apply These Terms to Your New Programs + +If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these +terms. + +To do so, attach the following notices to the program. It is safest to +attach them to the start of each source file to most effectively state +the exclusion of warranty; and each file should have at least the +"copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper +mail. + +If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + Copyright (C) + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands \`show w' and \`show c' should show the +appropriate parts of the General Public License. Of course, your +program's commands might be different; for a GUI interface, you would +use an "about box". + +You should also get your employer (if you work as a programmer) or +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. For more information on this, and how to apply and follow +the GNU GPL, see . + +The GNU General Public License does not permit incorporating your +program into proprietary programs. If your program is a subroutine +library, you may consider it more useful to permit linking proprietary +applications with the library. If this is what you want to do, use the +GNU Lesser General Public License instead of this License. But first, +please read . diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d70a3dd --- /dev/null +++ b/LICENSE @@ -0,0 +1,52 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. 
+ +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + + You must give any other recipients of the Work or Derivative Works a copy of this License; and + You must cause any modified files to carry prominent notices stating that You changed the files; and + You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and + If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. + +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS diff --git a/cargo-nightly-fmt.sh b/cargo-nightly-fmt.sh new file mode 100755 index 0000000..cf78bc2 --- /dev/null +++ b/cargo-nightly-fmt.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cargo +nightly fmt -- --check \ No newline at end of file diff --git a/client/consensus/beefy-etf/rpc/src/lib.rs b/client/consensus/beefy-etf/rpc/src/lib.rs index 988ef9d..68f4c03 100644 --- a/client/consensus/beefy-etf/rpc/src/lib.rs +++ b/client/consensus/beefy-etf/rpc/src/lib.rs @@ -218,7 +218,7 @@ mod tests { if response != not_ready { assert_eq!(response, expected); // Success - return + return; } tokio::time::sleep(std::time::Duration::from_millis(50)).await; } diff --git a/client/consensus/beefy-etf/src/communication/gossip.rs b/client/consensus/beefy-etf/src/communication/gossip.rs index ac2da4d..43befa3 100644 --- a/client/consensus/beefy-etf/src/communication/gossip.rs +++ b/client/consensus/beefy-etf/src/communication/gossip.rs @@ -23,12 +23,11 @@ use sc_network_gossip::{MessageIntent, ValidationResult, Validator, ValidatorCon use sp_runtime::traits::{Block, Hash, Header, NumberFor}; use codec::{Decode, DecodeAll, Encode}; -use log::{debug, trace, info}; +use log::{debug, info, trace}; use parking_lot::{Mutex, RwLock}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use wasm_timer::Instant; - use crate::{ communication::{ benefit, cost, @@ -47,9 +46,7 @@ use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; #[cfg(not(feature = "bls-experimental"))] use sp_consensus_beefy_etf::ecdsa_crypto::{AuthorityId, Signature}; -use sp_consensus_beefy_etf::{ - ValidatorSet, ValidatorSetId, VoteMessage, -}; +use sp_consensus_beefy_etf::{ValidatorSet, ValidatorSetId, VoteMessage}; // Timeout for rebroadcasting messages. 
#[cfg(not(test))] @@ -159,12 +156,13 @@ impl Filter { f.start = cfg.start; f.end = cfg.end; }, - _ => + _ => { self.inner = Some(FilterInner { start: cfg.start, end: cfg.end, validator_set: cfg.validator_set.clone(), - }), + }) + }, } } @@ -307,15 +305,15 @@ where .unwrap_or(false) { debug!(target: LOG_TARGET, "Message from voter not in validator set: {}", vote.id); - return Action::Discard(cost::UNKNOWN_VOTER) + return Action::Discard(cost::UNKNOWN_VOTER); } } if BeefyKeystore::verify(&vote.id, &vote.signature, &vote.commitment.encode()) { - info!( - target: LOG_TARGET, - "🎲 The etf signature was verified", - ); + info!( + target: LOG_TARGET, + "🎲 The etf signature was verified", + ); Action::Keep(self.votes_topic, benefit::VOTE_MESSAGE) } else { debug!( @@ -348,7 +346,7 @@ where } if guard.is_already_proven(round) { - return Action::Discard(benefit::NOT_INTERESTED) + return Action::Discard(benefit::NOT_INTERESTED); } // Verify justification signatures. @@ -466,7 +464,7 @@ where let filter = self.gossip_filter.read(); Box::new(move |_who, intent, _topic, mut data| { if let MessageIntent::PeriodicRebroadcast = intent { - return do_rebroadcast + return do_rebroadcast; } match GossipMessage::::decode_all(&mut data) { @@ -570,7 +568,8 @@ pub(crate) mod tests { #[test] fn should_validate_messages() { let keys = vec![Keyring::::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); + let validator_set = + ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); let (gv, mut report_stream) = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); let sender = PeerId::random(); @@ -670,8 +669,12 @@ pub(crate) mod tests { assert_eq!(report_stream.try_recv().unwrap(), expected_report); // reject proof, bad signatures (Bob instead of Alice) - let bad_validator_set = - ValidatorSet::::new(vec![Keyring::Bob.public()], vec![Keyring::Bob.public()], 0).unwrap(); + let bad_validator_set = ValidatorSet::::new( + vec![Keyring::Bob.public()], + vec![Keyring::Bob.public()], + 0, + ) + .unwrap(); let proof = dummy_proof(21, &bad_validator_set); let encoded_proof = GossipMessage::::FinalityProof(proof).encode(); let res = gv.validate(&mut context, &sender, &encoded_proof); @@ -684,7 +687,8 @@ pub(crate) mod tests { #[test] fn messages_allowed_and_expired() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); + let validator_set = + ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); let (gv, _) = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set }); let sender = sc_network::PeerId::random(); @@ -720,7 +724,8 @@ pub(crate) mod tests { assert!(allowed(&sender, intent, &topic, &mut encoded_proof)); assert!(!expired(topic, &mut encoded_proof)); // using wrong set_id -> !allowed, expired - let bad_validator_set = ValidatorSet::::new(keys.clone(), keys.clone(), 1).unwrap(); + let bad_validator_set = + ValidatorSet::::new(keys.clone(), keys.clone(), 1).unwrap(); let proof = dummy_proof(2, &bad_validator_set); let mut encoded_proof = GossipMessage::::FinalityProof(proof).encode(); assert!(!allowed(&sender, intent, &topic, &mut encoded_proof)); @@ -761,7 +766,8 @@ pub(crate) mod tests { #[test] fn messages_rebroadcast() { let keys = vec![Keyring::Alice.public()]; - let validator_set = ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); + let validator_set = + 
ValidatorSet::::new(keys.clone(), keys.clone(), 0).unwrap(); let (gv, _) = GossipValidator::::new(Arc::new(Mutex::new(KnownPeers::new()))); gv.update_filter(GossipFilterCfg { start: 0, end: 10, validator_set: &validator_set }); let sender = sc_network::PeerId::random(); diff --git a/client/consensus/beefy-etf/src/communication/mod.rs b/client/consensus/beefy-etf/src/communication/mod.rs index d0b7258..3c93368 100644 --- a/client/consensus/beefy-etf/src/communication/mod.rs +++ b/client/consensus/beefy-etf/src/communication/mod.rs @@ -62,7 +62,6 @@ pub(crate) mod beefy_protocol_name { } } - /// Returns the configuration value to put in /// [`sc_network::config::FullNetworkConfiguration`]. /// For standard protocol name see [`beefy_protocol_name::gossip_protocol_name`]. diff --git a/client/consensus/beefy-etf/src/communication/request_response/outgoing_requests_engine.rs b/client/consensus/beefy-etf/src/communication/request_response/outgoing_requests_engine.rs index 3340655..910054d 100644 --- a/client/consensus/beefy-etf/src/communication/request_response/outgoing_requests_engine.rs +++ b/client/consensus/beefy-etf/src/communication/request_response/outgoing_requests_engine.rs @@ -111,7 +111,7 @@ impl OnDemandJustificationsEngine { let live = self.live_peers.lock(); while let Some(peer) = self.peers_cache.pop_front() { if live.contains(&peer) { - return Some(peer) + return Some(peer); } } None @@ -145,7 +145,7 @@ impl OnDemandJustificationsEngine { pub fn request(&mut self, block: NumberFor, active_set: ValidatorSet) { // ignore new requests while there's already one pending if matches!(self.state, State::AwaitingResponse(_, _, _)) { - return + return; } self.reset_peers_cache_for_block(block); @@ -236,7 +236,7 @@ impl OnDemandJustificationsEngine { let (peer, req_info, resp) = match &mut self.state { State::Idle => { futures::future::pending::<()>().await; - return ResponseInfo::Pending + return ResponseInfo::Pending; }, State::AwaitingResponse(peer, req_info, receiver) => { let resp = receiver.await; diff --git a/client/consensus/beefy-etf/src/import.rs b/client/consensus/beefy-etf/src/import.rs index 2b82a49..b76c7a4 100644 --- a/client/consensus/beefy-etf/src/import.rs +++ b/client/consensus/beefy-etf/src/import.rs @@ -106,7 +106,7 @@ where .map_err(|e| ImportError(e.to_string()))? .ok_or_else(|| ImportError("Unknown BEEFY genesis".to_string()))?; if number < beefy_genesis { - return Err(ImportError("BEEFY genesis is set for future block".to_string())) + return Err(ImportError("BEEFY genesis is set for future block".to_string())); } let validator_set = self .runtime @@ -155,7 +155,7 @@ where // The block is imported as part of some chain sync. // The voter doesn't need to process it now. // It will be detected and processed as part of the voter state init. 
- return Ok(inner_import_result) + return Ok(inner_import_result); }, } diff --git a/client/consensus/beefy-etf/src/justification.rs b/client/consensus/beefy-etf/src/justification.rs index b59aacd..071dec7 100644 --- a/client/consensus/beefy-etf/src/justification.rs +++ b/client/consensus/beefy-etf/src/justification.rs @@ -26,9 +26,7 @@ use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; #[cfg(not(feature = "bls-experimental"))] use sp_consensus_beefy_etf::ecdsa_crypto::{AuthorityId, Signature}; -use sp_consensus_beefy_etf::{ - ValidatorSet, ValidatorSetId, VersionedFinalityProof, -}; +use sp_consensus_beefy_etf::{ValidatorSet, ValidatorSetId, VersionedFinalityProof}; use sp_runtime::traits::{Block as BlockT, NumberFor}; /// A finality proof with matching BEEFY authorities' signatures. @@ -38,8 +36,9 @@ pub(crate) fn proof_block_num_and_set_id( proof: &BeefyVersionedFinalityProof, ) -> (NumberFor, ValidatorSetId) { match proof { - VersionedFinalityProof::V1(sc) => - (sc.commitment.block_number, sc.commitment.validator_set_id), + VersionedFinalityProof::V1(sc) => { + (sc.commitment.block_number, sc.commitment.validator_set_id) + }, } } @@ -64,11 +63,11 @@ pub(crate) fn verify_with_validator_set( let mut signatures_checked = 0u32; match proof { VersionedFinalityProof::V1(signed_commitment) => { - if signed_commitment.signatures.len() != validator_set.len() || - signed_commitment.commitment.validator_set_id != validator_set.id() || - signed_commitment.commitment.block_number != target_number + if signed_commitment.signatures.len() != validator_set.len() + || signed_commitment.commitment.validator_set_id != validator_set.id() + || signed_commitment.commitment.block_number != target_number { - return Err((ConsensusError::InvalidJustification, 0)) + return Err((ConsensusError::InvalidJustification, 0)); } // Arrangement of signatures in the commitment should be in the same order @@ -107,11 +106,11 @@ pub(crate) fn verify_with_validator_set( let mut signatures_checked = 0u32; match proof { VersionedFinalityProof::V1(signed_commitment) => { - if signed_commitment.signatures.len() != validator_set.len() || - signed_commitment.commitment.validator_set_id != validator_set.id() || - signed_commitment.commitment.block_number != target_number + if signed_commitment.signatures.len() != validator_set.len() + || signed_commitment.commitment.validator_set_id != validator_set.id() + || signed_commitment.commitment.block_number != target_number { - return Err((ConsensusError::InvalidJustification, 0)) + return Err((ConsensusError::InvalidJustification, 0)); } // Arrangement of signatures in the commitment should be in the same order @@ -169,7 +168,8 @@ pub(crate) mod tests { #[test] fn should_verify_with_validator_set() { let keys = &[Keyring::Alice, Keyring::Bob, Keyring::Charlie]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); // build valid justification let block_num = 42; @@ -235,7 +235,8 @@ pub(crate) mod tests { #[test] fn should_decode_and_verify_finality_proof() { let keys = &[Keyring::Alice, Keyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); let block_num = 1; // build valid justification diff --git a/client/consensus/beefy-etf/src/keystore.rs 
b/client/consensus/beefy-etf/src/keystore.rs index aec29eb..3f6c3c1 100644 --- a/client/consensus/beefy-etf/src/keystore.rs +++ b/client/consensus/beefy-etf/src/keystore.rs @@ -16,20 +16,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use sp_application_crypto::{ - key_types::BEEFY as BEEFY_KEY_TYPE, - AppCrypto, - RuntimeAppPublic -}; +use sp_application_crypto::{key_types::BEEFY as BEEFY_KEY_TYPE, AppCrypto, RuntimeAppPublic}; use sp_consensus_beefy_etf::{ - bls_crypto::AuthorityId as BeefyId, - AuthorityIdBound, - BeefyAuthorityId, - BeefySignatureHasher + bls_crypto::AuthorityId as BeefyId, AuthorityIdBound, BeefyAuthorityId, BeefySignatureHasher, }; use sp_core::ecdsa; #[cfg(feature = "bls-experimental")] -use sp_core::{bls377, ecdsa_bls377, crypto::KeyTypeId}; +use sp_core::{bls377, crypto::KeyTypeId, ecdsa_bls377}; use sp_crypto_hashing::keccak_256; use sp_keystore::KeystorePtr; @@ -110,8 +103,7 @@ impl BeefyKeystore { #[cfg(feature = "bls-experimental")] bls377::CRYPTO_ID => { - let public: bls377::Public = - bls377::Public::try_from(public.as_slice()).unwrap(); + let public: bls377::Public = bls377::Public::try_from(public.as_slice()).unwrap(); let sig = store .bls377_sign(BEEFY_KEY_TYPE, &public, &message) .map_err(|e| error::Error::Keystore(e.to_string()))? @@ -207,51 +199,45 @@ impl BeefyKeystore { BeefyAuthorityId::::verify(public, sig, message) } - /// produces a BLS signature on the message - /// using ETF round keys derived ad-hoc (via ACSS.Recover) - #[cfg(feature = "bls-experimental")] - pub fn etf_sign( - &self, - public: &AuthorityId, - pok_bytes: &[u8], - message: &[u8], - threshold: u8, - ) -> Result< - (BeefyId, ::Signature), - error::Error - > { - // debug!( - // target: LOG_TARGET, - // "🎲 [ETF][etf_sign] Public: {:?}, pok_bytes: {:?}, message: {:?}, threshold: {:?}", public, pok_bytes, message, threshold); - let store = self - .0 - .clone() - .ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; - - let public: bls377::Public = bls377::Public::try_from(public.as_slice()).unwrap(); - // debug!(target: LOG_TARGET, "🎲 [ETF][etf_sign] Public: {:?}", public); - let (etf_pubkey_bytes, sig) = store - .acss_recover(BEEFY_KEY_TYPE, &public, pok_bytes, message, threshold) - .map_err(|e| { - log::error!(target: LOG_TARGET, "🎲 [ETF][etf_sign] Error: {:?}", e); - error::Error::Signature(format!( - "Failed to recover a key from the provided proof of knowledge" - )) - })?; + /// produces a BLS signature on the message + /// using ETF round keys derived ad-hoc (via ACSS.Recover) + #[cfg(feature = "bls-experimental")] + pub fn etf_sign( + &self, + public: &AuthorityId, + pok_bytes: &[u8], + message: &[u8], + threshold: u8, + ) -> Result<(BeefyId, ::Signature), error::Error> { + // debug!( + // target: LOG_TARGET, + // "🎲 [ETF][etf_sign] Public: {:?}, pok_bytes: {:?}, message: {:?}, threshold: {:?}", + // public, pok_bytes, message, threshold); + let store = self.0.clone().ok_or_else(|| error::Error::Keystore("no Keystore".into()))?; + + let public: bls377::Public = bls377::Public::try_from(public.as_slice()).unwrap(); + // debug!(target: LOG_TARGET, "🎲 [ETF][etf_sign] Public: {:?}", public); + let (etf_pubkey_bytes, sig) = store + .acss_recover(BEEFY_KEY_TYPE, &public, pok_bytes, message, threshold) + .map_err(|e| { + log::error!(target: LOG_TARGET, "🎲 [ETF][etf_sign] Error: {:?}", e); + error::Error::Signature(format!( + "Failed to recover a key from the provided proof of knowledge" + )) + })?; let mut 
signature_byte_array: &[u8] = sig.as_ref(); - let signature = ::Signature::decode( - &mut signature_byte_array, - ).map_err(|_| { - error::Error::Signature(format!( - "invalid signature {:?} for key {:?}", - signature_byte_array, public - )) - })?; -; - let beef: BeefyId = BeefyId::from(etf_pubkey_bytes); - Ok((beef, signature)) - } + let signature = + ::Signature::decode(&mut signature_byte_array) + .map_err(|_| { + error::Error::Signature(format!( + "invalid signature {:?} for key {:?}", + signature_byte_array, public + )) + })?; + let beef: BeefyId = BeefyId::from(etf_pubkey_bytes); + Ok((beef, signature)) + } } impl From> for BeefyKeystore @@ -346,10 +332,8 @@ pub mod tests { }, #[cfg(feature = "bls-experimental")] bls377::CRYPTO_ID => { - let pk = store - .bls377_generate_new(key_type, optional_seed.as_deref()) - .ok() - .unwrap(); + let pk = + store.bls377_generate_new(key_type, optional_seed.as_deref()).ok().unwrap(); AuthorityId::decode(&mut pk.as_ref()).unwrap() }, _ => panic!("Requested CRYPTO_ID is not supported by the BEEFY Keyring"), diff --git a/client/consensus/beefy-etf/src/lib.rs b/client/consensus/beefy-etf/src/lib.rs index 3b7039f..0145bed 100644 --- a/client/consensus/beefy-etf/src/lib.rs +++ b/client/consensus/beefy-etf/src/lib.rs @@ -39,12 +39,12 @@ use sc_client_api::{Backend, BlockBackend, BlockchainEvents, FinalityNotificatio use sc_consensus::BlockImport; use sc_network::{NetworkRequest, NotificationService, ProtocolName}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork, Syncing as GossipSyncing}; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; -use sp_consensus::{Error as ConsensusError, SyncOracle}; use sc_transaction_pool_api::{ LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool, }; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; +use sp_consensus::{Error as ConsensusError, SyncOracle}; #[cfg(feature = "bls-experimental")] use sp_consensus_beefy_etf::bls_crypto::AuthorityId; @@ -52,10 +52,7 @@ use sp_consensus_beefy_etf::bls_crypto::AuthorityId; use sp_consensus_beefy_etf::ecdsa_crypto::AuthorityId; use sp_consensus_beefy_etf::{ - BeefyApi, - ConsensusLog, MmrRootHash, - PayloadProvider, ValidatorSet, - BEEFY_ENGINE_ID, + BeefyApi, ConsensusLog, MmrRootHash, PayloadProvider, ValidatorSet, BEEFY_ENGINE_ID, }; use sp_keystore::KeystorePtr; use sp_mmr_primitives::MmrApi; @@ -386,7 +383,7 @@ where beefy_genesis, ) .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; - break state + break state; } if *header.number() == beefy_genesis { @@ -409,7 +406,7 @@ where min_block_delta, beefy_genesis, ) - .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))? + .ok_or_else(|| Error::Backend("Invalid BEEFY chain".into()))?; } if let Some(active) = find_authorities_change::(&header) { @@ -472,7 +469,7 @@ where ); state.init_session_at(new_session_start, validator_set, key_store, metrics); } - return Ok(state) + return Ok(state); } // No valid voter-state persisted, re-initialize from pallet genesis. @@ -564,7 +561,7 @@ pub async fn start_beefy_gadget( // OffchainTransactionPoolFactory::new(transaction_pool.clone()); // runtime.runtime_api().register_extension(offchain_tx_pool_factory // .offchain_transaction_pool(::Hash::default())); //TODO - + // We re-create and re-run the worker in this loop in order to quickly reinit and resume after // select recoverable errors. 
loop { @@ -619,15 +616,17 @@ pub async fn start_beefy_gadget( futures::future::Either::Left(((error::Error::ConsensusReset, reuse_comms), _)) => { error!(target: LOG_TARGET, "🥩 Error: {:?}. Restarting voter.", error::Error::ConsensusReset); beefy_comms = reuse_comms; - continue + continue; }, // On other errors, bring down / finish the task. - futures::future::Either::Left(((worker_err, _), _)) => - error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", worker_err), - futures::future::Either::Right((odj_handler_err, _)) => - error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", odj_handler_err), + futures::future::Either::Left(((worker_err, _), _)) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", worker_err) + }, + futures::future::Either::Right((odj_handler_err, _)) => { + error!(target: LOG_TARGET, "🥩 Error: {:?}. Terminating.", odj_handler_err) + }, }; - return + return; } } @@ -697,7 +696,7 @@ where "🥩 BEEFY pallet available: block {:?} beefy genesis {:?}", notif.header.number(), start ); - return Ok((start, notif.header)) + return Ok((start, notif.header)); } } } @@ -732,16 +731,17 @@ where loop { debug!(target: LOG_TARGET, "🥩 Looking for auth set change at block number: {:?}", *header.number()); if let Ok(Some(active)) = runtime.runtime_api().validator_set(header.hash()) { - return Ok(active) + return Ok(active); } else { match find_authorities_change::(&header) { Some(active) => return Ok(active), // Move up the chain. Ultimately we'll get it from chain genesis state, or error out // there. - None => + None => { header = wait_for_parent_header(blockchain, header, HEADER_SYNC_DELAY) .await - .map_err(|e| Error::Backend(e.to_string()))?, + .map_err(|e| Error::Backend(e.to_string()))? + }, } } } diff --git a/client/consensus/beefy-etf/src/round.rs b/client/consensus/beefy-etf/src/round.rs index 06dfc91..a273611 100644 --- a/client/consensus/beefy-etf/src/round.rs +++ b/client/consensus/beefy-etf/src/round.rs @@ -22,10 +22,10 @@ use codec::{Decode, Encode}; use log::{debug, info}; // #[cfg(feature = "bls-experimental")] -use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; +use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; // #[cfg(not(feature = "bls-experimental"))] -// use sp_consensus_beefy_etf::ecdsa_crypto::{AuthorityId, Signature}; +// use sp_consensus_beefy_etf::ecdsa_crypto::{AuthorityId, Signature}; use sp_consensus_beefy_etf::{ Commitment, EquivocationProof, SignedCommitment, ValidatorSet, ValidatorSetId, VoteMessage, @@ -45,7 +45,7 @@ pub(crate) struct RoundTracker { impl RoundTracker { fn add_vote(&mut self, vote: (AuthorityId, Signature)) -> bool { if self.votes.contains_key(&vote.0) { - return false + return false; } self.votes.insert(vote.0, vote.1); @@ -134,7 +134,7 @@ where if num < self.session_start || Some(num) <= self.best_done { debug!(target: LOG_TARGET, "🥩 received vote for old stale round {:?}, ignoring", num); - return VoteImportResult::Stale + return VoteImportResult::Stale; } else if vote.commitment.validator_set_id != self.validator_set_id() { debug!( target: LOG_TARGET, @@ -142,14 +142,14 @@ where self.validator_set_id(), vote, ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } else if !self.validators().iter().any(|id| &vote.id == id) { debug!( target: LOG_TARGET, "🥩 received vote {:?} from validator that is not in the validator set, ignoring", vote ); - return VoteImportResult::Invalid + return VoteImportResult::Invalid; } if let Some(previous_vote) = 
self.previous_votes.get(&vote_key) { @@ -162,7 +162,7 @@ where return VoteImportResult::Equivocation(EquivocationProof { first: previous_vote.clone(), second: vote, - }) + }); } } else { // this is the first vote sent by `id` for `num`, all good @@ -171,11 +171,11 @@ where // add valid vote let round = self.rounds.entry(vote.commitment.clone()).or_default(); - if round.add_vote((vote.id, vote.signature)) && - round.is_done(threshold(self.validator_set.len())) + if round.add_vote((vote.id, vote.signature)) + && round.is_done(threshold(self.validator_set.len())) { if let Some(round) = self.rounds.remove_entry(&vote.commitment) { - return VoteImportResult::RoundConcluded(self.signed_commitment(round)) + return VoteImportResult::RoundConcluded(self.signed_commitment(round)); } } VoteImportResult::Ok diff --git a/client/consensus/beefy-etf/src/tests.rs b/client/consensus/beefy-etf/src/tests.rs index f8fe82b..34bc8cd 100644 --- a/client/consensus/beefy-etf/src/tests.rs +++ b/client/consensus/beefy-etf/src/tests.rs @@ -213,7 +213,8 @@ impl TestNetFactory for BeefyTestNet { Self::PeerData, ) { let keys = &[BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 0).unwrap(); let api = Arc::new(TestApi::new(self.beefy_genesis, &validator_set, GOOD_MMR_ROOT)); let inner = BlockImportAdapter::new(client.clone()); let (block_import, voter_links, rpc_links) = @@ -598,7 +599,8 @@ async fn beefy_finalizing_blocks() { sp_tracing::try_init_simple(); let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); let session_len = 10; let min_block_delta = 4; @@ -638,7 +640,8 @@ async fn lagging_validators() { sp_tracing::try_init_simple(); let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); let session_len = 30; let min_block_delta = 1; @@ -706,7 +709,8 @@ async fn correct_beefy_payload() { sp_tracing::try_init_simple(); let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); let session_len = 20; let min_block_delta = 2; @@ -930,7 +934,8 @@ async fn on_demand_beefy_justification_sync() { let all_peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie, BeefyKeyring::Dave]; - let validator_set = ValidatorSet::new(make_beefy_ids(&all_peers), make_beefy_ids(&all_peers), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&all_peers), make_beefy_ids(&all_peers), 0).unwrap(); let session_len = 5; let min_block_delta = 4; @@ -1092,7 +1097,8 @@ async fn should_initialize_voter_at_custom_genesis() { // now re-init after genesis changes // should ignore existing aux db state and reinit at new genesis - let new_validator_set = ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 42).unwrap(); + let new_validator_set = + 
ValidatorSet::new(make_beefy_ids(keys), make_beefy_ids(keys), 42).unwrap(); let new_pallet_genesis = 10; let api = TestApi::new(new_pallet_genesis, &new_validator_set, GOOD_MMR_ROOT); @@ -1164,7 +1170,8 @@ async fn should_initialize_voter_when_last_final_is_session_boundary() { // verify block 10 is correctly marked as finalized assert_eq!(persisted_state.best_beefy(), 10); assert_eq!(persisted_state.best_grandpa_number(), 13); - // verify next vote target is diff-power-of-two block 12 -> no longer the case with modifications + // verify next vote target is diff-power-of-two block 12 -> no longer the case with + // modifications assert_eq!(persisted_state.voting_oracle().voting_target(), Some(13)); // verify state also saved to db @@ -1341,7 +1348,8 @@ async fn beefy_finalizing_after_pallet_genesis() { sp_tracing::try_init_simple(); let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 14).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 14).unwrap(); let session_len = 10; let min_block_delta = 1; let pallet_genesis = 15; @@ -1370,10 +1378,10 @@ async fn beefy_finalizing_after_pallet_genesis() { finalize_block_and_wait_for_beefy(&net, peers.clone(), &hashes[21], &[20, 21]).await; } -// note to the reviewer: for the moment, equivocations do not work as intended. +// note to the reviewer: for the moment, equivocations do not work as intended. // This is a consequence of the changes made to the beefy worker, where the worker -// signs a VoteMessage with an empty message payload. -// In the next phase of the protocol when we address interoperability, +// signs a VoteMessage with an empty message payload. +// In the next phase of the protocol when we address interoperability, // we will revisit this logic to complete the solution. 
// for now, lack of equivocation reporting does not cause any negative side effects @@ -1382,8 +1390,8 @@ async fn beefy_finalizing_after_pallet_genesis() { // sp_tracing::try_init_simple(); // let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; -// let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), 0).unwrap(); -// let session_len = 10; +// let validator_set = ValidatorSet::new(make_beefy_ids(&peers), make_beefy_ids(&peers), +// 0).unwrap(); let session_len = 10; // let min_block_delta = 4; // let mut net = BeefyTestNet::new(3); @@ -1456,7 +1464,8 @@ async fn gossipped_finality_proofs() { let validators = [BeefyKeyring::Alice, BeefyKeyring::Bob, BeefyKeyring::Charlie]; // Only Alice and Bob are running the voter -> finality threshold not reached let peers = [BeefyKeyring::Alice, BeefyKeyring::Bob]; - let validator_set = ValidatorSet::new(make_beefy_ids(&validators), make_beefy_ids(&validators), 0).unwrap(); + let validator_set = + ValidatorSet::new(make_beefy_ids(&validators), make_beefy_ids(&validators), 0).unwrap(); let session_len = 10; let min_block_delta = 1; diff --git a/client/consensus/beefy-etf/src/worker.rs b/client/consensus/beefy-etf/src/worker.rs index 14149b7..5639b57 100644 --- a/client/consensus/beefy-etf/src/worker.rs +++ b/client/consensus/beefy-etf/src/worker.rs @@ -35,14 +35,13 @@ use codec::{Codec, Decode, DecodeAll, Encode}; use futures::{stream::Fuse, FutureExt, StreamExt}; use log::{debug, error, info, log_enabled, trace, warn}; use sc_client_api::{Backend, FinalityNotification, FinalityNotifications, HeaderBackend}; -use sc_utils::notification::NotificationReceiver; -use sp_api::ProvideRuntimeApi; -use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; -use sp_consensus::SyncOracle; use sc_transaction_pool_api::{ LocalTransactionPool, OffchainTransactionPoolFactory, TransactionPool, }; -use sp_api::ApiExt; +use sc_utils::notification::NotificationReceiver; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_arithmetic::traits::{AtLeast32Bit, Saturating}; +use sp_consensus::SyncOracle; #[cfg(feature = "bls-experimental")] use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; @@ -51,9 +50,9 @@ use sp_consensus_beefy_etf::bls_crypto::{AuthorityId, Signature}; use sp_consensus_beefy_etf::ecdsa_crypto::{AuthorityId, Signature}; use sp_consensus_beefy_etf::{ - check_equivocation_proof, - BeefyApi, BeefySignatureHasher, Commitment, EquivocationProof, PayloadProvider, ValidatorSetId, - ValidatorSet, VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, Payload, known_payloads, + check_equivocation_proof, known_payloads, BeefyApi, BeefySignatureHasher, Commitment, + EquivocationProof, Payload, PayloadProvider, ValidatorSet, ValidatorSetId, + VersionedFinalityProof, VoteMessage, BEEFY_ENGINE_ID, }; use sp_runtime::{ generic::BlockId, @@ -116,24 +115,24 @@ impl VoterOracle { let mut validate = || -> bool { let best_grandpa = *grandpa_header.number(); if sessions.is_empty() || best_beefy > best_grandpa { - return false + return false; } for (idx, session) in sessions.iter().enumerate() { let start = session.session_start(); if session.validators().is_empty() { - return false + return false; } if start > best_grandpa || start <= prev_start { - return false + return false; } #[cfg(not(test))] if let Some(prev_id) = prev_validator_id { if session.validator_set_id() <= prev_id { - return false + return false; } } if idx != 0 && session.mandatory_done() { - return false + return false; } prev_start = 
session.session_start(); prev_validator_id = Some(session.validator_set_id()); @@ -273,7 +272,6 @@ impl VoterOracle { target ); target - } /// if we are running etf, we vote on every grandpa block for now @@ -281,7 +279,6 @@ impl VoterOracle { pub fn voting_target(&self) -> Option> { let best_grandpa = *self.best_grandpa_block_header.number(); Some(best_grandpa) - } } @@ -523,7 +520,7 @@ where ) -> Result<(), Error> { let block_num = vote.commitment.block_number; match self.voting_oracle().triage_round(block_num)? { - RoundAction::Process => + RoundAction::Process => { if let Some(finality_proof) = self.handle_vote(vote)? { let gossip_proof = GossipMessage::::FinalityProof(finality_proof); let encoded_proof = gossip_proof.encode(); @@ -532,7 +529,8 @@ where encoded_proof, true, ); - }, + } + }, RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_votes), RoundAction::Enqueue => error!(target: LOG_TARGET, "🥩 unexpected vote: {:?}.", vote), }; @@ -564,9 +562,9 @@ where } else { metric_inc!(self.metrics, beefy_buffered_justifications_dropped); warn!( - target: LOG_TARGET, - "🥩 Buffer justification dropped for round: {:?}.", block_num - ); + target: LOG_TARGET, + "🥩 Buffer justification dropped for round: {:?}.", block_num + ); } }, RoundAction::Drop => metric_inc!(self.metrics, beefy_stale_justifications), @@ -592,7 +590,7 @@ where // New state is persisted after finalization. self.finalize(finality_proof.clone())?; metric_inc!(self.metrics, beefy_good_votes_processed); - return Ok(Some(finality_proof)) + return Ok(Some(finality_proof)); }, VoteImportResult::Ok => { // Persist state after handling mandatory block vote. @@ -631,7 +629,7 @@ where if block_num <= self.persisted_state.voting_oracle.best_beefy_block { // we've already finalized this round before, short-circuit. - return Ok(()) + return Ok(()); } // Finalize inner round and update voting_oracle state. 
@@ -675,21 +673,16 @@ where .gossip_filter_config() .map(|filter| self.comms.gossip_validator.update_filter(filter))?; - // finally update the latest signatures in storage - let best_header = self.persisted_state - .voting_oracle - .best_grandpa_block_header - .clone(); - let best_hash = best_header.hash(); + // finally update the latest signatures in storage + let best_header = self.persisted_state.voting_oracle.best_grandpa_block_header.clone(); + let best_hash = best_header.hash(); - let mut signatures: Vec> = match finality_proof { - VersionedFinalityProof::V1(ref sc) => sc.signatures.clone() - }; + let mut signatures: Vec> = match finality_proof { + VersionedFinalityProof::V1(ref sc) => sc.signatures.clone(), + }; - let signatures: Vec> = signatures.iter() - .flatten() - .map(|sig| sig.encode()) - .collect::>(); + let signatures: Vec> = + signatures.iter().flatten().map(|sig| sig.encode()).collect::>(); info!( target: LOG_TARGET, @@ -708,11 +701,7 @@ where runtime_api .register_extension(self.offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); - runtime_api.submit_unsigned_pulse( - best_hash, - signatures, - block_num, - ); + runtime_api.submit_unsigned_pulse(best_hash, signatures, block_num); Ok(()) } @@ -803,21 +792,22 @@ where target: LOG_TARGET, "🥩 Missing validator id - can't vote for: {:?}", target_hash ); - return Ok(()) + return Ok(()); }; - if let Some((signature, _id, commitment)) = self.get_signed_payload( - target_number, - target_header, - // target_hash, - validator_set_id, - authority_id.clone(), - ).map_err(|err| { - error!(target: LOG_TARGET, "🥩 Error calculating the signature {:?}", err); - // return Ok(()); - return err; - })? { - + if let Some((signature, _id, commitment)) = self + .get_signed_payload( + target_number, + target_header, + // target_hash, + validator_set_id, + authority_id.clone(), + ) + .map_err(|err| { + error!(target: LOG_TARGET, "🥩 Error calculating the signature {:?}", err); + // return Ok(()); + return err; + })? { let vote = VoteMessage { commitment, id: authority_id, signature }; if let Some(finality_proof) = self.handle_vote(vote.clone()).map_err(|err| { error!(target: LOG_TARGET, "🥩 Error handling self vote: {}", err); @@ -833,7 +823,7 @@ where let encoded_vote = GossipMessage::::Vote(vote).encode(); self.comms.gossip_engine.gossip_message(votes_topic::(), encoded_vote, false); } - + // Persist state after vote to avoid double voting in case of voter restarts. 
self.persisted_state.best_voted = target_number; metric_set!(self.metrics, beefy_best_voted, target_number); @@ -857,14 +847,14 @@ where let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); - let (etf_authority_id, signature) = - match self.etf_extract(target_hash, authority_id.clone(), &encoded_commitment) { - Some(sig) => sig, - None => { - error!(target: LOG_TARGET, "🎲 Error calculating ETF signature"); - return Ok(None); - } - }; + let (etf_authority_id, signature) = + match self.etf_extract(target_hash, authority_id.clone(), &encoded_commitment) { + Some(sig) => sig, + None => { + error!(target: LOG_TARGET, "🎲 Error calculating ETF signature"); + return Ok(None); + }, + }; info!( target: LOG_TARGET, @@ -889,7 +879,7 @@ where hash } else { warn!(target: LOG_TARGET, "🥩 No MMR root digest found for: {:?}", target_hash); - return Ok(None) + return Ok(None); }; let commitment = Commitment { payload, block_number: target_number, validator_set_id }; let encoded_commitment = commitment.encode(); @@ -898,7 +888,7 @@ where Ok(sig) => sig, Err(err) => { warn!(target: LOG_TARGET, "🥩 Error signing commitment: {:?}", err); - return Ok(None) + return Ok(None); }, }; @@ -933,38 +923,36 @@ where } /// execute the ETF extract algorithm - /// outputs a (threshold) IBE secret and corresponding DLEQ proof - #[cfg(feature = "bls-experimental")] - fn etf_extract( - &mut self, - hash: B::Hash, - id: AuthorityId, - message: &[u8] - ) -> Option<(AuthorityId, Signature)> { - let runtime_api = self.runtime.runtime_api(); - - info!( + /// outputs a (threshold) IBE secret and corresponding DLEQ proof + #[cfg(feature = "bls-experimental")] + fn etf_extract( + &mut self, + hash: B::Hash, + id: AuthorityId, + message: &[u8], + ) -> Option<(AuthorityId, Signature)> { + let runtime_api = self.runtime.runtime_api(); + + info!( + target: LOG_TARGET, + "🎲 run ACSS recovery at best grandpa: #{:?}.", + hash + ); + if let Some(Some(validator_set)) = runtime_api.validator_set(hash).ok() { + debug!(target: LOG_TARGET, "🎲 [ETF] validator_set: {:?}", validator_set); + if let Some(Some(pok_bytes)) = runtime_api.read_share(hash, id.clone()).ok() { + debug!(target: LOG_TARGET, "🎲 [ETF] pok_bytes: {:?}", pok_bytes); + match self.key_store.etf_sign(&id, &pok_bytes, &message, validator_set.len() as u8) + { + Ok((pk, sig)) => return Some((pk, sig)), + Err(e) => error!(target: LOG_TARGET, "🎲 [ETF] Error signing: {:?}", e), + } + } + } + debug!( target: LOG_TARGET, - "🎲 run ACSS recovery at best grandpa: #{:?}.", - hash - ); - if let Some(Some(validator_set)) = runtime_api.validator_set(hash).ok() { - debug!(target: LOG_TARGET, "🎲 [ETF] validator_set: {:?}", validator_set); - if let Some(Some(pok_bytes)) = runtime_api.read_share(hash, id.clone()).ok() { - debug!(target: LOG_TARGET, "🎲 [ETF] pok_bytes: {:?}", pok_bytes); - match self - .key_store - .etf_sign(&id, &pok_bytes, &message, validator_set.len() as u8) - { - Ok((pk, sig)) => return Some((pk, sig)), - Err(e) => error!(target: LOG_TARGET, "🎲 [ETF] Error signing: {:?}", e), - } - } - } - debug!( - target: LOG_TARGET, - "🎲 [ETF] extract failed with id: {:?} and message: {:?}", - id, + "🎲 [ETF] extract failed with id: {:?} and message: {:?}", + id, message); None } @@ -1024,7 +1012,7 @@ where // Process finality notifications first since these drive the voter. 
notification = finality_notifications.next() => { if let Some(notif) = notification { - + if let Err(err) = self.handle_finality_notification(¬if) { break err; } @@ -1118,11 +1106,11 @@ where if !check_equivocation_proof::<_, _, BeefySignatureHasher>(&proof) { debug!(target: LOG_TARGET, "🥩 Skip report for bad equivocation {:?}", proof); - return Ok(()) + return Ok(()); } else if let Some(local_id) = self.key_store.authority_id(validators) { if offender_id == local_id { warn!(target: LOG_TARGET, "🥩 Skip equivocation report for own equivocation"); - return Ok(()) + return Ok(()); } } @@ -1150,7 +1138,7 @@ where target: LOG_TARGET, "🥩 Equivocation offender not part of the authority set." ); - return Ok(()) + return Ok(()); }, }; diff --git a/pallets/beefy-etf/src/equivocation.rs b/pallets/beefy-etf/src/equivocation.rs index e4b9ae7..1487348 100644 --- a/pallets/beefy-etf/src/equivocation.rs +++ b/pallets/beefy-etf/src/equivocation.rs @@ -208,7 +208,7 @@ where // Validate equivocation proof (check votes are different and signatures are valid). if !sp_consensus_beefy_etf::check_equivocation_proof(&equivocation_proof) { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProof.into()); } // Check that the session id for the membership proof is within the @@ -216,7 +216,7 @@ where let set_id_session_index = crate::SetIdSession::::get(set_id).ok_or(Error::::InvalidEquivocationProof)?; if session_index != set_id_session_index { - return Err(Error::::InvalidEquivocationProof.into()) + return Err(Error::::InvalidEquivocationProof.into()); } let offence = EquivocationOffence { @@ -248,7 +248,7 @@ impl Pallet { target: LOG_TARGET, "rejecting unsigned report equivocation transaction because it is not local/in-block." ); - return InvalidTransaction::Call.into() + return InvalidTransaction::Call.into(); }, } diff --git a/pallets/beefy-etf/src/lib.rs b/pallets/beefy-etf/src/lib.rs index 4ba8c4c..bcc7807 100644 --- a/pallets/beefy-etf/src/lib.rs +++ b/pallets/beefy-etf/src/lib.rs @@ -174,10 +174,7 @@ pub mod pallet { // use block number one instead of chain-genesis. let genesis_block = Some(One::one()); // by default, etf consensus will fail, must be intentionally seeded - Self { - authorities: Vec::new(), - genesis_block, - } + Self { authorities: Vec::new(), genesis_block } } } @@ -206,7 +203,6 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Report voter equivocation/misbehavior. This method will verify the /// equivocation proof and validate the given key ownership proof /// against the extracted offender. If both are valid, the offence @@ -363,11 +359,9 @@ impl Pallet { ); Ok(()) } - } impl Pallet { - /// Return the current validator set id pub fn validator_set_id() -> sp_consensus_beefy_etf::ValidatorSetId { >::get() @@ -376,7 +370,8 @@ impl Pallet { /// Return the current active BEEFY validator set. pub fn validator_set() -> Option> { let validators: BoundedVec = Authorities::::get(); - let commitments: BoundedVec = T::RoundCommitmentProvider::get(); + let commitments: BoundedVec = + T::RoundCommitmentProvider::get(); let id: sp_consensus_beefy_etf::ValidatorSetId = ValidatorSetId::::get(); ValidatorSet::::new(validators, commitments, id) } @@ -395,47 +390,48 @@ impl Pallet { T::EquivocationReportSystem::publish_evidence((equivocation_proof, key_owner_proof)).ok() } - fn change_authorities( - new: BoundedVec, - queued: BoundedVec, - ) { - Authorities::::put(&new); - - // for now.. 
never update the validator set id - let new_id = ValidatorSetId::::get(); - // let new_id = ValidatorSetId::::get() + 1u64; - // ValidatorSetId::::put(new_id); - - NextAuthorities::::put(&queued); - - // TODO: for now we assume the commitments are static - // we still need to implement authority rotation (ACSS Reshare + Recover - let commitments: BoundedVec = T::RoundCommitmentProvider::get(); - - if let Some(validator_set) = ValidatorSet::::new(new, commitments.clone(), new_id) { - let log = DigestItem::Consensus( - BEEFY_ENGINE_ID, - ConsensusLog::AuthoritiesChange(validator_set.clone()).encode(), - ); - frame_system::Pallet::::deposit_log(log); - - let next_id = new_id + 1; - if let Some(next_validator_set) = ValidatorSet::::new(queued, commitments, next_id) { - >::on_new_validator_set( - &validator_set, - &next_validator_set, - ); - } - } - } + // fn change_authorities( + // new: BoundedVec, + // queued: BoundedVec, + // ) { + // Authorities::::put(&new); + + // // for now.. never update the validator set id + // let new_id = ValidatorSetId::::get(); + // // let new_id = ValidatorSetId::::get() + 1u64; + // // ValidatorSetId::::put(new_id); + + // NextAuthorities::::put(&queued); + + // // TODO: for now we assume the commitments are static + // // we still need to implement authority rotation (ACSS Reshare + Recover + // let commitments: BoundedVec = + // T::RoundCommitmentProvider::get(); + + // if let Some(validator_set) = ValidatorSet::::new(new, commitments.clone(), + // new_id) { let log = DigestItem::Consensus( + // BEEFY_ENGINE_ID, + // ConsensusLog::AuthoritiesChange(validator_set.clone()).encode(), + // ); + // frame_system::Pallet::::deposit_log(log); + + // let next_id = new_id + 1; + // if let Some(next_validator_set) = ValidatorSet::::new(queued, commitments, + // next_id) { >::on_new_validator_set( + // &validator_set, + // &next_validator_set, + // ); + // } + // } + // } fn initialize(authorities: &Vec) -> Result<(), ()> { if authorities.is_empty() { - return Ok(()) + return Ok(()); } if !Authorities::::get().is_empty() { - return Err(()) + return Err(()); } let bounded_authorities = @@ -450,8 +446,9 @@ impl Pallet { let public_commitments: Vec = T::RoundCommitmentProvider::get().into(); - if let Some(validator_set) = ValidatorSet::::new( - authorities.clone(), public_commitments.clone(), id) { + if let Some(validator_set) = + ValidatorSet::::new(authorities.clone(), public_commitments.clone(), id) + { let next_id = id + 1; if let Some(next_validator_set) = ValidatorSet::::new(authorities.clone(), public_commitments, next_id) @@ -505,8 +502,8 @@ where T::MaxAuthorities::get(), ); } - let bounded_next_authorities = - BoundedVec::<_, T::MaxAuthorities>::truncate_from(next_authorities); + // let bounded_next_authorities = + // BoundedVec::<_, T::MaxAuthorities>::truncate_from(next_authorities); let next_queued_authorities = queued_validators.map(|(_, k)| k).collect::>(); if next_queued_authorities.len() as u32 > T::MaxAuthorities::get() { @@ -517,8 +514,8 @@ where T::MaxAuthorities::get(), ); } - let bounded_next_queued_authorities = - BoundedVec::<_, T::MaxAuthorities>::truncate_from(next_queued_authorities); + // let bounded_next_queued_authorities = + // BoundedVec::<_, T::MaxAuthorities>::truncate_from(next_queued_authorities); // Always issue a change on each `session`, even if validator set hasn't changed. // We want to have at least one BEEFY mandatory block per session. 
@@ -555,4 +552,4 @@ impl IsMember for Pallet { pub trait WeightInfo { fn report_equivocation(validator_count: u32, max_nominators_per_validator: u32) -> Weight; fn set_new_genesis() -> Weight; -} \ No newline at end of file +} diff --git a/pallets/beefy-etf/src/mock.rs b/pallets/beefy-etf/src/mock.rs index 5f091e0..1ba4a1e 100644 --- a/pallets/beefy-etf/src/mock.rs +++ b/pallets/beefy-etf/src/mock.rs @@ -42,9 +42,7 @@ pub use sp_consensus_beefy_etf::bls_crypto::AuthorityId as BeefyId; // #[cfg(not(feature = "bls-experimental"))] // pub use sp_consensus_beefy_etf::ecdsa_crypto::AuthorityId as BeefyId; -pub use sp_consensus_beefy_etf::{ - ConsensusLog, BEEFY_ENGINE_ID -}; +pub use sp_consensus_beefy_etf::{ConsensusLog, BEEFY_ENGINE_ID}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -272,16 +270,10 @@ impl ExtBuilder { .assimilate_storage(&mut t) .unwrap(); - let genesis_resharing = self - .commitments - .iter() - .map(|comm| (comm.clone(), vec![2])) - .collect(); + let genesis_resharing = + self.commitments.iter().map(|comm| (comm.clone(), vec![2])).collect(); - pallet_etf::GenesisConfig:: { - genesis_resharing: genesis_resharing, - round_pubkey: vec![1] - } + pallet_etf::GenesisConfig:: { genesis_resharing, round_pubkey: vec![1] } .assimilate_storage(&mut t) .unwrap(); @@ -301,10 +293,8 @@ impl ExtBuilder { staking_config.assimilate_storage(&mut t).unwrap(); - let beefy_config = pallet_beefy::GenesisConfig:: { - authorities: vec![], - genesis_block: None, - }; + let beefy_config = + pallet_beefy::GenesisConfig:: { authorities: vec![], genesis_block: None }; beefy_config.assimilate_storage(&mut t).unwrap(); diff --git a/pallets/beefy-etf/src/tests.rs b/pallets/beefy-etf/src/tests.rs index 6e0b63e..9f6701a 100644 --- a/pallets/beefy-etf/src/tests.rs +++ b/pallets/beefy-etf/src/tests.rs @@ -50,20 +50,20 @@ fn genesis_session_initializes_authorities() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - let authorities = beefy::Authorities::::get(); + let authorities = beefy::Authorities::::get(); - assert_eq!(authorities.len(), 4); - assert_eq!(want[0], authorities[0]); - assert_eq!(want[1], authorities[1]); + assert_eq!(authorities.len(), 4); + assert_eq!(want[0], authorities[0]); + assert_eq!(want[1], authorities[1]); - assert!(beefy::ValidatorSetId::::get() == 0); + assert!(beefy::ValidatorSetId::::get() == 0); - let next_authorities = beefy::NextAuthorities::::get(); + let next_authorities = beefy::NextAuthorities::::get(); - assert_eq!(next_authorities.len(), 4); - assert_eq!(want[0], next_authorities[0]); - assert_eq!(want[1], next_authorities[1]); - }); + assert_eq!(next_authorities.len(), 4); + assert_eq!(want[0], next_authorities[0]); + assert_eq!(want[1], next_authorities[1]); + }); } #[test] @@ -92,11 +92,8 @@ fn session_change_updates_authorities() { assert!(2 == beefy::ValidatorSetId::::get()); let want = beefy_log(ConsensusLog::AuthoritiesChange( - ValidatorSet::new( - vec![mock_beefy_id(2), mock_beefy_id(4)], - authorities.clone(), - 2 - ).unwrap(), + ValidatorSet::new(vec![mock_beefy_id(2), mock_beefy_id(4)], authorities.clone(), 2) + .unwrap(), )); let log = System::digest().logs[1].clone(); @@ -287,79 +284,79 @@ fn report_equivocation_current_set_works() { .add_authorities(authorities.clone()) .add_commitments(authorities.clone()) .build_and_execute(|| { - assert_eq!(Staking::current_era(), Some(0)); - assert_eq!(Session::current_index(), 0); + assert_eq!(Staking::current_era(), Some(0)); + assert_eq!(Session::current_index(), 
0); + + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + let validators = Session::validators(); + + // make sure that all validators have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(1, &validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); + } - start_era(1); + assert_eq!(authorities.len(), 2); + let equivocation_authority_index = 1; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + // generate an equivocation proof, with two votes in the same round for + // different payloads signed by the same key + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); - let validators = Session::validators(); + // create the key ownership proof + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - // make sure that all validators have the same balance - for validator in &validators { - assert_eq!(Balances::total_balance(validator), 10_000_000); - assert_eq!(Staking::slashable_balance_of(validator), 10_000); + // report the equivocation and the tx should be dispatched successfully + assert_ok!(Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ),); - assert_eq!( - Staking::eras_stakers(1, &validator), - pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, - ); - } - - assert_eq!(authorities.len(), 2); - let equivocation_authority_index = 1; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof, with two votes in the same round for - // different payloads signed by the same key - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - - // create the key ownership proof - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - - // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); - - start_era(2); - - // check that the balance of 0-th validator is slashed 100%. 
- let equivocation_validator_id = validators[equivocation_authority_index]; - - assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); - assert_eq!( - Staking::eras_stakers(2, &equivocation_validator_id), - pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, - ); - - // check that the balances of all other validators are left intact. - for validator in &validators { - if *validator == equivocation_validator_id { - continue - } + start_era(2); - assert_eq!(Balances::total_balance(validator), 10_000_000); - assert_eq!(Staking::slashable_balance_of(validator), 10_000); + // check that the balance of 0-th validator is slashed 100%. + let equivocation_validator_id = validators[equivocation_authority_index]; + assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); + assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(2, &validator), - pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + Staking::eras_stakers(2, &equivocation_validator_id), + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); - } - }); + + // check that the balances of all other validators are left intact. + for validator in &validators { + if *validator == equivocation_validator_id { + continue; + } + + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(2, &validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); + } + }); } #[test] @@ -370,82 +367,82 @@ fn report_equivocation_old_set_works() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); + start_era(1); - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let validators = Session::validators(); - let old_set_id = validator_set.id(); + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let validators = Session::validators(); + let old_set_id = validator_set.id(); - assert_eq!(authorities.len(), 2); - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; + assert_eq!(authorities.len(), 2); + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; - // create the key ownership proof in the "old" set - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + // create the key ownership proof in the "old" set + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - start_era(2); + start_era(2); - // make sure that all authorities have the same balance - for validator in &validators { - assert_eq!(Balances::total_balance(validator), 10_000_000); - assert_eq!(Staking::slashable_balance_of(validator), 10_000); + // make sure that all authorities have the same balance + for validator in &validators { + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); - assert_eq!( - Staking::eras_stakers(2, &validator), - pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, - ); - } - - let validator_set = 
Beefy::validator_set().unwrap(); - let new_set_id = validator_set.id(); - assert_eq!(old_set_id + 3, new_set_id); - - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the old set, - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, old_set_id, &equivocation_keyring), - (block_num, payload2, old_set_id, &equivocation_keyring), - ); - - // report the equivocation and the tx should be dispatched successfully - assert_ok!(Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ),); - - start_era(3); - - // check that the balance of 0-th validator is slashed 100%. - let equivocation_validator_id = validators[equivocation_authority_index]; - - assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); - assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); - assert_eq!( - Staking::eras_stakers(3, &equivocation_validator_id), - pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, - ); - - // check that the balances of all other validators are left intact. - for validator in &validators { - if *validator == equivocation_validator_id { - continue + assert_eq!( + Staking::eras_stakers(2, &validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); } - assert_eq!(Balances::total_balance(validator), 10_000_000); - assert_eq!(Staking::slashable_balance_of(validator), 10_000); + let validator_set = Beefy::validator_set().unwrap(); + let new_set_id = validator_set.id(); + assert_eq!(old_set_id + 3, new_set_id); + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + // generate an equivocation proof for the old set, + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, old_set_id, &equivocation_keyring), + (block_num, payload2, old_set_id, &equivocation_keyring), + ); + + // report the equivocation and the tx should be dispatched successfully + assert_ok!(Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ),); + + start_era(3); + + // check that the balance of 0-th validator is slashed 100%. + let equivocation_validator_id = validators[equivocation_authority_index]; + + assert_eq!(Balances::total_balance(&equivocation_validator_id), 10_000_000 - 10_000); + assert_eq!(Staking::slashable_balance_of(&equivocation_validator_id), 0); assert_eq!( - Staking::eras_stakers(3, &validator), - pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + Staking::eras_stakers(3, &equivocation_validator_id), + pallet_staking::Exposure { total: 0, own: 0, others: vec![] }, ); - } - }); + + // check that the balances of all other validators are left intact. 
+ for validator in &validators { + if *validator == equivocation_validator_id { + continue; + } + + assert_eq!(Balances::total_balance(validator), 10_000_000); + assert_eq!(Staking::slashable_balance_of(validator), 10_000); + + assert_eq!( + Staking::eras_stakers(3, &validator), + pallet_staking::Exposure { total: 10_000, own: 10_000, others: vec![] }, + ); + } + }); } #[test] @@ -456,37 +453,37 @@ fn report_equivocation_invalid_set_id() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); + start_era(1); - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation for a future set - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + // generate an equivocation for a future set + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id + 1, &equivocation_keyring), + (block_num, payload2, set_id + 1, &equivocation_keyring), + ); - // the call for reporting the equivocation should error - assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, - ); - }); + // the call for reporting the equivocation should error + assert_err!( + Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ), + Error::::InvalidEquivocationProof, + ); + }); } #[test] @@ -497,42 +494,42 @@ fn report_equivocation_invalid_session() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); + start_era(1); - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = 
BeefyKeyring::from_public(equivocation_key).unwrap(); - // generate a key ownership proof at current era set id - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + // generate a key ownership proof at current era set id + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - start_era(2); + start_era(2); - let set_id = Beefy::validator_set().unwrap().id(); + let set_id = Beefy::validator_set().unwrap().id(); - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof at following era set id = 2 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + // generate an equivocation proof at following era set id = 2 + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); - // report an equivocation for the current set using an key ownership - // proof from the previous set, the session should be invalid. - assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ), - Error::::InvalidEquivocationProof, - ); - }); + // report an equivocation for the current set using an key ownership + // proof from the previous set, the session should be invalid. + assert_err!( + Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ), + Error::::InvalidEquivocationProof, + ); + }); } #[test] @@ -543,48 +540,47 @@ fn report_equivocation_invalid_key_owner_proof() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let invalid_owner_authority_index = 1; + let invalid_owner_key = &authorities[invalid_owner_authority_index]; + + // generate a key ownership proof for the authority at index 1 + let invalid_key_owner_proof = + Historical::prove((BEEFY_KEY_TYPE, &invalid_owner_key)).unwrap(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + // generate an equivocation proof for the authority at index 0 + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id + 1, &equivocation_keyring), + (block_num, payload2, set_id + 1, &equivocation_keyring), + ); - start_era(1); - - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); - - let invalid_owner_authority_index = 1; - let invalid_owner_key = &authorities[invalid_owner_authority_index]; - - // generate a key ownership proof for the authority at index 1 - let invalid_key_owner_proof = - Historical::prove((BEEFY_KEY_TYPE, 
&invalid_owner_key)).unwrap(); - - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - // generate an equivocation proof for the authority at index 0 - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id + 1, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - ); - - // we need to start a new era otherwise the key ownership proof won't be - // checked since the authorities are part of the current session - start_era(2); + // we need to start a new era otherwise the key ownership proof won't be + // checked since the authorities are part of the current session + start_era(2); - // report an equivocation for the current set using a key ownership - // proof for a different key than the one in the equivocation proof. - assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - invalid_key_owner_proof, - ), - Error::::InvalidKeyOwnershipProof, - ); - }); + // report an equivocation for the current set using a key ownership + // proof for a different key than the one in the equivocation proof. + assert_err!( + Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + invalid_key_owner_proof, + ), + Error::::InvalidKeyOwnershipProof, + ); + }); } #[test] @@ -595,67 +591,67 @@ fn report_equivocation_invalid_equivocation_proof() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // generate a key ownership proof at set id in era 1 + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + let assert_invalid_equivocation_proof = |equivocation_proof| { + assert_err!( + Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof.clone(), + ), + Error::::InvalidEquivocationProof, + ); + }; + + start_era(2); + + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + + // both votes target the same block number and payload, + // there is no equivocation. + assert_invalid_equivocation_proof(generate_equivocation_proof( + (block_num, payload1.clone(), set_id, &equivocation_keyring), + (block_num, payload1.clone(), set_id, &equivocation_keyring), + )); - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); + // votes targeting different rounds, there is no equivocation. 
+ assert_invalid_equivocation_proof(generate_equivocation_proof( + (block_num, payload1.clone(), set_id, &equivocation_keyring), + (block_num + 1, payload2.clone(), set_id, &equivocation_keyring), + )); - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + // votes signed with different authority keys + assert_invalid_equivocation_proof(generate_equivocation_proof( + (block_num, payload1.clone(), set_id, &equivocation_keyring), + (block_num, payload1.clone(), set_id, &BeefyKeyring::Charlie), + )); - // generate a key ownership proof at set id in era 1 - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + // votes signed with a key that isn't part of the authority set + assert_invalid_equivocation_proof(generate_equivocation_proof( + (block_num, payload1.clone(), set_id, &equivocation_keyring), + (block_num, payload1.clone(), set_id, &BeefyKeyring::Dave), + )); - let assert_invalid_equivocation_proof = |equivocation_proof| { - assert_err!( - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof.clone(), - ), - Error::::InvalidEquivocationProof, - ); - }; - - start_era(2); - - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - - // both votes target the same block number and payload, - // there is no equivocation. - assert_invalid_equivocation_proof(generate_equivocation_proof( - (block_num, payload1.clone(), set_id, &equivocation_keyring), - (block_num, payload1.clone(), set_id, &equivocation_keyring), - )); - - // votes targeting different rounds, there is no equivocation. 
- assert_invalid_equivocation_proof(generate_equivocation_proof( - (block_num, payload1.clone(), set_id, &equivocation_keyring), - (block_num + 1, payload2.clone(), set_id, &equivocation_keyring), - )); - - // votes signed with different authority keys - assert_invalid_equivocation_proof(generate_equivocation_proof( - (block_num, payload1.clone(), set_id, &equivocation_keyring), - (block_num, payload1.clone(), set_id, &BeefyKeyring::Charlie), - )); - - // votes signed with a key that isn't part of the authority set - assert_invalid_equivocation_proof(generate_equivocation_proof( - (block_num, payload1.clone(), set_id, &equivocation_keyring), - (block_num, payload1.clone(), set_id, &BeefyKeyring::Dave), - )); - - // votes targeting different set ids - assert_invalid_equivocation_proof(generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id + 1, &equivocation_keyring), - )); - }); + // votes targeting different set ids + assert_invalid_equivocation_proof(generate_equivocation_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id + 1, &equivocation_keyring), + )); + }); } #[test] @@ -671,84 +667,85 @@ fn report_equivocation_validate_unsigned_prevents_duplicates() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); - - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); - - // generate and report an equivocation for the validator at index 0 - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - - let call = Call::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof.clone()), - key_owner_proof: key_owner_proof.clone(), - }; - - // only local/inblock reports are allowed - assert_eq!( - ::validate_unsigned( - TransactionSource::External, - &call, - ), - InvalidTransaction::Call.into(), - ); - - // the transaction is valid when passed as local - let tx_tag = (equivocation_key, set_id, 3u64); - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &call, - ), - TransactionValidity::Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires: vec![], - provides: vec![("BeefyEquivocation", tx_tag).encode()], - longevity: ReportLongevity::get(), - propagate: false, - }) - ); - - // the pre dispatch checks should also pass - assert_ok!(::pre_dispatch(&call)); - - // we submit the report - Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ) - .unwrap(); - - // the report should now be considered stale and the transaction is invalid - // the check for staleness should be done on both `validate_unsigned` and on `pre_dispatch` - assert_err!( - ::validate_unsigned( - TransactionSource::Local, - &call, - ), - InvalidTransaction::Stale, - ); - - assert_err!( - 
::pre_dispatch(&call), - InvalidTransaction::Stale, - ); - }); + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + // generate and report an equivocation for the validator at index 0 + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); + + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + let call = Call::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + }; + + // only local/inblock reports are allowed + assert_eq!( + ::validate_unsigned( + TransactionSource::External, + &call, + ), + InvalidTransaction::Call.into(), + ); + + // the transaction is valid when passed as local + let tx_tag = (equivocation_key, set_id, 3u64); + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call, + ), + TransactionValidity::Ok(ValidTransaction { + priority: TransactionPriority::max_value(), + requires: vec![], + provides: vec![("BeefyEquivocation", tx_tag).encode()], + longevity: ReportLongevity::get(), + propagate: false, + }) + ); + + // the pre dispatch checks should also pass + assert_ok!(::pre_dispatch(&call)); + + // we submit the report + Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .unwrap(); + + // the report should now be considered stale and the transaction is invalid + // the check for staleness should be done on both `validate_unsigned` and on + // `pre_dispatch` + assert_err!( + ::validate_unsigned( + TransactionSource::Local, + &call, + ), + InvalidTransaction::Stale, + ); + + assert_err!( + ::pre_dispatch(&call), + InvalidTransaction::Stale, + ); + }); } #[test] @@ -778,67 +775,67 @@ fn valid_equivocation_reports_dont_pay_fees() { .add_authorities(authorities.clone()) .add_commitments(authorities) .build_and_execute(|| { - start_era(1); - - let block_num = System::block_number(); - let validator_set = Beefy::validator_set().unwrap(); - let authorities = validator_set.validators(); - let set_id = validator_set.id(); - - let equivocation_authority_index = 0; - let equivocation_key = &authorities[equivocation_authority_index]; - let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); - - // generate equivocation proof - let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); - let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); - let equivocation_proof = generate_equivocation_proof( - (block_num, payload1, set_id, &equivocation_keyring), - (block_num, payload2, set_id, &equivocation_keyring), - ); - - // create the key ownership proof. - let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); - - // check the dispatch info for the call. 
- let info = Call::::report_equivocation_unsigned { - equivocation_proof: Box::new(equivocation_proof.clone()), - key_owner_proof: key_owner_proof.clone(), - } - .get_dispatch_info(); - - // it should have non-zero weight and the fee has to be paid. - assert!(info.weight.any_gt(Weight::zero())); - assert_eq!(info.pays_fee, Pays::Yes); - - // report the equivocation. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof.clone()), - key_owner_proof.clone(), - ) - .unwrap(); - - // the original weight should be kept, but given that the report - // is valid the fee is waived. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::No); - - // report the equivocation again which is invalid now since it is - // duplicate. - let post_info = Beefy::report_equivocation_unsigned( - RuntimeOrigin::none(), - Box::new(equivocation_proof), - key_owner_proof, - ) - .err() - .unwrap() - .post_info; - - // the fee is not waived and the original weight is kept. - assert!(post_info.actual_weight.is_none()); - assert_eq!(post_info.pays_fee, Pays::Yes); - }) + start_era(1); + + let block_num = System::block_number(); + let validator_set = Beefy::validator_set().unwrap(); + let authorities = validator_set.validators(); + let set_id = validator_set.id(); + + let equivocation_authority_index = 0; + let equivocation_key = &authorities[equivocation_authority_index]; + let equivocation_keyring = BeefyKeyring::from_public(equivocation_key).unwrap(); + + // generate equivocation proof + let payload1 = Payload::from_single_entry(MMR_ROOT_ID, vec![42]); + let payload2 = Payload::from_single_entry(MMR_ROOT_ID, vec![128]); + let equivocation_proof = generate_equivocation_proof( + (block_num, payload1, set_id, &equivocation_keyring), + (block_num, payload2, set_id, &equivocation_keyring), + ); + + // create the key ownership proof. + let key_owner_proof = Historical::prove((BEEFY_KEY_TYPE, &equivocation_key)).unwrap(); + + // check the dispatch info for the call. + let info = Call::::report_equivocation_unsigned { + equivocation_proof: Box::new(equivocation_proof.clone()), + key_owner_proof: key_owner_proof.clone(), + } + .get_dispatch_info(); + + // it should have non-zero weight and the fee has to be paid. + assert!(info.weight.any_gt(Weight::zero())); + assert_eq!(info.pays_fee, Pays::Yes); + + // report the equivocation. + let post_info = Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof.clone()), + key_owner_proof.clone(), + ) + .unwrap(); + + // the original weight should be kept, but given that the report + // is valid the fee is waived. + assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::No); + + // report the equivocation again which is invalid now since it is + // duplicate. + let post_info = Beefy::report_equivocation_unsigned( + RuntimeOrigin::none(), + Box::new(equivocation_proof), + key_owner_proof, + ) + .err() + .unwrap() + .post_info; + + // the fee is not waived and the original weight is kept. 
+ assert!(post_info.actual_weight.is_none()); + assert_eq!(post_info.pays_fee, Pays::Yes); + }) } #[test] @@ -846,22 +843,22 @@ fn set_new_genesis_works() { let authorities = test_authorities(); ExtBuilder::default() - .add_authorities(authorities.clone()) - .add_commitments(authorities) - .build_and_execute(|| { - start_era(1); - - let new_genesis_delay = 10u64; - // the call for setting new genesis should work - assert_ok!(Beefy::set_new_genesis(RuntimeOrigin::root(), new_genesis_delay,)); - let expected = System::block_number() + new_genesis_delay; - // verify new genesis was set - assert_eq!(beefy::GenesisBlock::::get(), Some(expected)); - - // setting delay < 1 should fail - assert_err!( - Beefy::set_new_genesis(RuntimeOrigin::root(), 0u64,), - Error::::InvalidConfiguration, - ); - }); + .add_authorities(authorities.clone()) + .add_commitments(authorities) + .build_and_execute(|| { + start_era(1); + + let new_genesis_delay = 10u64; + // the call for setting new genesis should work + assert_ok!(Beefy::set_new_genesis(RuntimeOrigin::root(), new_genesis_delay,)); + let expected = System::block_number() + new_genesis_delay; + // verify new genesis was set + assert_eq!(beefy::GenesisBlock::::get(), Some(expected)); + + // setting delay < 1 should fail + assert_err!( + Beefy::set_new_genesis(RuntimeOrigin::root(), 0u64,), + Error::::InvalidConfiguration, + ); + }); } diff --git a/pallets/beefy-mmr-etf/Cargo.toml b/pallets/beefy-mmr-etf/Cargo.toml index e93dd3a..4893b69 100644 --- a/pallets/beefy-mmr-etf/Cargo.toml +++ b/pallets/beefy-mmr-etf/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true license = "Apache-2.0" description = "BEEFY + MMR runtime utilities" repository.workspace = true -repository.homepage = true +homepage.workspace = true [lints] workspace = true diff --git a/pallets/beefy-mmr-etf/src/lib.rs b/pallets/beefy-mmr-etf/src/lib.rs index 63aea59..5b2c22e 100644 --- a/pallets/beefy-mmr-etf/src/lib.rs +++ b/pallets/beefy-mmr-etf/src/lib.rs @@ -56,7 +56,8 @@ mod tests; /// A BEEFY consensus digest item with MMR root hash. 
pub struct DepositBeefyDigest(sp_std::marker::PhantomData); -impl pallet_mmr::primitives::OnNewRoot for DepositBeefyDigest +impl pallet_mmr::primitives::OnNewRoot + for DepositBeefyDigest where T: pallet_mmr::Config, T: pallet_beefy::Config, @@ -96,8 +97,8 @@ impl Convert> for Beefy // .to_eth_address() // .map(|v| v.to_vec()) // .map_err(|_| { - // log::debug!(target: "runtime::beefy", "Failed to convert BEEFY PublicKey to ETH address!"); - // }) + // log::debug!(target: "runtime::beefy", "Failed to convert BEEFY PublicKey to ETH + // address!"); }) // .unwrap_or_default() } } @@ -172,7 +173,8 @@ impl LeafDataProvider for Pallet { } } -impl sp_consensus_beefy_etf::OnNewValidatorSet<::BeefyId> for Pallet +impl sp_consensus_beefy_etf::OnNewValidatorSet<::BeefyId> + for Pallet where T: pallet::Config, { diff --git a/pallets/beefy-mmr-etf/src/mock.rs b/pallets/beefy-mmr-etf/src/mock.rs index 0c9e52f..657dbc5 100644 --- a/pallets/beefy-mmr-etf/src/mock.rs +++ b/pallets/beefy-mmr-etf/src/mock.rs @@ -188,15 +188,9 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExt } }); - let genesis_resharing = authorities - .iter() - .map(|(_idx, id)| (id.clone(), vec![2])) - .collect(); + let genesis_resharing = authorities.iter().map(|(_idx, id)| (id.clone(), vec![2])).collect(); - pallet_etf::GenesisConfig:: { - genesis_resharing: genesis_resharing, - round_pubkey: vec![1] - } + pallet_etf::GenesisConfig:: { genesis_resharing, round_pubkey: vec![1] } .assimilate_storage(&mut t) .unwrap(); diff --git a/pallets/beefy-mmr-etf/src/tests.rs b/pallets/beefy-mmr-etf/src/tests.rs index b3360eb..a367e92 100644 --- a/pallets/beefy-mmr-etf/src/tests.rs +++ b/pallets/beefy-mmr-etf/src/tests.rs @@ -68,10 +68,16 @@ fn should_contain_mmr_digest() { vec![ beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new( - vec![mock_beefy_id(1), mock_beefy_id(2)], - vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)], + vec![mock_beefy_id(1), mock_beefy_id(2)], + vec![ + mock_beefy_id(1), + mock_beefy_id(2), + mock_beefy_id(3), + mock_beefy_id(4) + ], 1 - ).unwrap() + ) + .unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" @@ -87,10 +93,16 @@ fn should_contain_mmr_digest() { vec![ beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new( - vec![mock_beefy_id(1), mock_beefy_id(2)], - vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)], + vec![mock_beefy_id(1), mock_beefy_id(2)], + vec![ + mock_beefy_id(1), + mock_beefy_id(2), + mock_beefy_id(3), + mock_beefy_id(4) + ], 1 - ).unwrap() + ) + .unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( "95803defe6ea9f41e7ec6afa497064f21bfded027d8812efacbdf984e630cbdc" @@ -98,9 +110,15 @@ fn should_contain_mmr_digest() { beefy_log(ConsensusLog::AuthoritiesChange( ValidatorSet::new( vec![mock_beefy_id(3), mock_beefy_id(4)], - vec![mock_beefy_id(1), mock_beefy_id(2), mock_beefy_id(3), mock_beefy_id(4)], + vec![ + mock_beefy_id(1), + mock_beefy_id(2), + mock_beefy_id(3), + mock_beefy_id(4) + ], 2 - ).unwrap() + ) + .unwrap() )), beefy_log(ConsensusLog::MmrRoot(array_bytes::hex_n_into_unchecked( "a73271a0974f1e67d6e9b8dd58e506177a2e556519a330796721e98279a753e2" diff --git a/pallets/drand/src/bls12_381.rs b/pallets/drand/src/bls12_381.rs index f6725b0..8ce5d9a 100644 --- a/pallets/drand/src/bls12_381.rs +++ b/pallets/drand/src/bls12_381.rs @@ -2,8 +2,8 @@ use crate::utils::ScalarFieldFor; 
use ark_bls12_381::{G1Affine, G1Projective, G2Affine, G2Projective}; use ark_ec::{pairing::Pairing, short_weierstrass::SWCurveConfig, AffineRepr}; use sp_ark_bls12_381::{ - Bls12_381 as Bls12_381Opt, G1Affine as G1AffineOpt, - G1Projective as G1ProjectiveOpt, G2Affine as G2AffineOpt, G2Projective as G2ProjectiveOpt, + Bls12_381 as Bls12_381Opt, G1Affine as G1AffineOpt, G1Projective as G1ProjectiveOpt, + G2Affine as G2AffineOpt, G2Projective as G2ProjectiveOpt, }; #[inline] @@ -74,4 +74,4 @@ pub fn mul_affine_g2(base: &G2Affine, scalar: &[u64]) { #[inline] pub fn mul_affine_g2_opt(base: &G2AffineOpt, scalar: &[u64]) { let _out = ::mul_affine(base, scalar); -} \ No newline at end of file +} diff --git a/pallets/drand/src/lib.rs b/pallets/drand/src/lib.rs index 73146cd..fcc0b30 100644 --- a/pallets/drand/src/lib.rs +++ b/pallets/drand/src/lib.rs @@ -1,13 +1,14 @@ //! # Drand Bridge Pallet //! -//! A pallet to bridge to [drand](drand.love)'s Quicknet, injecting publicly verifiable randomness into the runtime +//! A pallet to bridge to [drand](drand.love)'s Quicknet, injecting publicly verifiable randomness +//! into the runtime //! //! ## Overview //! //! Quicknet chain runs in an 'unchained' mode, producing a fresh pulse of randomness every 3s -//! This pallet implements an offchain worker that consumes pulses from quicket and then sends a signed -//! transaction to encode them in the runtime. The runtime uses the optimized arkworks host functions -//! to efficiently verify the pulse. +//! This pallet implements an offchain worker that consumes pulses from quicket and then sends a +//! signed transaction to encode them in the runtime. The runtime uses the optimized arkworks host +//! functions to efficiently verify the pulse. //! //! Run `cargo doc --package pallet-drand --open` to view this pallet's documentation. @@ -24,12 +25,14 @@ use alloc::{format, string::String, vec, vec::Vec}; use ark_ec::{hashing::HashToCurve, AffineRepr}; use ark_serialize::CanonicalSerialize; use codec::{Decode, Encode}; -use frame_support::pallet_prelude::*; -use frame_support::traits::Randomness; -use frame_system::offchain::SignedPayload; -use frame_system::offchain::SigningTypes; -use frame_system::offchain::{AppCrypto, CreateSignedTransaction, SendUnsignedTransaction, Signer}; -use frame_system::pallet_prelude::BlockNumberFor; +use frame_support::{pallet_prelude::*, traits::Randomness}; +use frame_system::{ + offchain::{ + AppCrypto, CreateSignedTransaction, SendUnsignedTransaction, SignedPayload, Signer, + SigningTypes, + }, + pallet_prelude::BlockNumberFor, +}; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; use sp_ark_bls12_381::{G1Affine as G1AffineOpt, G2Affine as G2AffineOpt}; @@ -435,9 +438,9 @@ pub mod pallet { let current_block = frame_system::Pallet::::block_number(); let mut last_block = current_block.clone(); - // TODO: improve this, it's not efficient as it can be very slow when the history is large. - // We could set a new storage value with the latest round. - // Retrieve the lastest pulse and verify the round number + // TODO: improve this, it's not efficient as it can be very slow when the + // history is large. We could set a new storage value with the latest + // round. 
Retrieve the lastest pulse and verify the round number // https://github.com/ideal-lab5/pallet-drand/issues/4 loop { if let Some(last_pulse) = Pulses::::get(last_block) { @@ -455,7 +458,8 @@ pub mod pallet { // Store the new pulse Pulses::::insert(current_block, pulse_payload.pulse.clone()); - // now increment the block number at which we expect next unsigned transaction. + // now increment the block number at which we expect next unsigned + // transaction. >::put(current_block + One::one()); // Emit event for new pulse Self::deposit_event(Event::NewPulse { round: pulse_payload.pulse.round }); @@ -474,7 +478,6 @@ pub mod pallet { /// /// * `origin`: the root user /// * `config`: the beacon configuration - /// #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::set_beacon_config())] pub fn set_beacon_config( @@ -722,16 +725,16 @@ pub trait Verifier { /// A verifier to check values received from quicknet. It outputs true if valid, false otherwise /// /// [Quicknet](https://drand.love/blog/quicknet-is-live-on-the-league-of-entropy-mainnet) operates in an unchained mode, -/// so messages contain only the round number. in addition, public keys are in G2 and signatures are in G1 +/// so messages contain only the round number. in addition, public keys are in G2 and signatures are +/// in G1 /// /// Values are valid if the pairing equality holds: /// $e(sig, g_2) == e(msg_on_curve, pk)$ /// where $sig \in \mathbb{G}_1$ is the signature /// $g_2 \in \mathbb{G}_2$ is a generator -/// $msg_on_curve \in \mathbb{G}_1$ is a hash of the message that drand signed (hash(round_number)) -/// $pk \in \mathbb{G}_2$ is the public key, read from the input public parameters -/// -/// +/// $msg_on_curve \in \mathbb{G}_1$ is a hash of the message that drand signed +/// (hash(round_number)) $pk \in \mathbb{G}_2$ is the public key, read from the input public +/// parameters pub struct QuicknetVerifier; impl Verifier for QuicknetVerifier { diff --git a/pallets/drand/src/utils.rs b/pallets/drand/src/utils.rs index 3b2304a..8a2ada7 100644 --- a/pallets/drand/src/utils.rs +++ b/pallets/drand/src/utils.rs @@ -57,4 +57,4 @@ pub fn serialize_argument(argument: impl CanonicalSerialize) -> Vec { let mut buf = vec![0; argument.serialized_size(Compress::No)]; argument.serialize_uncompressed(&mut buf.as_mut_slice()).unwrap(); buf -} \ No newline at end of file +} diff --git a/pallets/drand/substrate-node-template/node/src/chain_spec.rs b/pallets/drand/substrate-node-template/node/src/chain_spec.rs index a8f4d27..66a4759 100644 --- a/pallets/drand/substrate-node-template/node/src/chain_spec.rs +++ b/pallets/drand/substrate-node-template/node/src/chain_spec.rs @@ -1,5 +1,5 @@ -use sc_service::ChainType; use node_template_runtime::{AccountId, Signature, WASM_BINARY}; +use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_consensus_grandpa::AuthorityId as GrandpaId; use sp_core::{sr25519, Pair, Public}; diff --git a/pallets/drand/substrate-node-template/node/src/command.rs b/pallets/drand/substrate-node-template/node/src/command.rs index cc607b7..8001b0c 100644 --- a/pallets/drand/substrate-node-template/node/src/command.rs +++ b/pallets/drand/substrate-node-template/node/src/command.rs @@ -5,9 +5,9 @@ use crate::{ service, }; use frame_benchmarking_cli::{BenchmarkCmd, ExtrinsicFactory, SUBSTRATE_REFERENCE_HARDWARE}; +use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; use sc_cli::SubstrateCli; use sc_service::PartialComponents; -use node_template_runtime::{Block, EXISTENTIAL_DEPOSIT}; 
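(Aside on the verifier documented in `pallets/drand/src/lib.rs` above: the pairing equality $e(sig, g_2) == e(msg\_on\_curve, pk)$ can be sanity-checked with plain arkworks types. The sketch below is illustrative only — `hash_round_to_g1` is a hypothetical placeholder, and this is not the pallet's actual `QuicknetVerifier` implementation, which works with the optimized `sp-ark-bls12-381` host-function types.)

```rust
use ark_bls12_381::{Bls12_381, G1Affine, G2Affine};
use ark_ec::{pairing::Pairing, AffineRepr};

/// Hypothetical helper: drand's message is sha256(round.to_be_bytes()) hashed to G1.
fn hash_round_to_g1(_round: u64) -> G1Affine {
    unimplemented!("hash-to-curve of sha256(round.to_be_bytes())")
}

/// Returns true when the beacon signature verifies against the group public key,
/// i.e. e(sig, g2) == e(msg_on_curve, pk).
fn verify_pulse(sig: G1Affine, pk: G2Affine, round: u64) -> bool {
    let g2 = G2Affine::generator();
    let msg_on_curve = hash_round_to_g1(round);
    Bls12_381::pairing(sig, g2) == Bls12_381::pairing(msg_on_curve, pk)
}
```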
use sp_keyring::Sr25519Keyring; impl SubstrateCli for Cli { @@ -39,8 +39,9 @@ impl SubstrateCli for Cli { Ok(match id { "dev" => Box::new(chain_spec::development_config()?), "" | "local" => Box::new(chain_spec::local_testnet_config()?), - path => - Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?), + path => { + Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(path))?) + }, }) } } @@ -165,8 +166,9 @@ pub fn run() -> sc_cli::Result<()> { cmd.run(client, inherent_benchmark_data()?, Vec::new(), &ext_factory) }, - BenchmarkCmd::Machine(cmd) => - cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()), + BenchmarkCmd::Machine(cmd) => { + cmd.run(&config, SUBSTRATE_REFERENCE_HARDWARE.clone()) + }, } }) }, diff --git a/pallets/drand/substrate-node-template/node/src/rpc.rs b/pallets/drand/substrate-node-template/node/src/rpc.rs index 289896c..d7587cb 100644 --- a/pallets/drand/substrate-node-template/node/src/rpc.rs +++ b/pallets/drand/substrate-node-template/node/src/rpc.rs @@ -38,7 +38,7 @@ pub struct FullDeps { /// The client instance to use. pub client: Arc, /// Transaction pool instance. - pub pool: Arc
<P>, + pub pool: Arc<P>
, /// Whether to deny unsafe calls pub deny_unsafe: DenyUnsafe, } diff --git a/pallets/drand/substrate-node-template/node/src/service.rs b/pallets/drand/substrate-node-template/node/src/service.rs index f7c5a19..0a20db8 100644 --- a/pallets/drand/substrate-node-template/node/src/service.rs +++ b/pallets/drand/substrate-node-template/node/src/service.rs @@ -232,7 +232,8 @@ pub fn new_full< let pool = transaction_pool.clone(); Box::new(move |deny_unsafe, _| { - let deps = crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; + let deps = + crate::rpc::FullDeps { client: client.clone(), pool: pool.clone(), deny_unsafe }; crate::rpc::create_full(deps).map_err(Into::into) }) }; diff --git a/pallets/etf/src/lib.rs b/pallets/etf/src/lib.rs index 07afa38..714737d 100644 --- a/pallets/etf/src/lib.rs +++ b/pallets/etf/src/lib.rs @@ -17,11 +17,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::MaxEncodedLen; -use frame_support::{ - pallet_prelude::*, - traits::Get, - BoundedVec, Parameter, -}; +use frame_support::{pallet_prelude::*, traits::Get, BoundedVec, Parameter}; use sp_runtime::traits::Member; use sp_std::prelude::*; @@ -62,44 +58,39 @@ pub mod pallet { /// publicly verifiable shares for the current round (a resharing) #[pallet::storage] - pub type Shares = + pub type Shares = StorageValue<_, BoundedVec>, T::MaxAuthorities>, ValueQuery>; /// public commitments of the the expected validator to etf pubkey - /// assumes order follows the same as the Authorities StorageValue + /// assumes order follows the same as the Authorities StorageValue #[pallet::storage] - pub type Commitments = + pub type Commitments = StorageValue<_, BoundedVec, ValueQuery>; /// the public key for the round (or rounds) #[pallet::storage] - pub type RoundPublic = - StorageValue<_, BoundedVec>, ValueQuery>; + pub type RoundPublic = StorageValue<_, BoundedVec>, ValueQuery>; #[pallet::genesis_config] pub struct GenesisConfig { /// (beefy id, commitment, BatchPoK (which technically contains the commitment...)) pub genesis_resharing: Vec<(T::BeefyId, Vec)>, - /// the round pubkey is the IBE master secret multiplied by a given group generator (e.g r = sP) + /// the round pubkey is the IBE master secret multiplied by a given group generator (e.g r + /// = sP) pub round_pubkey: Vec, } impl Default for GenesisConfig { fn default() -> Self { - Self { - genesis_resharing: Vec::new(), - round_pubkey: Vec::new(), - } + Self { genesis_resharing: Vec::new(), round_pubkey: Vec::new() } } } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - Pallet::::initialize( - &self.genesis_resharing, - self.round_pubkey.clone(), - ).expect("The genesis resharing should be correctly derived"); + Pallet::::initialize(&self.genesis_resharing, self.round_pubkey.clone()) + .expect("The genesis resharing should be correctly derived"); } } @@ -109,44 +100,37 @@ pub mod pallet { } #[pallet::call] - impl Pallet { - - } - + impl Pallet {} } impl Pallet { - fn initialize( genesis_resharing: &Vec<(T::BeefyId, Vec)>, round_key: Vec, - ) -> Result<(), ()> { - let bounded_rk = - BoundedVec::>::try_from(round_key) - .expect("The serialized round key should be 144 bytes."); + ) -> Result<(), ()> { + let bounded_rk = BoundedVec::>::try_from(round_key) + .expect("The serialized round key should be 144 bytes."); >::put(bounded_rk); let mut unbounded_shares: Vec>> = Vec::new(); - + genesis_resharing.iter().for_each(|(_commitment, pok_bytes)| { - let bounded_pok = - 
BoundedVec::>::try_from(pok_bytes.clone()) - .expect("genesis poks should be well formatted"); + let bounded_pok = BoundedVec::>::try_from(pok_bytes.clone()) + .expect("genesis poks should be well formatted"); unbounded_shares.push(bounded_pok); }); - + let bounded_shares = BoundedVec::>, T::MaxAuthorities>::try_from( - unbounded_shares - ).expect("There should be the correct number of genesis resharings"); + unbounded_shares, + ) + .expect("There should be the correct number of genesis resharings"); >::put(bounded_shares); - let bounded_commitments = - BoundedVec::::try_from( - genesis_resharing.iter() - .map(|g| g.0.clone()) - .collect::>() - ).map_err(|_| ())?; + let bounded_commitments = BoundedVec::::try_from( + genesis_resharing.iter().map(|g| g.0.clone()).collect::>(), + ) + .map_err(|_| ())?; Commitments::::put(bounded_commitments); Ok(()) @@ -167,7 +151,6 @@ pub trait RoundCommitmentProvider { } impl RoundCommitmentProvider for Pallet { - fn get() -> BoundedVec { Commitments::::get() } diff --git a/pallets/etf/src/mock.rs b/pallets/etf/src/mock.rs index 4c254c8..bca36bf 100644 --- a/pallets/etf/src/mock.rs +++ b/pallets/etf/src/mock.rs @@ -1,22 +1,13 @@ use std::vec; -use frame_support::{ - construct_runtime, derive_impl, - traits::ConstU32, -}; +use frame_support::{construct_runtime, derive_impl, traits::ConstU32}; use sp_io::TestExternalities; -use sp_runtime::{ - app_crypto::bls381::Public, - traits::{OpaqueKeys}, - BuildStorage, -}; +use sp_runtime::{app_crypto::bls381::Public, traits::OpaqueKeys, BuildStorage}; use sp_state_machine::BasicExternalities; use crate as pallet_etf; -pub use sp_consensus_beefy_etf::{ - bls_crypto::AuthorityId as BeefyId, -}; +pub use sp_consensus_beefy_etf::bls_crypto::AuthorityId as BeefyId; type Block = frame_system::mocking::MockBlock; @@ -39,7 +30,8 @@ impl pallet_etf::Config for Test { } // Note, that we can't use `UintAuthorityId` here. Reason is that the implementation // of `to_public_key()` assumes, that a public key is 32 bytes long. This is true for -// ed25519 and sr25519 but *not* for aggregatable BLS. A compressed aggregated BLS public key is 144 bytes +// ed25519 and sr25519 but *not* for aggregatable BLS. A compressed aggregated BLS public key is 144 +// bytes pub fn mock_beefy_id(id: u8) -> BeefyId { let mut buf: [u8; 144] = [id; 144]; // Set to something valid. @@ -59,15 +51,9 @@ pub fn new_test_ext(ids: Vec) -> TestExternalities { pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExternalities { let mut t = frame_system::GenesisConfig::::default().build_storage().unwrap(); - let genesis_resharing = authorities - .iter() - .map(|(_idx, id)| (id.clone(), vec![2])) - .collect(); + let genesis_resharing = authorities.iter().map(|(_idx, id)| (id.clone(), vec![2])).collect(); - pallet_etf::GenesisConfig:: { - genesis_resharing: genesis_resharing, - round_pubkey: vec![1] - } + pallet_etf::GenesisConfig:: { genesis_resharing, round_pubkey: vec![1] } .assimilate_storage(&mut t) .unwrap(); diff --git a/pallets/etf/src/tests.rs b/pallets/etf/src/tests.rs index 1c0130d..edd95ef 100644 --- a/pallets/etf/src/tests.rs +++ b/pallets/etf/src/tests.rs @@ -14,19 +14,15 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
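(Aside on the `round_pubkey` doc comment in `pallets/etf/src/lib.rs` above — "the round pubkey is the IBE master secret multiplied by a given group generator (e.g. r = sP)": a minimal sketch of that relationship with arkworks, assuming BLS12-381 and G1 purely for illustration; the pallet itself only stores the serialized bytes and does not fix the group here.)

```rust
use ark_bls12_381::{Fr, G1Projective};
use ark_ec::Group;
use ark_std::{test_rng, UniformRand};

fn main() {
    let mut rng = test_rng();
    let s = Fr::rand(&mut rng);          // IBE master secret (never stored on-chain)
    let p = G1Projective::generator();   // a fixed group generator P
    let round_pubkey = p * s;            // round public key r = sP
    // the pallet only ever sees a serialized form of `round_pubkey`
    assert_eq!(round_pubkey, G1Projective::generator() * s);
}
```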
-use std::vec; use crate::{self as etf, mock::*, Call, Config, Error, Weight}; +use std::vec; #[test] fn genesis_session_initializes_resharing_and_commitments_with_valid_values() { - let genesis_resharing = vec![ - (1, vec![2]), - (2, vec![2]), - (3, vec![2]) - ]; + let genesis_resharing = vec![(1, vec![2]), (2, vec![2]), (3, vec![2])]; let want_resharing = genesis_resharing.clone(); - let genesis_roundkey = [1;96].to_vec(); + let genesis_roundkey = [1; 96].to_vec(); new_test_ext(vec![1, 2, 3]).execute_with(|| { // resharings are populated @@ -34,6 +30,6 @@ fn genesis_session_initializes_resharing_and_commitments_with_valid_values() { assert_eq!(resharings.len(), 3); assert_eq!(resharings[0], want_resharing[0].1); assert_eq!(resharings[1], want_resharing[1].1); - assert_eq!(resharings[2], want_resharing[2].1); + assert_eq!(resharings[2], want_resharing[2].1); }); -} \ No newline at end of file +} diff --git a/pallets/murmur/Cargo.toml b/pallets/murmur/Cargo.toml index df583c1..9ab7831 100644 --- a/pallets/murmur/Cargo.toml +++ b/pallets/murmur/Cargo.toml @@ -1,13 +1,13 @@ [package] name = "pallet-murmur" -version = "1.0.0-dev" +version = "0.1.0-dev" description = "FRAME pallet to create and execute murmur wallets" -authors = ["Ideal Labs "] -homepage = "https://idealabs.network" -edition = "2021" -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +authors.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true publish = false -repository = "https://github.com/ideal-lab5/pallets/" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/pallets/murmur/README.md b/pallets/murmur/README.md index f153108..04d31dd 100644 --- a/pallets/murmur/README.md +++ b/pallets/murmur/README.md @@ -1,14 +1,90 @@ -# EtF Pallet +# Murmur Pallet -The EtF (Encryption to the Future) pallet enables EtF consensus when added to a runtime. It stores public parameters required for identity based encryption. In this initial version, parameters are set on genesis and only modifiable by the root node. +The Murmur Pallet is a FRAME pallet designed to create and execute Murmur wallets. It provides functionalities to create time-based proxy accounts and proxy calls after verifying ciphertexts using Merkle proofs. -## Runtime Storage +## Overview -- `IBEParams`: The publicly known generator required for the IBE block seals. +The Murmur Pallet allows users to create proxy accounts with unique names and execute proxy calls securely. It leverages Merkle Mountain Range (MMR) for proof verification and integrates with the `pallet_proxy` for proxy call execution. -## Extrinsics +## Features -- `update_ibe_params`: Update the IBE public parameter. Only callable by the root node. +- **Create Proxy Accounts**: Create time-based proxy accounts with unique names. +- **Proxy Calls**: Proxy calls after verifying the ciphertext and Merkle proof. 
+ +## Usage + +### Create a Proxy Account + +To create a proxy account, use the `create` dispatchable function: + +```rust +pub fn create( + origin: OriginFor, + root: Vec, + size: u64, + name: BoundedVec>, +) -> DispatchResult +``` + +### Proxy a Call + +To proxy a call, use the `proxy` dispatchable function: + +```rust +pub fn proxy( + _origin: OriginFor, + name: BoundedVec>, + position: u64, + hash: Vec, + ciphertext: Vec, + proof: Vec>, + size: u64, + call: sp_std::boxed::Box<::RuntimeCall>, +) -> DispatchResult +``` + +## Events + +The pallet emits the following events: + +- `OtpProxyCreated`: Emitted when a new proxy account is created. +- `OtpProxyExecuted`: Emitted when a proxy call is executed. + +## Errors + +The pallet can return the following errors: + +- `BadCiphertext`: The provided ciphertext is invalid. +- `DuplicateName`: The provided name is already in use. +- `InvalidOTP`: The provided OTP is invalid. +- `InvalidMerkleProof`: The provided Merkle proof is invalid. +- `InvalidProxy`: The proxy account is invalid. +- `ProxyDNE`: The proxy account does not exist. + +## Build + +To build the project, use the following command: + +```shell +cargo build +``` + +## Testing + +To run the tests, use the following command: + +```shell +cargo test +``` + +## Contributing + +Contributions are welcome! Please open an issue or submit a pull request. ## License -GPLv3.0 \ No newline at end of file + +This project is licensed under the Apache-2.0. See the [LICENSE](../../LICENSE) file for details. + +## Contact + +For any inquiries, please contact [Ideal Labs](https://idealabs.network). diff --git a/pallets/murmur/src/benchmarking.rs b/pallets/murmur/src/benchmarking.rs deleted file mode 100644 index d3e76f9..0000000 --- a/pallets/murmur/src/benchmarking.rs +++ /dev/null @@ -1,29 +0,0 @@ -// //! Benchmarking setup for pallet-etf -// #![cfg(feature = "runtime-benchmarks")] -// use super::*; - -// #[allow(unused)] -// use crate::Pallet as Etf; -// use frame_benchmarking::v2::*; -// use frame_system::RawOrigin; - -// #[benchmarks] -// mod benchmarks { -// use super::*; - -// #[benchmark] -// fn update_ibe_params() { -// let g1_bytes = array_bytes::hex2bytes_unchecked("a191b705ef18a6e4e5bd4cc56de0b8f94b1f3c908f3e3fcbd4d1dc12eb85059be7e7d801edc1856c8cfbe6d63a681c1f"); -// let g2_bytes = array_bytes::hex2bytes_unchecked("878c5832d9519a9a22cee4d790be6bef6a0bc55e2c4c38185bf497061fb2712309f59e9eed0cdac8f8c97a61427bf35003065d0f83dca6defed8f50d715bb9430375153dff0b52bae38acf8d3aeb1612248856a8deae883f32dacaa04e3fba26"); -// #[extrinsic_call] -// update_ibe_params(RawOrigin::Root, g1_bytes.clone(), g2_bytes.clone(), g2_bytes.clone()); -// assert_eq!(IBEParams::::get(), (g1_bytes.clone(), g2_bytes.clone(), g2_bytes.clone())); -// } - -// impl_benchmark_test_suite!( -// Etf, crate::mock::new_test_ext( -// &"a191b705ef18a6e4e5bd4cc56de0b8f94b1f3c908f3e3fcbd4d1dc12eb85059be7e7d801edc1856c8cfbe6d63a681c1f", -// &"878c5832d9519a9a22cee4d790be6bef6a0bc55e2c4c38185bf497061fb2712309f59e9eed0cdac8f8c97a61427bf35003065d0f83dca6defed8f50d715bb9430375153dff0b52bae38acf8d3aeb1612248856a8deae883f32dacaa04e3fba26", -// ), -// crate::mock::Test); -// } diff --git a/pallets/murmur/src/lib.rs b/pallets/murmur/src/lib.rs index 355de01..9be9509 100644 --- a/pallets/murmur/src/lib.rs +++ b/pallets/murmur/src/lib.rs @@ -1,9 +1,22 @@ +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + #![cfg_attr(not(feature = "std"), no_std)] //! # Murmur Pallet -//! -//! -//! pub use pallet::*; #[cfg(test)] @@ -12,33 +25,23 @@ mod mock; #[cfg(test)] mod tests; -#[cfg(feature = "runtime-benchmarks")] -mod benchmarking; -// pub mod weights; TODO -// pub use weights::WeightInfo; - -use ckb_merkle_mountain_range::{ - util::{MemMMR, MemStore}, - Merge, MerkleProof, Result as MMRResult, MMR, -}; +use ckb_merkle_mountain_range::MerkleProof; use codec::{Decode, Encode}; use frame_support::{ dispatch::GetDispatchInfo, pallet_prelude::*, traits::{ConstU32, IsSubType}, }; -use log::info; use murmur_core::{ murmur, - types::{BlockNumber, Leaf, MergeLeaves}, + types::{Leaf, MergeLeaves}, }; +use pallet_randomness_beacon::TimelockEncryptionProvider; use scale_info::TypeInfo; -use sp_runtime::{traits::Dispatchable, DispatchResult}; -use sp_std::{prelude::ToOwned, vec, vec::Vec}; - -use pallet_randomness_beacon::{Ciphertext, TimelockEncryptionProvider}; +use sp_runtime::traits::Dispatchable; +use sp_std::{vec, vec::Vec}; -/// a bounded name +/// A bounded name pub type Name = BoundedVec>; /// A struct to represent specific details of a murmur proxy account @@ -85,14 +88,12 @@ pub mod pallet { + From> + IsSubType> + IsType<::RuntimeCall>; - // / Type representing the weight of this pallet - // type WeightInfo: WeightInfo; - /// something that can decrypt messages locked for the current slot + /// Something that can decrypt messages locked for the current slot type TlockProvider: TimelockEncryptionProvider>; } - /// a registry to track registered 'usernames' for OTP wallets - /// Q: what happens when this map becomes very large? in terms of query time? + /// A registry to track registered 'usernames' for OTP wallets + // Q: what happens when this map becomes very large? in terms of query time? 
#[pallet::storage] pub(super) type Registry = StorageMap<_, Blake2_256, Name, MurmurProxyDetails, OptionQuery>; @@ -117,13 +118,11 @@ pub mod pallet { #[pallet::call] impl Pallet { - /// Create a time-based proxy account /// /// * `root`: The MMR root /// * `size`: The size (number of leaves) of the MMR /// * `name`: The name to assign to the murmur proxy - /// #[pallet::weight(0)] #[pallet::call_index(0)] pub fn create( @@ -164,12 +163,12 @@ pub mod pallet { /// /// * `name`: The uid of the murmur proxy /// * `position`: The position in the MMR of the encrypted OTP code - /// * `target_leaf`: The target leaf data (ciphertext) /// * `hash`: A hash to commit to the OTP code and call data - /// * `proof`: A merkle proof that the target leaf is in the expected MMR at the given position + /// * `ciphertext`: The encrypted OTP code + /// * `proof`: A merkle proof that the target leaf is in the expected MMR at the given + /// position /// * `size`: The size of the Merkle proof /// * `call`: The call to be proxied - /// #[pallet::weight(0)] #[pallet::call_index(1)] pub fn proxy( @@ -188,7 +187,7 @@ pub mod pallet { let result = T::TlockProvider::decrypt_at(&ciphertext, when) .map_err(|_| Error::::BadCiphertext)?; - let mut otp = result.message; + let otp = result.message; let leaves: Vec = proof.clone().into_iter().map(|p| Leaf(p)).collect::>(); let merkle_proof = MerkleProof::::new(size, leaves.clone()); @@ -201,7 +200,7 @@ pub mod pallet { ciphertext, otp, call.encode().to_vec(), - position + position, ); frame_support::ensure!(validity, Error::::InvalidMerkleProof); diff --git a/pallets/murmur/src/mock.rs b/pallets/murmur/src/mock.rs index 14262be..d9f9760 100644 --- a/pallets/murmur/src/mock.rs +++ b/pallets/murmur/src/mock.rs @@ -17,29 +17,27 @@ use super::*; use std::vec; +use crate as pallet_murmur; use codec::Encode; +use etf_crypto_primitives::encryption::tlock::DecryptionResult; use frame_support::{ construct_runtime, derive_impl, parameter_types, - traits::{ConstU128, ConstU32, ConstU64, Contains, InstanceFilter}, + traits::{ConstU128, ConstU32, ConstU64, InstanceFilter}, }; -use frame_system::Call as SystemCall; -use sp_consensus_beefy_etf::{mmr::MmrLeafVersion, test_utils::etf_genesis}; +use murmur_test_utils::BOTPGenerator; +use sha3::Digest; +use sp_consensus_beefy_etf::mmr::MmrLeafVersion; use sp_core::Pair; use sp_io::TestExternalities; use sp_runtime::{ - app_crypto::bls377::Public, impl_opaque_keys, testing::TestXt, traits::{BlakeTwo256, ConvertInto, Keccak256, OpaqueKeys}, BuildStorage, }; -use murmur_test_utils::BOTPGenerator; use sp_state_machine::BasicExternalities; -use etf_crypto_primitives::encryption::tlock::DecryptionResult; -use sha3::Digest; -use crate as pallet_murmur; -pub use sp_consensus_beefy_etf::{bls_crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider}; +pub use sp_consensus_beefy_etf::bls_crypto::AuthorityId as BeefyId; impl_opaque_keys! 
{ pub struct MockSessionKeys { @@ -91,13 +89,6 @@ impl pallet_session::Config for Test { type WeightInfo = (); } -// pub type MmrLeaf = sp_consensus_beefy_etf::mmr::MmrLeaf< -// frame_system::pallet_prelude::BlockNumberFor, -// ::Hash, -// pallet_beefy_mmr::MerkleRootOf, -// Vec, -// >; - impl pallet_mmr::Config for Test { const INDEXING_PREFIX: &'static [u8] = b"mmr"; type Hashing = Keccak256; @@ -138,22 +129,9 @@ impl pallet_beefy_mmr_etf::Config for Test { type BeefyDataProvider = (); } -pub struct DummyDataProvider; -impl BeefyDataProvider> for DummyDataProvider { - fn extra_data() -> Vec { - let mut col = vec![(15, vec![1, 2, 3]), (5, vec![4, 5, 6])]; - col.sort(); - binary_merkle_tree::merkle_root::<::Hashing, _>( - col.into_iter().map(|pair| pair.encode()), - ) - .as_ref() - .to_vec() - } -} - impl pallet_balances::Config for Test { type MaxLocks = (); - type MaxReserves = (); + type MaxReserves = (); type ReserveIdentifier = [u8; 8]; type Balance = u128; type DustRemoval = (); @@ -174,8 +152,10 @@ impl pallet_randomness_beacon::Config for Test { pub struct DummyTlockProvider; impl TimelockEncryptionProvider for DummyTlockProvider { - - fn decrypt_at(bytes: &[u8], when: u64) -> Result { + fn decrypt_at( + _bytes: &[u8], + when: u64, + ) -> Result { let seed = b"seed".to_vec(); let mut hasher = sha3::Sha3_256::default(); hasher.update(seed); @@ -183,10 +163,7 @@ impl TimelockEncryptionProvider for DummyTlockProvider { let generator = BOTPGenerator::new(hash.to_vec()); let otp_code = generator.generate(when as u32); - Ok(DecryptionResult { - message: otp_code.as_bytes().to_vec(), - secret: [0;32], - }) + Ok(DecryptionResult { message: otp_code.as_bytes().to_vec(), secret: [0; 32] }) } fn latest() -> u64 { @@ -238,17 +215,6 @@ impl InstanceFilter for ProxyType { self == &ProxyType::Any || self == o } } -pub struct BaseFilter; -impl Contains for BaseFilter { - fn contains(c: &RuntimeCall) -> bool { - match *c { - // Remark is used as a no-op call in the benchmarking - RuntimeCall::System(SystemCall::remark { .. 
}) => true, - RuntimeCall::System(_) => false, - _ => true, - } - } -} impl pallet_proxy::Config for Test { type RuntimeEvent = RuntimeEvent; @@ -319,16 +285,24 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExt } }); - let (round_pubkey, genesis_resharing) = etf_genesis::( - authorities.iter().map(|(_, id)| id.clone()).collect::>(), - ); - - pallet_etf::GenesisConfig:: { - genesis_resharing, - round_pubkey, - } - .assimilate_storage(&mut t) - .unwrap(); + // mock the genesis config + let genesis_resharing: Vec<(sp_consensus_beefy_etf::bls_crypto::Public, Vec)> = + vec![(mock_beefy_id(123), [1u8; 32].into())]; + let round_pubkey = [ + 144, 122, 123, 77, 192, 77, 117, 246, 132, 139, 163, 31, 26, 99, 75, 76, 23, 206, 24, 252, + 200, 112, 18, 199, 82, 203, 96, 23, 70, 76, 156, 253, 67, 126, 106, 164, 154, 25, 154, 95, + 155, 32, 173, 48, 126, 0, 123, 129, 86, 203, 71, 65, 207, 131, 55, 168, 72, 235, 88, 180, + 5, 20, 167, 118, 31, 36, 35, 125, 250, 33, 33, 224, 230, 106, 155, 79, 79, 137, 130, 57, + 146, 66, 236, 129, 17, 178, 199, 180, 48, 108, 247, 161, 0, 139, 7, 0, 180, 41, 114, 7, 69, + 134, 33, 178, 54, 23, 119, 67, 67, 173, 76, 36, 94, 29, 1, 134, 114, 228, 28, 69, 152, 14, + 57, 17, 38, 6, 83, 43, 155, 211, 188, 64, 91, 193, 205, 125, 222, 52, 19, 237, 173, 184, + 129, 128, + ] + .into(); + + pallet_etf::GenesisConfig:: { genesis_resharing, round_pubkey } + .assimilate_storage(&mut t) + .unwrap(); pallet_session::GenesisConfig:: { keys: session_keys } .assimilate_storage(&mut t) diff --git a/pallets/murmur/src/tests.rs b/pallets/murmur/src/tests.rs index 27723cf..3ecd10a 100644 --- a/pallets/murmur/src/tests.rs +++ b/pallets/murmur/src/tests.rs @@ -1,42 +1,27 @@ -use crate::{self as murmur, mock::*, Error}; -use ark_serialize::CanonicalSerialize; -use ark_std::{test_rng, UniformRand}; -use frame_support::{ - assert_noop, assert_ok, BoundedVec, - traits::{ - ConstU32, - OnInitialize, - }, -}; +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +use crate::{self as murmur, mock::*}; +use codec::Encode; +use frame_support::{assert_ok, traits::ConstU32, BoundedVec}; use frame_system::Call as SystemCall; - -use sha3::Digest; - -use murmur_core::types::{BlockNumber, Identity, IdentityBuilder, Leaf, MergeLeaves}; - -use murmur_test_utils::{ - BOTPGenerator, - MurmurStore -}; -use sp_core::{bls377, Pair, ByteArray}; -use ckb_merkle_mountain_range::{ - util::{MemMMR, MemStore}, - MerkleProof, -}; - -use codec::{Decode, Encode}; -use sp_consensus_beefy_etf::{ - known_payloads, AuthorityIndex, BeefyAuthorityId, Commitment, ConsensusLog, EquivocationProof, - OnNewValidatorSet, Payload, ValidatorSet, BEEFY_ENGINE_ID, GENESIS_AUTHORITY_SET_ID, -}; -use ark_serialize::CanonicalDeserialize; -use w3f_bls::{ - DoublePublicKey, - DoubleSignature, - EngineBLS, - SerializableToBytes, - TinyBLS377 -}; +use murmur_core::types::{BlockNumber, Identity, IdentityBuilder}; +use murmur_test_utils::MurmurStore; +use sp_consensus_beefy_etf::{known_payloads, Commitment, Payload}; +use w3f_bls::{DoublePublicKey, SerializableToBytes, TinyBLS377}; #[derive(Debug)] pub struct BasicIdBuilder; @@ -87,11 +72,6 @@ fn it_can_create_new_proxy_with_unique_name() { }); } -fn init_block(block: u64) { - System::set_block_number(block); - Session::on_initialize(block); -} - #[test] fn it_can_proxy_valid_calls() { let seed = b"seed".to_vec(); @@ -123,19 +103,16 @@ fn it_can_proxy_valid_calls() { // the beacon would write a new pulse here, but we will mock it instead // but here, we can just generate the expected OTP code when we mock decryption - - // now we want to proxy a call + + // now we want to proxy a call let call = call_remark(vec![1, 2, 3, 4, 5]); // We want to use the ciphertext for block = 1 - let (proof, commitment, ciphertext, pos) = mmr_store.execute( - seed.clone(), - when.clone() as u32, - call.encode().to_vec(), - ).unwrap(); + let (proof, commitment, ciphertext, pos) = mmr_store + .execute(seed.clone(), when.clone() as u32, call.encode().to_vec()) + .unwrap(); - let proof_items: Vec> = proof.proof_items().iter() - .map(|leaf| leaf.0.to_vec()) - .collect::>(); + let proof_items: Vec> = + proof.proof_items().iter().map(|leaf| leaf.0.to_vec()).collect::>(); assert_ok!(Murmur::proxy( RuntimeOrigin::signed(0), @@ -144,18 +121,12 @@ fn it_can_proxy_valid_calls() { commitment, ciphertext, proof_items, + size, Box::new(call), )); - }); } -fn calculate_signature(id: u8, serialized_resharing: &[u8], message: &[u8]) -> (bls377::Public, bls377::Signature) { - let kp = sp_core::bls::Pair::from_seed_slice(&[id;32]).unwrap(); - let etf_kp = kp.acss_recover(serialized_resharing, 1).unwrap(); - (etf_kp.public(), etf_kp.sign(message)) -} - fn call_remark(value: Vec) -> RuntimeCall { RuntimeCall::System(SystemCall::remark { remark: value }) } diff --git a/pallets/murmur/src/weights.rs b/pallets/murmur/src/weights.rs deleted file mode 100644 index c42bba2..0000000 --- a/pallets/murmur/src/weights.rs +++ /dev/null @@ -1,55 +0,0 @@ - -// //! Autogenerated weights for `pallet_etf` -// //! -// //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -// //! DATE: 2023-07-28, STEPS: `50`, REPEAT: `20`, LOW RANGE: `[]`, HIGH RANGE: `[]` -// //! WORST CASE MAP SIZE: `1000000` -// //! HOSTNAME: `DESKTOP-RN9BJOQ`, CPU: `Intel(R) Core(TM) i7-9700KF CPU @ 3.60GHz` -// //! 
EXECUTION: None, WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 1024 - -// // Executed Command: -// // ./target/release/node-template -// // benchmark -// // pallet -// // --chain -// // dev -// // --wasm-execution=compiled -// // --pallet -// // pallet_etf -// // --extrinsic -// // * -// // --steps -// // 50 -// // --repeat -// // 20 -// // --output -// // bin/node-template/pallets/etf/src/weight.rs - -// #![cfg_attr(rustfmt, rustfmt_skip)] -// #![allow(unused_parens)] -// #![allow(unused_imports)] -// #![allow(missing_docs)] - -// use frame_support::{traits::Get, weights::Weight}; -// use core::marker::PhantomData; -// use crate as pallet_etf; - -// pub trait WeightInfo { -// fn update_ibe_params() -> Weight; -// } - -// /// Weight functions for `pallet_etf`. -// pub struct SubstrateWeightInfo(PhantomData); -// impl WeightInfo for SubstrateWeightInfo { -// /// Storage: Etf IBEParams (r:0 w:1) -// /// Proof Skipped: Etf IBEParams (max_values: Some(1), max_size: None, mode: Measured) -// fn update_ibe_params() -> Weight { -// // Proof Size summary in bytes: -// // Measured: `0` -// // Estimated: `0` -// // Minimum execution time: 80_700_000 picoseconds. -// Weight::from_parts(81_500_000, 0) -// .saturating_add(Weight::from_parts(0, 0)) -// .saturating_add(T::DbWeight::get().writes(1)) -// } -// } diff --git a/pallets/proxy/Cargo.toml b/pallets/proxy/Cargo.toml index 2bfa328..cfc97f5 100644 --- a/pallets/proxy/Cargo.toml +++ b/pallets/proxy/Cargo.toml @@ -1,17 +1,14 @@ [package] name = "pallet-proxy" -version = "28.1.0" +version = "0.1.0-dev" authors.workspace = true edition.workspace = true -license = "Apache-2.0" -homepage = "https://substrate.io" +license.workspace = true +homepage.workspace = true repository.workspace = true description = "FRAME proxying pallet" readme = "README.md" -# [lints] -# workspace = true - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/pallets/proxy/README.md b/pallets/proxy/README.md index c52a881..02ac0c9 100644 --- a/pallets/proxy/README.md +++ b/pallets/proxy/README.md @@ -1,21 +1,63 @@ -# Proxy Module -A module allowing accounts to give permission to other accounts to dispatch types of calls from -their signed origin. +# Proxy Pallet -The accounts to which permission is delegated may be required to announce the action that they -wish to execute some duration prior to execution happens. In this case, the target account may -reject the announcement and in doing so, veto the execution. - -- [`Config`](https://docs.rs/pallet-proxy/latest/pallet_proxy/pallet/trait.Config.html) -- [`Call`](https://docs.rs/pallet-proxy/latest/pallet_proxy/pallet/enum.Call.html) +This is a FRAME pallet that allows accounts to delegate permission to other accounts to dispatch specific types of calls from their signed origin. This delegation can include requirements for the delegate to announce their intended actions before execution, giving the original account the opportunity to veto the action. ## Overview -## Interface +The Proxy Module provides a flexible mechanism for account delegation, enabling various use cases such as account recovery, multi-signature wallets, and more. It supports time-based announcements and vetoes, ensuring that the original account retains control over critical actions. + +## Features + +- **Account Delegation**: Delegate permission to other accounts to perform specific actions. 
+- **Announcement and Veto**: Require delegates to announce actions before execution, allowing the original account to veto if necessary. + +## Events + +The module emits the following events: + +- `ProxyAdded`: Emitted when a new proxy is added. +- `Announced`: Emitted when a proxy call is announced. +- `ProxyExecuted`: Emitted when a proxy call is executed. +- `PureCreated`: Emitted when a new pure proxy is created. +- `ProxyRemoved`: Emitted when a proxy is removed. + +## Errors + +The module can return the following errors: + +- `TooMany`: The account has too many proxies. +- `NotFound`: The proxy was not found. +- `NotProxy`: The account is not a proxy. +- `Unproxyable`: The call is not allowed to be proxied. +- `Duplicate`: The proxy is already in use. +- `NoPermission`: The account does not have permission to proxy the call. +- `Unannounced`: The call was not announced. +- `NoSelfProxy`: An account cannot proxy to itself. + +## Build + +To build the project, use the following command: + +```shell +cargo build +``` + +## Testing + +To run the tests, use the following command: + +```shell +cargo test +``` + +## Contributing + +Contributions are welcome! Please open an issue or submit a pull request. + +## License -### Dispatchable Functions +This project is licensed under the Apache-2.0. See the [LICENSE](../../LICENSE) file for details. -[`Call`]: ./enum.Call.html -[`Config`]: ./trait.Config.html +## Contact -License: Apache-2.0 +For any inquiries, please contact [Ideal Labs](https://idealabs.network). diff --git a/pallets/proxy/src/benchmarking.rs b/pallets/proxy/src/benchmarking.rs index e0d1416..5059f92 100644 --- a/pallets/proxy/src/benchmarking.rs +++ b/pallets/proxy/src/benchmarking.rs @@ -1,19 +1,18 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Benchmarks for Proxy Pallet @@ -225,7 +224,8 @@ benchmarks! { RawOrigin::Signed(caller.clone()), T::ProxyType::default(), BlockNumberFor::::zero(), - 0 + 0, + false ) verify { let pure_account = Pallet::::pure_account(&caller, &T::ProxyType::default(), 0, None); @@ -247,7 +247,8 @@ benchmarks! 
{ RawOrigin::Signed(whitelisted_caller()).into(), T::ProxyType::default(), BlockNumberFor::::zero(), - 0 + 0, + false )?; let height = system::Pallet::::block_number(); let ext_index = system::Pallet::::extrinsic_index().unwrap_or(0); diff --git a/pallets/proxy/src/lib.rs b/pallets/proxy/src/lib.rs index 082261d..6d15f74 100644 --- a/pallets/proxy/src/lib.rs +++ b/pallets/proxy/src/lib.rs @@ -1,19 +1,18 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ //! # Proxy Pallet //! A pallet allowing accounts to give permission to other accounts to dispatch types of calls from @@ -216,9 +215,9 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_. /// /// Parameters: - /// - `proxy`: The account that the `caller` would like to make a proxy. + /// - `delegate`: The account that the `caller` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. - /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// - `delay`: The announcement period required of the initial proxy. /// zero. #[pallet::call_index(1)] #[pallet::weight(T::WeightInfo::add_proxy(T::MaxProxies::get()))] @@ -238,8 +237,9 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_. /// /// Parameters: - /// - `proxy`: The account that the `caller` would like to remove as a proxy. + /// - `delegate`: The account that the `caller` would like to remove as a proxy. /// - `proxy_type`: The permissions currently enabled for the removed proxy account. + /// - `delay`: The announcement period required of the initial proxy. #[pallet::call_index(2)] #[pallet::weight(T::WeightInfo::remove_proxy(T::MaxProxies::get()))] pub fn remove_proxy( @@ -275,11 +275,12 @@ pub mod pallet { /// - `proxy_type`: The type of the proxy that the sender will be registered as over the /// new account. This will almost always be the most permissive `ProxyType` possible to /// allow for maximum flexibility. + /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// zero. /// - `index`: A disambiguation index, in case this is called multiple times in the same /// transaction (e.g. with `utility::batch`). Unless you're using `batch` you probably just /// want to use `0`. 
- /// - `delay`: The announcement period required of the initial proxy. Will generally be - /// zero. + /// - `anonymous`: Whether the account should be anonymous. /// /// Fails with `Duplicate` if this has already been called in this transaction, from the /// same sender, with the same parameters. @@ -300,13 +301,9 @@ pub mod pallet { ensure!(!Proxies::::contains_key(&pure), Error::::Duplicate); // if anonymous, then set no delegate - let delegate = match anonymous { - True => None, - False => Some(who.clone()) - }; + let delegate = if anonymous { None } else { Some(who.clone()) }; - let proxy_def = - ProxyDefinition { delegate: delegate, proxy_type: proxy_type.clone(), delay }; + let proxy_def = ProxyDefinition { delegate, proxy_type: proxy_type.clone(), delay }; let bounded_proxies: BoundedVec<_, T::MaxProxies> = vec![proxy_def].try_into().map_err(|_| Error::::TooMany)?; @@ -333,8 +330,8 @@ pub mod pallet { /// `pure` with corresponding parameters. /// /// - `spawner`: The account that originally called `pure` to create this account. - /// - `index`: The disambiguation index originally passed to `pure`. Probably `0`. /// - `proxy_type`: The proxy type originally passed to `pure`. + /// - `index`: The disambiguation index originally passed to `pure`. Probably `0`. /// - `height`: The height of the chain when the call to `pure` was processed. /// - `ext_index`: The extrinsic index in which the call to `pure` was processed. /// @@ -482,6 +479,7 @@ pub mod pallet { /// The dispatch origin for this call must be _Signed_. /// /// Parameters: + /// - `delegate`: The account that the `caller` would like to make a proxy. /// - `real`: The account that the proxy will make a call on behalf of. /// - `force_proxy_type`: Specify the exact proxy type to be used and checked for this call. /// - `call`: The call to be made by the `real` account. @@ -509,9 +507,9 @@ pub mod pallet { let call_hash = T::CallHasher::hash_of(&call); let now = system::Pallet::::block_number(); Self::edit_announcements(&delegate.clone(), |ann| { - ann.real != real || - ann.call_hash != call_hash || - now.saturating_sub(ann.height) < def.delay + ann.real != real + || ann.call_hash != call_hash + || now.saturating_sub(ann.height) < def.delay }) .map_err(|_| Error::::Unannounced)?; @@ -681,7 +679,7 @@ impl Pallet { /// - `delegator`: The delegator account. /// - `delegatee`: The account that the `delegator` would like to make a proxy. /// - `proxy_type`: The permissions allowed for this proxy account. - /// - `delay`: The announcement period required of the initial proxy. Will generally be + /// - `delay`: The announcement period required of the initial proxy. /// zero. pub fn remove_proxy_delegate( delegator: &T::AccountId, @@ -771,8 +769,8 @@ impl Pallet { force_proxy_type: Option, ) -> Result>, DispatchError> { let f = |x: &ProxyDefinition>| -> bool { - x.delegate.clone() == delegate && - force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) + x.delegate.clone() == delegate + && force_proxy_type.as_ref().map_or(true, |y| &x.proxy_type == y) }; Ok(Proxies::::get(real).0.into_iter().find(f).ok_or(Error::::NotProxy)?) } @@ -790,15 +788,19 @@ impl Pallet { match c.is_sub_type() { // Proxy call cannot add or remove a proxy with more permissions than it already // has. - Some(Call::add_proxy { ref proxy_type, .. }) | - Some(Call::remove_proxy { ref proxy_type, .. }) + Some(Call::add_proxy { ref proxy_type, .. }) + | Some(Call::remove_proxy { ref proxy_type, .. 
}) if !def.proxy_type.is_superset(proxy_type) => - false, + { + false + }, // Proxy call cannot remove all proxies or kill pure proxies unless it has full // permissions. Some(Call::remove_proxies { .. }) | Some(Call::kill_pure { .. }) if def.proxy_type != T::ProxyType::default() => - false, + { + false + }, _ => def.proxy_type.filter(c), } }); diff --git a/pallets/proxy/src/tests.rs b/pallets/proxy/src/tests.rs index 67a662e..71dbcbb 100644 --- a/pallets/proxy/src/tests.rs +++ b/pallets/proxy/src/tests.rs @@ -1,19 +1,18 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ // Tests for Proxy Pallet @@ -534,7 +533,7 @@ fn proxying_works() { fn pure_works() { new_test_ext().execute_with(|| { Balances::make_free_balance_be(&1, 11); // An extra one for the ED. - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0, false)); let anon = Proxy::pure_account(&1, &ProxyType::Any, 0, None); System::assert_last_event( ProxyEvent::PureCreated { @@ -547,19 +546,25 @@ fn pure_works() { ); // other calls to pure allowed as long as they're not exactly the same. 
- assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::JustTransfer, 0, 0)); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1)); + assert_ok!(Proxy::create_pure( + RuntimeOrigin::signed(1), + ProxyType::JustTransfer, + 0, + 0, + false + )); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 1, false)); let anon2 = Proxy::pure_account(&2, &ProxyType::Any, 0, None); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(2), ProxyType::Any, 0, 0, false)); assert_noop!( - Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0), + Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0, false), Error::::Duplicate ); System::set_extrinsic_index(1); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0, false)); System::set_extrinsic_index(0); System::set_block_number(2); - assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0)); + assert_ok!(Proxy::create_pure(RuntimeOrigin::signed(1), ProxyType::Any, 0, 0, false)); let call = Box::new(call_transfer(6, 1)); assert_ok!(Balances::transfer_allow_death(RuntimeOrigin::signed(3), anon, 5)); diff --git a/pallets/proxy/src/weights.rs b/pallets/proxy/src/weights.rs index f30fe73..53e2724 100644 --- a/pallets/proxy/src/weights.rs +++ b/pallets/proxy/src/weights.rs @@ -1,19 +1,18 @@ -// This file is part of Substrate. - -// Copyright (C) Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +/* + * Copyright 2024 by Ideal Labs, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ //! Autogenerated weights for pallet_proxy //! 
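The proxy hunks above thread a new `anonymous: bool` argument through `create_pure`, which is why the updated benchmarks and the `pure_works` test now pass a trailing `false` at every call site. A minimal, self-contained sketch of the delegate-selection rule this introduces (illustrative only: `u64` stands in for the account type, and the pallet itself stores the result inside a `ProxyDefinition` as shown in the diff):

```rust
/// Standalone restatement of the rule `create_pure` now applies.
fn select_delegate(anonymous: bool, who: u64) -> Option<u64> {
    // "if anonymous, then set no delegate" (mirrors the pallet comment)
    if anonymous {
        None
    } else {
        Some(who)
    }
}

fn main() {
    // Passing `false` keeps the caller recorded as the delegate,
    // which is what the updated benchmarks and tests do.
    assert_eq!(select_delegate(false, 1), Some(1));
    // Passing `true` records no delegate for the spawned pure proxy.
    assert_eq!(select_delegate(true, 1), None);
}
```

Call sites that want the pre-existing behaviour simply pass `false`; the diff does not change anything else about how the spawned pure account is derived.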
diff --git a/pallets/randomness-beacon/src/lib.rs b/pallets/randomness-beacon/src/lib.rs index 35170c6..e038775 100644 --- a/pallets/randomness-beacon/src/lib.rs +++ b/pallets/randomness-beacon/src/lib.rs @@ -17,48 +17,38 @@ #![cfg_attr(not(feature = "std"), no_std)] use codec::MaxEncodedLen; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use frame_support::{ + dispatch::{DispatchResultWithPostInfo, Pays}, pallet_prelude::*, traits::{Get, Randomness}, BoundedVec, - dispatch::{DispatchResultWithPostInfo, Pays}, }; +use frame_system::{offchain::SendTransactionTypes, pallet_prelude::*}; use sp_std::prelude::*; -use frame_system::{ - pallet_prelude::*, - offchain::SendTransactionTypes, -}; - -use sp_staking::{offence::OffenceReportSystem, SessionIndex}; use codec::{Decode, Encode}; use scale_info::TypeInfo; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use etf_crypto_primitives::utils::interpolate_threshold_bls; -use etf_crypto_primitives::{ - encryption::tlock::TLECiphertext, -}; +use etf_crypto_primitives::{encryption::tlock::TLECiphertext, utils::interpolate_threshold_bls}; -use sp_session::{GetSessionNumber, GetValidatorCount}; -use w3f_bls::{DoublePublicKey, DoubleSignature, EngineBLS, Message, SerializableToBytes, TinyBLS377}; -use sp_consensus_beefy_etf::{ - Commitment, ValidatorSetId, Payload, known_payloads, BeefyAuthorityId, +use sp_consensus_beefy_etf::{known_payloads, Commitment, Payload, ValidatorSetId}; +use w3f_bls::{ + DoublePublicKey, DoubleSignature, EngineBLS, Message, SerializableToBytes, TinyBLS377, }; +use log::{error, info}; +use sha3::{Digest, Sha3_512}; use sp_runtime::{ + traits::Hash, transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, - TransactionValidityError, ValidTransaction, + ValidTransaction, }, - DispatchError, KeyTypeId, Perbill, RuntimeAppPublic, - traits::Hash, }; -use sha3::{Digest, Sha3_512}; -use log::{info, debug, error}; #[cfg(test)] mod mock; @@ -72,31 +62,60 @@ const LOG_TARGET: &str = "runtime::randomness-beacon"; pub type OpaqueSignature = BoundedVec>; #[derive( - Default, Clone, Eq, PartialEq, RuntimeDebugNoBound, - Encode, Decode, TypeInfo, MaxEncodedLen, Serialize, Deserialize)] + Default, + Clone, + Eq, + PartialEq, + RuntimeDebugNoBound, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] pub struct PulseHeader { pub block_number: BN, // pub hash_prev: BoundedVec> } #[derive( - Default, Clone, Eq, PartialEq, RuntimeDebugNoBound, - Encode, Decode, TypeInfo, MaxEncodedLen, Serialize, Deserialize)] + Default, + Clone, + Eq, + PartialEq, + RuntimeDebugNoBound, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] pub struct PulseBody { pub signature: BoundedVec>, pub randomness: BoundedVec>, } #[derive( - Default, Clone, Eq, PartialEq, RuntimeDebugNoBound, - Encode, Decode, TypeInfo, MaxEncodedLen, Serialize, Deserialize)] + Default, + Clone, + Eq, + PartialEq, + RuntimeDebugNoBound, + Encode, + Decode, + TypeInfo, + MaxEncodedLen, + Serialize, + Deserialize, +)] pub struct Pulse { header: PulseHeader, - body: PulseBody, + body: PulseBody, } impl Pulse { - // builds the next pulse from a previous one pub fn build_next( signature: OpaqueSignature, @@ -115,15 +134,9 @@ impl Pulse { // hash_prev: bounded_hash }; - let body = PulseBody { - signature, - randomness: bounded_rand, - }; + let body = PulseBody { signature, randomness: bounded_rand }; - Pulse { - header, - body, - } + Pulse { header, 
body } } } @@ -132,13 +145,15 @@ pub mod pallet { use super::*; #[pallet::config] - pub trait Config: frame_system::Config + SendTransactionTypes> + pallet_etf::Config { + pub trait Config: + frame_system::Config + SendTransactionTypes> + pallet_etf::Config + { /// The overarching event type. type RuntimeEvent: From> + IsType<::RuntimeEvent>; /// The maximum number of pulses to store in runtime storage #[pallet::constant] type MaxPulses: Get; - + // TODO // /// Weights for this pallet. // type WeightInfo: WeightInfo; @@ -152,16 +167,11 @@ pub mod pallet { Self::validate_unsigned(source, call) } } - + /// the chain of randomness #[pallet::storage] - pub type Pulses = StorageMap< - _, - Blake2_128Concat, - BlockNumberFor, - Pulse>, - OptionQuery, - >; + pub type Pulses = + StorageMap<_, Blake2_128Concat, BlockNumberFor, Pulse>, OptionQuery>; /// the highest block number for which we have encoded a pulse #[pallet::storage] @@ -177,18 +187,15 @@ pub mod pallet { impl Default for GenesisConfig { fn default() -> Self { - Self { - genesis_pulse: Pulse::default(), - } + Self { genesis_pulse: Pulse::default() } } } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { - Pallet::::initialize( - &self.genesis_pulse - ).expect("The genesis pulse must be well formatted."); + Pallet::::initialize(&self.genesis_pulse) + .expect("The genesis pulse must be well formatted."); } } @@ -213,8 +220,10 @@ pub mod pallet { /// Writes a new block from the randomness beacon into storage if it can be verified /// - /// * `signatures`: A set of threshold bls signatures (sigma, proof) output from the beacon protocol - /// * `block_number`: The block number on which the pulse was generated (required for verification) + /// * `signatures`: A set of threshold bls signatures (sigma, proof) output from the beacon + /// protocol + /// * `block_number`: The block number on which the pulse was generated (required for + /// verification) #[pallet::call] impl Pallet { #[pallet::call_index(0)] @@ -226,16 +235,10 @@ pub mod pallet { ) -> DispatchResultWithPostInfo { ensure_none(origin)?; let round_pk_bytes: Vec = >::round_pubkey().to_vec(); - let rk = DoublePublicKey::::deserialize_compressed( - &round_pk_bytes[..] 
- ).unwrap(); - let validator_set_id = 0;//>::validator_set_id(); - let _ = Self::try_add_pulse( - signatures, - block_number, - rk, - validator_set_id - )?; + let rk = + DoublePublicKey::::deserialize_compressed(&round_pk_bytes[..]).unwrap(); + let validator_set_id = 0; //>::validator_set_id(); + let _ = Self::try_add_pulse(signatures, block_number, rk, validator_set_id)?; Height::::set(block_number); Self::deposit_event(Event::PulseStored); @@ -247,9 +250,7 @@ pub mod pallet { impl Pallet { /// initialize the genesis state for this pallet - fn initialize( - genesis_pulse: &Pulse>, - ) -> Result<(), Error> { + fn initialize(genesis_pulse: &Pulse>) -> Result<(), Error> { let current_block = >::block_number(); >::insert(current_block, genesis_pulse); Ok(()) @@ -259,30 +260,22 @@ impl Pallet { fn try_add_pulse( raw_signatures: Vec>, block_number: BlockNumberFor, - rk: DoublePublicKey, + _rk: DoublePublicKey, validator_set_id: ValidatorSetId, ) -> Result<(), Error> { - let payload = Payload::from_single_entry( - known_payloads::ETF_SIGNATURE, - Vec::new() - ); - let commitment = Commitment { - payload, - block_number, - validator_set_id, - }; + let payload = Payload::from_single_entry(known_payloads::ETF_SIGNATURE, Vec::new()); + let commitment = Commitment { payload, block_number, validator_set_id }; // // TODO: error handling let mut good_sigs = Vec::new(); raw_signatures.iter().enumerate().for_each(|(idx, rs)| { let etf_pk = >::commitments()[idx].encode(); - let pk = DoublePublicKey::::deserialize_compressed( - &etf_pk[..] - ).unwrap(); + let pk = DoublePublicKey::::deserialize_compressed(&etf_pk[..]).unwrap(); - if let Ok(sig) = DoubleSignature::::from_bytes(&rs) { + if let Ok(sig) = DoubleSignature::::from_bytes(&rs) { if sig.verify(&Message::new(b"", &commitment.encode()), &pk) { - good_sigs.push((::Scalar::from((idx as u8) + 1), sig.0)); + good_sigs + .push((::Scalar::from((idx as u8) + 1), sig.0)); } } }); @@ -290,13 +283,12 @@ impl Pallet { let sig = interpolate_threshold_bls::(good_sigs); let mut bytes = Vec::new(); sig.serialize_compressed(&mut bytes).unwrap(); - let bounded_sig = - BoundedVec::>::try_from(bytes) - .map_err(|_| Error::::InvalidSignature)?; + let bounded_sig = BoundedVec::>::try_from(bytes) + .map_err(|_| Error::::InvalidSignature)?; let pulse = Pulse::build_next( - bounded_sig, - block_number, + bounded_sig, + block_number, // last_pulse ); @@ -314,7 +306,7 @@ impl Pallet { /// validate an unsigned transaction sent to this module pub fn validate_unsigned(source: TransactionSource, call: &Call) -> TransactionValidity { - if let Call::write_pulse { signatures, block_number } = call { + if let Call::write_pulse { signatures: _, block_number: _ } = call { // discard pulses not coming from the local node match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ }, @@ -323,14 +315,14 @@ impl Pallet { target: LOG_TARGET, "rejecting unsigned beacon pulse because it is not local/in-block." ); - return InvalidTransaction::Call.into() + return InvalidTransaction::Call.into(); }, } ValidTransaction::with_tag_prefix("RandomnessBeacon") // We assign the maximum priority for any equivocation report. .priority(TransactionPriority::MAX) - .longevity(3) + .longevity(3) // We don't propagate this. This can never be included on a remote node. 
.propagate(false) .build() @@ -340,15 +332,9 @@ impl Pallet { } /// submit an unsigned transaction to write a new pulse into storage - pub fn publish_pulse( - signatures: Vec>, - block_number: BlockNumberFor, - ) -> Option<()> { - use frame_system::offchain::{Signer, SubmitTransaction}; - let call = Call::write_pulse { - signatures, - block_number, - }; + pub fn publish_pulse(signatures: Vec>, block_number: BlockNumberFor) -> Option<()> { + use frame_system::offchain::SubmitTransaction; + let call = Call::write_pulse { signatures, block_number }; let res = SubmitTransaction::>::submit_unsigned_transaction(call.into()); match res { @@ -382,10 +368,7 @@ pub struct Ciphertext { /// provides timelock encryption using the current slot pub trait TimelockEncryptionProvider { /// attempt to decrypt the ciphertext with the current slot secret - fn decrypt_at( - ciphertext: &[u8], - block_number: BN - ) -> Result; + fn decrypt_at(ciphertext: &[u8], block_number: BN) -> Result; /// get the latest block number for which randomness is known fn latest() -> BN; @@ -395,25 +378,25 @@ pub trait TimelockEncryptionProvider { // use w3f_bls::{EngineBLS}; use etf_crypto_primitives::encryption::tlock::DecryptionResult; -impl TimelockEncryptionProvider> for Pallet { +impl TimelockEncryptionProvider> for Pallet { fn decrypt_at( - ciphertext_bytes: &[u8], - block_number: BlockNumberFor + ciphertext_bytes: &[u8], + block_number: BlockNumberFor, ) -> Result { if let Some(secret) = Pulses::::get(block_number) { - let pk = >::round_pubkey(); + // let pk = >::round_pubkey(); // TODO: replace with optimized arkworks types? - let ciphertext:TLECiphertext = + let ciphertext: TLECiphertext = TLECiphertext::deserialize_compressed(ciphertext_bytes) .map_err(|_| TimelockError::DecodeFailure)?; - let sig: ::SignatureGroup = + let sig: ::SignatureGroup = ::SignatureGroup::deserialize_compressed( - &secret.body.signature.to_vec()[..] 
- ).map_err(|_| TimelockError::DecodeFailure)?; + &secret.body.signature.to_vec()[..], + ) + .map_err(|_| TimelockError::DecodeFailure)?; - let plaintext = ciphertext.tld(sig) - .map_err(|_| TimelockError::DecryptionFailed)?; + let plaintext = ciphertext.tld(sig).map_err(|_| TimelockError::DecryptionFailed)?; return Ok(plaintext); } @@ -421,13 +404,12 @@ impl TimelockEncryptionProvider> for Pallet { } fn latest() -> BlockNumberFor { - return Height::::get() + return Height::::get(); } } // use frame_support::StorageHasher; - impl Randomness> for Pallet { // this function hashes together the subject with the latest known randomness fn random(subject: &[u8]) -> (T::Hash, BlockNumberFor) { @@ -435,10 +417,10 @@ impl Randomness> for Pallet { let mut entropy = T::Hash::default(); if let Some(pulse) = Pulses::::get(height) { - entropy = (subject, height, pulse.body.randomness.clone()) - .using_encoded(T::Hashing::hash); + entropy = + (subject, height, pulse.body.randomness.clone()).using_encoded(T::Hashing::hash); } (entropy, height) } -} \ No newline at end of file +} diff --git a/pallets/randomness-beacon/src/mock.rs b/pallets/randomness-beacon/src/mock.rs index d5dec53..ade399b 100644 --- a/pallets/randomness-beacon/src/mock.rs +++ b/pallets/randomness-beacon/src/mock.rs @@ -21,10 +21,8 @@ use frame_support::{ construct_runtime, derive_impl, parameter_types, traits::{ConstU32, ConstU64}, }; -use sp_consensus_beefy_etf::{ - mmr::MmrLeafVersion, - test_utils::etf_genesis, -}; +use sp_consensus_beefy_etf::{mmr::MmrLeafVersion, test_utils::etf_genesis}; +use sp_core::Pair; use sp_io::TestExternalities; use sp_runtime::{ app_crypto::bls377::Public, @@ -32,14 +30,11 @@ use sp_runtime::{ traits::{ConvertInto, Keccak256, OpaqueKeys}, BuildStorage, }; -use sp_core::Pair; use sp_state_machine::BasicExternalities; use crate as pallet_randomness_beacon; -pub use sp_consensus_beefy_etf::{ - bls_crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider, -}; +pub use sp_consensus_beefy_etf::{bls_crypto::AuthorityId as BeefyId, mmr::BeefyDataProvider}; impl_opaque_keys! { pub struct MockSessionKeys { @@ -55,7 +50,7 @@ construct_runtime!( System: frame_system, Session: pallet_session, Mmr: pallet_mmr, - Beacon: pallet_randomness_beacon, + Beacon: pallet_randomness_beacon, Etf: pallet_etf, Beefy: pallet_beefy, BeefyMmr: pallet_beefy_mmr, @@ -140,8 +135,8 @@ impl BeefyDataProvider> for DummyDataProvider { } impl pallet_randomness_beacon::Config for Test { - type RuntimeEvent = RuntimeEvent; - type MaxPulses = ConstU32<256000>; + type RuntimeEvent = RuntimeEvent; + type MaxPulses = ConstU32<256000>; } pub struct MockSessionManager; @@ -165,7 +160,7 @@ impl pallet_session::SessionManager for MockSessionManager { // with the first one containing information to reconstruct the uncompressed key. 
pub fn mock_beefy_id(id: u8) -> BeefyId { // generate a new keypair and get the public key - let kp = sp_core::bls::Pair::from_seed_slice(&[id;32]).unwrap(); + let kp = sp_core::bls::Pair::from_seed_slice(&[id; 32]).unwrap(); BeefyId::from(kp.public()) } @@ -193,19 +188,17 @@ pub fn new_test_ext_raw_authorities(authorities: Vec<(u64, BeefyId)>) -> TestExt }); let (round_pubkey, genesis_resharing) = etf_genesis::( - authorities.iter() - .map(|(_, id)| id.clone()) - .collect::>() + authorities.iter().map(|(_, id)| id.clone()).collect::>(), ); - pallet_etf::GenesisConfig:: { - genesis_resharing: genesis_resharing, - round_pubkey: round_pubkey, + pallet_etf::GenesisConfig:: { + genesis_resharing, + round_pubkey, _phantom: Default::default(), } - .assimilate_storage(&mut t) - .unwrap(); - + .assimilate_storage(&mut t) + .unwrap(); + pallet_session::GenesisConfig:: { keys: session_keys } .assimilate_storage(&mut t) .unwrap(); diff --git a/pallets/randomness-beacon/src/tests.rs b/pallets/randomness-beacon/src/tests.rs index c85a61f..8b2a776 100644 --- a/pallets/randomness-beacon/src/tests.rs +++ b/pallets/randomness-beacon/src/tests.rs @@ -14,195 +14,179 @@ * limitations under the License. */ -use std::vec; +use crate::{self as beacon, mock::*, BlockNumberFor, Call, Config, Error, Weight}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use codec::Encode; use frame_support::{assert_ok, traits::OnInitialize}; -use crate::{ - self as beacon, - BlockNumberFor, - mock::*, - Call, Config, - Error, Weight -}; use sha2::Sha256; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use sp_core::{bls377, Pair, ByteArray}; -use sp_consensus_beefy_etf::{ - Commitment, ValidatorSetId, Payload, known_payloads, -}; +use sp_consensus_beefy_etf::{known_payloads, Commitment, Payload, ValidatorSetId}; +use sp_core::{bls377, ByteArray, Pair}; +use std::vec; use ark_ff::Zero; use etf_crypto_primitives::{ - utils::interpolate_threshold_bls, - proofs::hashed_el_gamal_sigma::BatchPoK, + proofs::hashed_el_gamal_sigma::BatchPoK, utils::interpolate_threshold_bls, }; -use w3f_bls::{Signature, DoublePublicKey, DoubleSignature, EngineBLS, Message, TinyBLS377, SerializableToBytes}; use w3f_bls::{ - single_pop_aggregator::SignatureAggregatorAssumingPoP, DoublePublicKeyScheme, Keypair, PublicKey, PublicKeyInSignatureGroup, Signed, TinyBLS, + single_pop_aggregator::SignatureAggregatorAssumingPoP, DoublePublicKey, DoublePublicKeyScheme, + DoubleSignature, EngineBLS, Keypair, Message, PublicKey, PublicKeyInSignatureGroup, + SerializableToBytes, Signature, Signed, TinyBLS, TinyBLS377, }; - fn init_block(block: u64) { System::set_block_number(block); Session::on_initialize(block); } -fn calculate_signature(id: u8, serialized_resharing: &[u8], message: &[u8]) -> (bls377::Public, bls377::Signature) { - let kp = sp_core::bls::Pair::from_seed_slice(&[id;32]).unwrap(); - let etf_kp = kp.acss_recover(serialized_resharing, 1).unwrap(); - (etf_kp.public(), etf_kp.sign(message)) +fn calculate_signature( + id: u8, + serialized_resharing: &[u8], + message: &[u8], +) -> (bls377::Public, bls377::Signature) { + let kp = sp_core::bls::Pair::from_seed_slice(&[id; 32]).unwrap(); + let etf_kp = kp.acss_recover(serialized_resharing, 1).unwrap(); + (etf_kp.public(), etf_kp.sign(message)) } #[test] fn test_genesis() { - // for simplicity of simulating a beacon, we use a single validator model - new_test_ext(vec![1]).execute_with(|| { - let pulses = beacon::Pulses::::get(); - assert!(pulses.is_empty()); - }); + // for 
simplicity of simulating a beacon, we use a single validator model + new_test_ext(vec![1]).execute_with(|| { + let pulses = beacon::Pulses::::get(); + assert!(pulses.is_empty()); + }); } #[test] fn test_can_write_single_pulse() { new_test_ext(vec![1, 2, 3]).execute_with(|| { - let pulses = beacon::Pulses::::get(); - assert_eq!(pulses.len(), 0); - - let round_pk_bytes: Vec = >::round_pubkey().to_vec(); - let rk = DoublePublicKey::::deserialize_compressed( - &round_pk_bytes[..] - ).unwrap(); - // now we write a new pulse... - let resharing_bytes_1 = &pallet_etf::Shares::::get()[0]; - let resharing_bytes_2 = &pallet_etf::Shares::::get()[1]; - let resharing_bytes_3 = &pallet_etf::Shares::::get()[2]; - - // // convert to batchpok - let etf_pk_1 = &pallet_etf::Commitments::::get()[0]; - let etf_pk_2 = &pallet_etf::Commitments::::get()[1]; - let etf_pk_3 = &pallet_etf::Commitments::::get()[2]; - - let payload = Payload::from_single_entry( - known_payloads::ETF_SIGNATURE, - Vec::new() - ); - let validator_set_id = >::validator_set_id(); - let block_number: BlockNumberFor = 1; - let commitment = Commitment { - payload, - block_number, - validator_set_id, - }; - - // let mut pub_keys_in_sig_grp: Vec> = Vec::new(); - - let (_pk1, signature_1) = calculate_signature(1, resharing_bytes_1, &commitment.encode()); - - let pk1_ref: &[u8] = etf_pk_1.as_ref(); - let pk1_bytes_pub = &pk1_ref[48..144]; - let pk1_bytes_sig = &pk1_ref[0..48]; - - let pk1_pub = ::PublicKeyGroup::deserialize_compressed(pk1_bytes_pub).unwrap(); - let pk1_sig = ::SignatureGroup::deserialize_compressed(pk1_bytes_sig).unwrap(); - - let sig_bytes_1: &[u8] = signature_1.as_ref(); - let sig_1 = DoubleSignature::::from_bytes(sig_bytes_1).unwrap(); - - let (_pk2, signature_2) = calculate_signature(2, resharing_bytes_2, &commitment.encode()); - let sig_bytes_2: &[u8] = signature_2.as_ref(); - - let sig_2 = DoubleSignature::::from_bytes(sig_bytes_2).unwrap(); - - let mut pk2_bytes: &[u8] = etf_pk_2.as_ref(); - let pk2_bytes_pub = &pk2_bytes[48..144]; - let pk2_bytes_sig = &pk2_bytes[0..48]; - let pk2_pub = ::PublicKeyGroup::deserialize_compressed(pk2_bytes_pub).unwrap(); - let pk2_sig = ::SignatureGroup::deserialize_compressed(pk2_bytes_sig).unwrap(); - - let (_pk3, signature_3) = calculate_signature(3, resharing_bytes_3, &commitment.encode()); - let sig_bytes_3: &[u8] = signature_3.as_ref(); - let sig_3 = DoubleSignature::::from_bytes(sig_bytes_3).unwrap(); - - let mut pk3_bytes: &[u8] = etf_pk_3.as_ref(); - let pk3_bytes_pub = &pk3_bytes[48..144]; - let pk3_bytes_sig = &pk3_bytes[0..48]; - let pk3_pub = ::PublicKeyGroup::deserialize_compressed(pk3_bytes_pub).unwrap(); - let pk3_sig = ::SignatureGroup::deserialize_compressed(pk3_bytes_sig).unwrap(); - - let message = Message::new(b"", &commitment.encode()); - let mut prover_aggregator = - SignatureAggregatorAssumingPoP::::new(message.clone()); - - let mut serialized_sig = Vec::new(); - let sig = &(&prover_aggregator).signature(); - sig.serialize_compressed(&mut serialized_sig).unwrap(); - - assert_ok!(Beacon::write_pulse( - RuntimeOrigin::none(), - // serialized_sig.to_vec(), - vec![sig_bytes_1.to_vec(), sig_bytes_2.to_vec(), sig_bytes_3.to_vec()], - 1, - )); - // step to next block - init_block(1); - - let pulses = beacon::Pulses::::get(); - assert_eq!(pulses.len(), 1); + let pulses = beacon::Pulses::::get(); + assert_eq!(pulses.len(), 0); + + let round_pk_bytes: Vec = >::round_pubkey().to_vec(); + let rk = + DoublePublicKey::::deserialize_compressed(&round_pk_bytes[..]).unwrap(); + // now 
we write a new pulse... + let resharing_bytes_1 = &pallet_etf::Shares::::get()[0]; + let resharing_bytes_2 = &pallet_etf::Shares::::get()[1]; + let resharing_bytes_3 = &pallet_etf::Shares::::get()[2]; + + // // convert to batchpok + let etf_pk_1 = &pallet_etf::Commitments::::get()[0]; + let etf_pk_2 = &pallet_etf::Commitments::::get()[1]; + let etf_pk_3 = &pallet_etf::Commitments::::get()[2]; + + let payload = Payload::from_single_entry(known_payloads::ETF_SIGNATURE, Vec::new()); + let validator_set_id = >::validator_set_id(); + let block_number: BlockNumberFor = 1; + let commitment = Commitment { payload, block_number, validator_set_id }; + + // let mut pub_keys_in_sig_grp: Vec> = Vec::new(); + + let (_pk1, signature_1) = calculate_signature(1, resharing_bytes_1, &commitment.encode()); + + let pk1_ref: &[u8] = etf_pk_1.as_ref(); + let pk1_bytes_pub = &pk1_ref[48..144]; + let pk1_bytes_sig = &pk1_ref[0..48]; + + let pk1_pub = + ::PublicKeyGroup::deserialize_compressed(pk1_bytes_pub) + .unwrap(); + let pk1_sig = + ::SignatureGroup::deserialize_compressed(pk1_bytes_sig) + .unwrap(); + + let sig_bytes_1: &[u8] = signature_1.as_ref(); + let sig_1 = DoubleSignature::::from_bytes(sig_bytes_1).unwrap(); + + let (_pk2, signature_2) = calculate_signature(2, resharing_bytes_2, &commitment.encode()); + let sig_bytes_2: &[u8] = signature_2.as_ref(); + + let sig_2 = DoubleSignature::::from_bytes(sig_bytes_2).unwrap(); + + let mut pk2_bytes: &[u8] = etf_pk_2.as_ref(); + let pk2_bytes_pub = &pk2_bytes[48..144]; + let pk2_bytes_sig = &pk2_bytes[0..48]; + let pk2_pub = + ::PublicKeyGroup::deserialize_compressed(pk2_bytes_pub) + .unwrap(); + let pk2_sig = + ::SignatureGroup::deserialize_compressed(pk2_bytes_sig) + .unwrap(); + + let (_pk3, signature_3) = calculate_signature(3, resharing_bytes_3, &commitment.encode()); + let sig_bytes_3: &[u8] = signature_3.as_ref(); + let sig_3 = DoubleSignature::::from_bytes(sig_bytes_3).unwrap(); + + let mut pk3_bytes: &[u8] = etf_pk_3.as_ref(); + let pk3_bytes_pub = &pk3_bytes[48..144]; + let pk3_bytes_sig = &pk3_bytes[0..48]; + let pk3_pub = + ::PublicKeyGroup::deserialize_compressed(pk3_bytes_pub) + .unwrap(); + let pk3_sig = + ::SignatureGroup::deserialize_compressed(pk3_bytes_sig) + .unwrap(); + + let message = Message::new(b"", &commitment.encode()); + let mut prover_aggregator = + SignatureAggregatorAssumingPoP::::new(message.clone()); + + let mut serialized_sig = Vec::new(); + let sig = &(&prover_aggregator).signature(); + sig.serialize_compressed(&mut serialized_sig).unwrap(); + + assert_ok!(Beacon::write_pulse( + RuntimeOrigin::none(), + // serialized_sig.to_vec(), + vec![sig_bytes_1.to_vec(), sig_bytes_2.to_vec(), sig_bytes_3.to_vec()], + 1, + )); + // step to next block + init_block(1); + + let pulses = beacon::Pulses::::get(); + assert_eq!(pulses.len(), 1); }); } #[test] fn test_can_write_many_pulses() { - new_test_ext(vec![1]).execute_with(|| { - let pulses = beacon::Pulses::::get(); - assert_eq!(pulses.len(), 0); - - let round_pk_bytes: Vec = >::round_pubkey().to_vec(); - let rk = DoublePublicKey::::deserialize_compressed( - &round_pk_bytes[..] - ).unwrap(); - // now we write a new pulse... 
- let resharing_bytes_1 = &pallet_etf::Shares::::get()[0]; - - // // convert to batchpok - let etf_pk_1 = &pallet_etf::Commitments::::get()[0]; - - let payload = Payload::from_single_entry( - known_payloads::ETF_SIGNATURE, - Vec::new() - ); - let validator_set_id = >::validator_set_id(); - let block_number: BlockNumberFor = 1; - let commitment = Commitment { - payload, - block_number, - validator_set_id, - }; - - let (_pk1, signature_1) = calculate_signature(1, resharing_bytes_1, &commitment.encode()); - let sig_bytes_1: &[u8] = signature_1.as_ref(); - assert_ok!(Beacon::write_pulse( - RuntimeOrigin::none(), - vec![sig_bytes_1.to_vec()], - 1, - )); - // step to next block - init_block(1); - - let pulses = beacon::Pulses::::get(); - assert_eq!(pulses.len(), 1); - - let (_pk1, signature_1_next) = calculate_signature(1, resharing_bytes_1, &commitment.encode()); - let sig_bytes_1_next: &[u8] = signature_1_next.as_ref(); - assert_ok!(Beacon::write_pulse( - RuntimeOrigin::none(), - vec![sig_bytes_1_next.to_vec()], - 2, - )); - // step to next block - init_block(2); - - let pulses = beacon::Pulses::::get(); - assert_eq!(pulses.len(), 2); + new_test_ext(vec![1]).execute_with(|| { + let pulses = beacon::Pulses::::get(); + assert_eq!(pulses.len(), 0); + + let round_pk_bytes: Vec = >::round_pubkey().to_vec(); + let rk = + DoublePublicKey::::deserialize_compressed(&round_pk_bytes[..]).unwrap(); + // now we write a new pulse... + let resharing_bytes_1 = &pallet_etf::Shares::::get()[0]; + + // // convert to batchpok + let etf_pk_1 = &pallet_etf::Commitments::::get()[0]; + + let payload = Payload::from_single_entry(known_payloads::ETF_SIGNATURE, Vec::new()); + let validator_set_id = >::validator_set_id(); + let block_number: BlockNumberFor = 1; + let commitment = Commitment { payload, block_number, validator_set_id }; + + let (_pk1, signature_1) = calculate_signature(1, resharing_bytes_1, &commitment.encode()); + let sig_bytes_1: &[u8] = signature_1.as_ref(); + assert_ok!(Beacon::write_pulse(RuntimeOrigin::none(), vec![sig_bytes_1.to_vec()], 1,)); + // step to next block + init_block(1); + + let pulses = beacon::Pulses::::get(); + assert_eq!(pulses.len(), 1); + + let (_pk1, signature_1_next) = + calculate_signature(1, resharing_bytes_1, &commitment.encode()); + let sig_bytes_1_next: &[u8] = signature_1_next.as_ref(); + assert_ok!(Beacon::write_pulse(RuntimeOrigin::none(), vec![sig_bytes_1_next.to_vec()], 2,)); + // step to next block + init_block(2); + + let pulses = beacon::Pulses::::get(); + assert_eq!(pulses.len(), 2); }); -} \ No newline at end of file +} diff --git a/pallets/scheduler/src/benchmarking.rs b/pallets/scheduler/src/benchmarking.rs index c4d0bb1..64bee81 100644 --- a/pallets/scheduler/src/benchmarking.rs +++ b/pallets/scheduler/src/benchmarking.rs @@ -23,7 +23,7 @@ use frame_support::{ ensure, traits::{schedule::Priority, BoundedInline, ConstU32}, }; -use frame_system::{pallet_prelude::BlockNumberFor}; +use frame_system::pallet_prelude::BlockNumberFor; use sp_std::{prelude::*, vec}; use crate::Pallet as Scheduler; @@ -63,7 +63,7 @@ fn fill_schedule_signed( n: u32, ) -> Result<(), &'static str> { let t = DispatchTime::At(when); - let origin: ::PalletsOrigin = + let origin: ::PalletsOrigin = frame_system::RawOrigin::Signed(account("origin", 0, SEED)).into(); for i in 0..n { let call = make_call::(None); @@ -75,7 +75,6 @@ fn fill_schedule_signed( Ok(()) } - fn u32_to_name(i: u32) -> TaskName { i.using_encoded(blake2_256) } @@ -101,10 +100,10 @@ fn make_task( maybe_id, priority, 
maybe_ciphertext: None, - maybe_call: Some(call), + maybe_call: Some(call), maybe_periodic, - origin, - _phantom: PhantomData + origin, + _phantom: PhantomData, } } @@ -126,11 +125,11 @@ fn make_call(maybe_lookup_len: Option) -> BoundedCallOf { Some(x) => x, None => { len -= 1; - continue + continue; }, }; if c.lookup_needed() == maybe_lookup_len.is_some() { - break c + break c; } if maybe_lookup_len.is_some() { len += 1; @@ -138,7 +137,7 @@ fn make_call(maybe_lookup_len: Option) -> BoundedCallOf { if len > 0 { len -= 1; } else { - break c + break c; } } } diff --git a/pallets/scheduler/src/lib.rs b/pallets/scheduler/src/lib.rs index 7dd5ab4..c39ac93 100644 --- a/pallets/scheduler/src/lib.rs +++ b/pallets/scheduler/src/lib.rs @@ -91,8 +91,8 @@ use frame_support::{ ensure, traits::{ schedule::{self, DispatchTime, MaybeHashed}, - Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, - PrivilegeCmp, QueryPreimage, StorageVersion, StorePreimage, + Bounded, CallerTrait, EnsureOrigin, Get, IsType, OriginTrait, PrivilegeCmp, QueryPreimage, + StorageVersion, StorePreimage, }, weights::{Weight, WeightMeter}, }; @@ -100,15 +100,15 @@ use frame_system::{ pallet_prelude::BlockNumberFor, {self as system}, }; +pub use pallet::*; +use pallet_randomness_beacon::TimelockEncryptionProvider; use scale_info::TypeInfo; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{BadOrigin, Dispatchable, One, Saturating, Zero, ConstU32}, + traits::{BadOrigin, ConstU32, Dispatchable, One, Saturating, Zero}, BoundedVec, DispatchError, RuntimeDebug, }; use sp_std::{borrow::Borrow, cmp::Ordering, marker::PhantomData, prelude::*}; -use pallet_randomness_beacon::{TimelockEncryptionProvider}; -pub use pallet::*; /// Just a simple index for naming period tasks. pub type PeriodicIndex = u32; @@ -497,7 +497,7 @@ impl Pallet { }; if when <= now { - return Err(Error::::TargetBlockNumberInPast.into()) + return Err(Error::::TargetBlockNumberInPast.into()); } Ok(when) @@ -529,10 +529,10 @@ impl Pallet { let _ = agenda.try_push(Some(what)); agenda.len() as u32 - 1 } else if let Some(hole_index) = agenda.iter().position(|i| i.is_none()) { - agenda[hole_index] = Some(what); - hole_index as u32 + agenda[hole_index] = Some(what); + hole_index as u32 } else { - return Err((DispatchError::Exhausted, what)) + return Err((DispatchError::Exhausted, what)); }; Agenda::::insert(when, agenda); Ok(index) @@ -602,7 +602,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } }; Ok(s.take()) @@ -610,11 +610,10 @@ impl Pallet { ) })?; if let Some(s) = scheduled { - if s.maybe_ciphertext.is_none() && s.maybe_call.is_some() { T::Preimages::drop(&s.maybe_call.clone().unwrap()); } - + if let Some(id) = s.maybe_id { Lookup::::remove(id); } @@ -633,7 +632,7 @@ impl Pallet { let new_time = Self::resolve_time(new_time)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()) + return Err(Error::::RescheduleNoChange.into()); } let task = Agenda::::try_mutate(when, |agenda| { @@ -657,7 +656,7 @@ impl Pallet { ) -> Result>, DispatchError> { // ensure id it is unique if Lookup::::contains_key(id) { - return Err(Error::::FailedToSchedule.into()) + return Err(Error::::FailedToSchedule.into()); } let when = Self::resolve_time(when)?; @@ -700,7 +699,7 @@ impl Pallet { T::OriginPrivilegeCmp::cmp_privilege(o, &s.origin), Some(Ordering::Less) | None ) { - return Err(BadOrigin.into()) + return Err(BadOrigin.into()); } 
T::Preimages::drop(&s.maybe_call.clone().unwrap()); } @@ -727,7 +726,7 @@ impl Pallet { let (when, index) = lookup.ok_or(Error::::NotFound)?; if new_time == when { - return Err(Error::::RescheduleNoChange.into()) + return Err(Error::::RescheduleNoChange.into()); } let task = Agenda::::try_mutate(when, |agenda| { @@ -776,7 +775,7 @@ impl Pallet { /// Service up to `max` agendas queue starting from earliest incompletely executed agenda. fn service_agendas(weight: &mut WeightMeter, now: BlockNumberFor, max: u32) { if weight.try_consume(T::WeightInfo::service_agendas_base()).is_err() { - return + return; } let mut incomplete_since = now + One::one(); @@ -791,7 +790,7 @@ impl Pallet { if !Self::service_agenda(weight, &mut executed, now, when, then, u32::max_value()) { incomplete_since = incomplete_since.min(when); } - + when.saturating_inc(); count_down.saturating_dec(); } @@ -832,7 +831,6 @@ impl Pallet { let mut dropped = 0; for (agenda_index, _) in ordered.into_iter().take(max as usize) { - let mut task = match agenda[agenda_index as usize].take() { None => continue, Some(t) => t, @@ -840,31 +838,36 @@ impl Pallet { if let Some(ref ciphertext) = task.maybe_ciphertext { // the task should be delayed until `then` == `when` - if then == when { + if then == when { task.maybe_call = T::TlockProvider::decrypt_at(&ciphertext.clone(), then) .map_err(|_| pallet_randomness_beacon::TimelockError::DecryptionFailed) .and_then(|bare| { - if let Ok(call) = ::RuntimeCall::decode(&mut bare.message.as_slice()) { + if let Ok(call) = + ::RuntimeCall::decode(&mut bare.message.as_slice()) + { Ok(call) } else { Err(pallet_randomness_beacon::TimelockError::DecryptionFailed) } }) - .and_then(|call| T::Preimages::bound(call) - .map_err(|_| pallet_randomness_beacon::TimelockError::DecryptionFailed)) + .and_then(|call| { + T::Preimages::bound(call).map_err(|_| { + pallet_randomness_beacon::TimelockError::DecryptionFailed + }) + }) .ok(); } else { // insert the task back into the agenda and continue agenda[agenda_index as usize] = Some(task); postponed += 1; - continue + continue; } } // if we haven't dispatched the call and the call data is empty // then there is no valid call, so ignore this task if task.maybe_call.is_none() { - continue + continue; } let base_weight = T::WeightInfo::service_task( @@ -875,7 +878,7 @@ impl Pallet { ); if !weight.can_consume(base_weight) { postponed += 1; - break + break; } let result = Self::service_task(weight, now, when, agenda_index, *executed == 0, task); agenda[agenda_index as usize] = match result { @@ -929,7 +932,7 @@ impl Pallet { id: task.maybe_id, }); - return Err((Unavailable, Some(task))) + return Err((Unavailable, Some(task))); }, }; @@ -1004,14 +1007,15 @@ impl Pallet { let max_weight = base_weight.saturating_add(call_weight); if !weight.can_consume(max_weight) { - return Err(()) + return Err(()); } let dispatch_origin = origin.into(); let (maybe_actual_call_weight, result) = match call.dispatch(dispatch_origin) { Ok(post_info) => (post_info.actual_weight, Ok(())), - Err(error_and_info) => - (error_and_info.post_info.actual_weight, Err(error_and_info.error)), + Err(error_and_info) => { + (error_and_info.post_info.actual_weight, Err(error_and_info.error)) + }, }; let call_weight = maybe_actual_call_weight.unwrap_or(call_weight); let _ = weight.try_consume(base_weight); @@ -1059,7 +1063,7 @@ impl schedule::v2::Named, ::RuntimeCal { type Address = TaskAddress>; type Hash = T::Hash; - + fn schedule_named( id: Vec, when: DispatchTime>, diff --git 
a/pallets/scheduler/src/mock.rs b/pallets/scheduler/src/mock.rs index 9d89e77..4b34f2d 100644 --- a/pallets/scheduler/src/mock.rs +++ b/pallets/scheduler/src/mock.rs @@ -23,8 +23,8 @@ // use frame_support::{ // ord_parameter_types, parameter_types, // traits::{ -// ConstU32, ConstU64, ConstBool, -// Contains, EitherOfDiverse, EqualPrivilegeOnly, +// ConstU32, ConstU64, ConstBool, +// Contains, EitherOfDiverse, EqualPrivilegeOnly, // OnFinalize, OnInitialize, // }, // weights::constants::RocksDbWeight, @@ -330,14 +330,14 @@ // let ibe_pp_bytes: [u8;96] = convert_to_bytes::(G2::generator()) // .try_into() // .expect("The slot secret should be valid; qed;"); - + // let pt = DefaultEtfClient::::decrypt( -// ibe_pp_bytes.to_vec(), -// ciphertext.ciphertext.to_vec(), -// ciphertext.nonce.to_vec(), -// vec![ciphertext.capsule.to_vec()], +// ibe_pp_bytes.to_vec(), +// ciphertext.ciphertext.to_vec(), +// ciphertext.nonce.to_vec(), +// vec![ciphertext.capsule.to_vec()], // vec![sk.to_vec()], // ).map_err(|_| TimelockError::DecryptionFailed)?; // Ok(pt) // } -// } \ No newline at end of file +// } diff --git a/pallets/scheduler/src/tests.rs b/pallets/scheduler/src/tests.rs index 4bafc3b..275707e 100644 --- a/pallets/scheduler/src/tests.rs +++ b/pallets/scheduler/src/tests.rs @@ -19,33 +19,23 @@ use super::*; use crate::mock::{ - logger, new_test_ext, root, run_to_block, LoggerCall, RuntimeCall, Scheduler, Preimage, Test, *, + logger, new_test_ext, root, run_to_block, LoggerCall, Preimage, RuntimeCall, Scheduler, Test, *, }; use frame_support::{ assert_err, assert_noop, assert_ok, - traits::{ - Contains, - OnInitialize, QueryPreimage, - StorePreimage, ConstU32}, + traits::{ConstU32, Contains, OnInitialize, QueryPreimage, StorePreimage}, Hashable, }; use sp_runtime::traits::Hash; use substrate_test_utils::assert_eq_uvec; -use ark_std::{ - rand::SeedableRng, - ops::Mul, - One, -}; use ark_bls12_381::{Fr, G2Projective as G2}; use ark_ec::Group; -use etf_crypto_primitives::utils::convert_to_bytes; +use ark_std::{ops::Mul, rand::SeedableRng, One}; use etf_crypto_primitives::{ - client::etf_client::{ - DefaultEtfClient, - EtfClient - }, + client::etf_client::{DefaultEtfClient, EtfClient}, ibe::fullident::BfIbe, + utils::convert_to_bytes, }; use rand_chacha::ChaCha20Rng; @@ -684,11 +674,11 @@ fn on_initialize_weight_is_correct() { // Will include the named periodic only assert_eq!( Scheduler::on_initialize(1), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_parts(4, 0) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_parts(4, 0) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32)]); @@ -696,14 +686,14 @@ fn on_initialize_weight_is_correct() { // Will include anon and anon periodic assert_eq!( Scheduler::on_initialize(2), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(2) + - ::service_task(None, false, true) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_parts(3, 0) + - ::service_task(None, false, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_parts(2, 0) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(2) + + ::service_task(None, 
false, true) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_parts(3, 0) + + ::service_task(None, false, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_parts(2, 0) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!(logger::log(), vec![(root(), 2600u32), (root(), 69u32), (root(), 42u32)]); @@ -711,11 +701,11 @@ fn on_initialize_weight_is_correct() { // Will include named only assert_eq!( Scheduler::on_initialize(3), - TestWeightInfo::service_agendas_base() + - TestWeightInfo::service_agenda_base(1) + - ::service_task(None, true, false) + - TestWeightInfo::execute_dispatch_unsigned() + - call_weight + Weight::from_parts(1, 0) + TestWeightInfo::service_agendas_base() + + TestWeightInfo::service_agenda_base(1) + + ::service_task(None, true, false) + + TestWeightInfo::execute_dispatch_unsigned() + + call_weight + Weight::from_parts(1, 0) ); assert_eq!(IncompleteSince::::get(), None); assert_eq!( @@ -902,15 +892,28 @@ fn test_migrate_origin() { new_test_ext().execute_with(|| { for i in 0..3u64 { let k = i.twox_64_concat(); - let old: Vec, BoundedVec>, u64, u32, u64>>> = vec![ + let old: Vec< + Option< + Scheduled< + [u8; 32], + BoundedCallOf, + BoundedVec>, + u64, + u32, + u64, + >, + >, + > = vec![ Some(Scheduled { maybe_id: None, priority: i as u8 + 10, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_parts(100, 0), - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_parts(100, 0), + })) + .unwrap(), + ), maybe_ciphertext: None, origin: 3u32, maybe_periodic: None, @@ -921,11 +924,13 @@ fn test_migrate_origin() { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, origin: 2u32, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_parts(10, 0), - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_parts(10, 0), + })) + .unwrap(), + ), maybe_ciphertext: None, maybe_periodic: Some((456u64, 10)), _phantom: Default::default(), @@ -955,11 +960,13 @@ fn test_migrate_origin() { Some(ScheduledOf:: { maybe_id: None, priority: 10, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_parts(100, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_parts(100, 0) + })) + .unwrap() + ), maybe_ciphertext: None, maybe_periodic: None, origin: system::RawOrigin::Root.into(), @@ -969,11 +976,13 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_parts(10, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + ), maybe_ciphertext: None, maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), @@ -987,11 +996,13 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: None, priority: 11, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_parts(100, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_parts(100, 0) + })) + .unwrap() + ), 
maybe_ciphertext: None, maybe_periodic: None, origin: system::RawOrigin::Root.into(), @@ -1001,11 +1012,13 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_parts(10, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + ), maybe_ciphertext: None, maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), @@ -1019,11 +1032,13 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: None, priority: 12, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 96, - weight: Weight::from_parts(100, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 96, + weight: Weight::from_parts(100, 0) + })) + .unwrap() + ), maybe_ciphertext: None, maybe_periodic: None, origin: system::RawOrigin::Root.into(), @@ -1033,11 +1048,13 @@ fn test_migrate_origin() { Some(Scheduled { maybe_id: Some(blake2_256(&b"test"[..])), priority: 123, - maybe_call: Some(Preimage::bound(RuntimeCall::Logger(LoggerCall::log { - i: 69, - weight: Weight::from_parts(10, 0) - })) - .unwrap()), + maybe_call: Some( + Preimage::bound(RuntimeCall::Logger(LoggerCall::log { + i: 69, + weight: Weight::from_parts(10, 0) + })) + .unwrap() + ), maybe_ciphertext: None, maybe_periodic: Some((456u64, 10)), origin: system::RawOrigin::None.into(), @@ -1813,11 +1830,9 @@ fn unavailable_call_is_detected() { #[test] #[docify::export] fn timelock_basic_scheduling_works() { - let mut rng = ChaCha20Rng::from_seed([4;32]); + let mut rng = ChaCha20Rng::from_seed([4; 32]); - let ids = vec![ - 4u64.to_string().as_bytes().to_vec(), - ]; + let ids = vec![4u64.to_string().as_bytes().to_vec()]; let t = 1; let ibe_pp: G2 = G2::generator().into(); @@ -1830,12 +1845,11 @@ fn timelock_basic_scheduling_works() { // Q: how can we mock the decryption trait so that we can d1o whatever? // probably don't really need to perform decryption here? 
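In the timelock tests that follow, each of `bounded_ct`, `bounded_nonce` and `bounded_capsule` is built by iterating over the corresponding `AesIbeCt` bytes and calling `try_insert` element by element, with failures silently ignored, so oversized output would be truncated rather than rejected. A more compact and fail-fast alternative is `BoundedVec::try_from` on the whole `Vec`. A sketch under assumptions: the bound `ConstU32<1024>` and the helper name `to_bounded` are placeholders, since the actual bound on the pallet's `Ciphertext` fields is not visible in this diff:

// Sketch only: `ConstU32<1024>` stands in for whatever bound the pallet's
// `Ciphertext` fields actually use; `to_bounded` is a hypothetical helper.
use frame_support::{traits::ConstU32, BoundedVec};

type CtBytes = BoundedVec<u8, ConstU32<1024>>;

/// Convert raw bytes into a bounded vector, rejecting oversized input outright
/// instead of silently dropping the bytes past the bound.
fn to_bounded(bytes: &[u8]) -> Result<CtBytes, ()> {
    CtBytes::try_from(bytes.to_vec()).map_err(|_| ())
}

With such a helper, the three `for_each`/`try_insert` loops in each test would reduce to three `to_bounded(...)` calls before assembling `Ciphertext { ciphertext, nonce, capsule }`.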
new_test_ext().execute_with(|| { - let _ = Etf::set_ibe_params( - // RuntimeOrigin::root(), - &vec![], - &ibe_pp_bytes.into(), - &p_pub_bytes.into() + // RuntimeOrigin::root(), + &vec![], + &ibe_pp_bytes.into(), + &p_pub_bytes.into(), ); // Call to schedule @@ -1843,7 +1857,7 @@ fn timelock_basic_scheduling_works() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); // // then we convert to bytes and encrypt the call - let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = + let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = DefaultEtfClient::::encrypt( ibe_pp_bytes.to_vec(), p_pub_bytes.to_vec(), @@ -1851,38 +1865,30 @@ fn timelock_basic_scheduling_works() { ids, t, &mut rng, - ).unwrap(); - + ) + .unwrap(); let mut bounded_ct: BoundedVec> = BoundedVec::new(); ct.aes_ct.ciphertext.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_ct.try_insert(idx, *i); + let _ = bounded_ct.try_insert(idx, *i); }); let mut bounded_nonce: BoundedVec> = BoundedVec::new(); ct.aes_ct.nonce.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_nonce.try_insert(idx, *i); + let _ = bounded_nonce.try_insert(idx, *i); }); let mut bounded_capsule: BoundedVec> = BoundedVec::new(); // assumes we only care about a single point in the future ct.etf_ct[0].iter().enumerate().for_each(|(idx, i)| { - let _= bounded_capsule.try_insert(idx, *i); + let _ = bounded_capsule.try_insert(idx, *i); }); - let ciphertext = Ciphertext { - ciphertext: bounded_ct, - nonce: bounded_nonce, - capsule: bounded_capsule, - }; + let ciphertext = + Ciphertext { ciphertext: bounded_ct, nonce: bounded_nonce, capsule: bounded_capsule }; // Schedule call to be executed at the 4th block - assert_ok!(Scheduler::do_schedule_sealed( - DispatchTime::At(4), - 127, - root(), - ciphertext, - )); + assert_ok!(Scheduler::do_schedule_sealed(DispatchTime::At(4), 127, root(), ciphertext,)); // `log` runtime call should not have executed yet run_to_block(3); @@ -1901,11 +1907,9 @@ fn timelock_basic_scheduling_works() { #[test] #[docify::export] fn timelock_undecryptable_ciphertext_no_execution() { - let mut rng = ChaCha20Rng::from_seed([4;32]); + let mut rng = ChaCha20Rng::from_seed([4; 32]); - let bad_ids = vec![ - 3u64.to_string().as_bytes().to_vec(), - ]; + let bad_ids = vec![3u64.to_string().as_bytes().to_vec()]; let t = 1; @@ -1918,19 +1922,18 @@ fn timelock_undecryptable_ciphertext_no_execution() { new_test_ext().execute_with(|| { let _ = Etf::set_ibe_params( - // RuntimeOrigin::root(), - &vec![], - &ibe_pp_bytes.into(), - &p_pub_bytes.into() + // RuntimeOrigin::root(), + &vec![], + &ibe_pp_bytes.into(), + &p_pub_bytes.into(), ); - // Call to schedule let call = RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); // encrypts the ciphertext for the wrong identity - let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = + let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = DefaultEtfClient::::encrypt( ibe_pp_bytes.to_vec(), p_pub_bytes.to_vec(), @@ -1938,38 +1941,30 @@ fn timelock_undecryptable_ciphertext_no_execution() { bad_ids, t, &mut rng, - ).unwrap(); - + ) + .unwrap(); let mut bounded_ct: BoundedVec> = BoundedVec::new(); ct.aes_ct.ciphertext.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_ct.try_insert(idx, *i); + let _ = bounded_ct.try_insert(idx, *i); }); let mut bounded_nonce: BoundedVec> = BoundedVec::new(); ct.aes_ct.nonce.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_nonce.try_insert(idx, *i); + let _ = 
bounded_nonce.try_insert(idx, *i); }); let mut bounded_capsule: BoundedVec> = BoundedVec::new(); // assumes we only care about a single point in the future ct.etf_ct[0].iter().enumerate().for_each(|(idx, i)| { - let _= bounded_capsule.try_insert(idx, *i); + let _ = bounded_capsule.try_insert(idx, *i); }); - let ciphertext = Ciphertext { - ciphertext: bounded_ct, - nonce: bounded_nonce, - capsule: bounded_capsule, - }; + let ciphertext = + Ciphertext { ciphertext: bounded_ct, nonce: bounded_nonce, capsule: bounded_capsule }; // Schedule call to be executed at the 4th block - assert_ok!(Scheduler::do_schedule_sealed( - DispatchTime::At(4), - 127, - root(), - ciphertext, - )); + assert_ok!(Scheduler::do_schedule_sealed(DispatchTime::At(4), 127, root(), ciphertext,)); // `log` runtime call should not have executed yet run_to_block(3); @@ -1984,11 +1979,9 @@ fn timelock_undecryptable_ciphertext_no_execution() { #[test] #[docify::export] fn timelock_undecodable_runtime_call_no_execution() { - let mut rng = ChaCha20Rng::from_seed([4;32]); + let mut rng = ChaCha20Rng::from_seed([4; 32]); - let ids = vec![ - 4u64.to_string().as_bytes().to_vec(), - ]; + let ids = vec![4u64.to_string().as_bytes().to_vec()]; let t = 1; let ibe_pp: G2 = G2::generator().into(); @@ -2000,19 +1993,18 @@ fn timelock_undecodable_runtime_call_no_execution() { new_test_ext().execute_with(|| { let _ = Etf::set_ibe_params( - // RuntimeOrigin::root(), - &vec![], - &ibe_pp_bytes.into(), - &p_pub_bytes.into() + // RuntimeOrigin::root(), + &vec![], + &ibe_pp_bytes.into(), + &p_pub_bytes.into(), ); - // Call to schedule // let call = // RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); // encrypts the ciphertext for the wrong identity - let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = + let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = DefaultEtfClient::::encrypt( ibe_pp_bytes.to_vec(), p_pub_bytes.to_vec(), @@ -2020,38 +2012,30 @@ fn timelock_undecodable_runtime_call_no_execution() { ids, t, &mut rng, - ).unwrap(); - + ) + .unwrap(); let mut bounded_ct: BoundedVec> = BoundedVec::new(); ct.aes_ct.ciphertext.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_ct.try_insert(idx, *i); + let _ = bounded_ct.try_insert(idx, *i); }); let mut bounded_nonce: BoundedVec> = BoundedVec::new(); ct.aes_ct.nonce.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_nonce.try_insert(idx, *i); + let _ = bounded_nonce.try_insert(idx, *i); }); let mut bounded_capsule: BoundedVec> = BoundedVec::new(); // assumes we only care about a single point in the future ct.etf_ct[0].iter().enumerate().for_each(|(idx, i)| { - let _= bounded_capsule.try_insert(idx, *i); + let _ = bounded_capsule.try_insert(idx, *i); }); - let ciphertext = Ciphertext { - ciphertext: bounded_ct, - nonce: bounded_nonce, - capsule: bounded_capsule, - }; + let ciphertext = + Ciphertext { ciphertext: bounded_ct, nonce: bounded_nonce, capsule: bounded_capsule }; // Schedule call to be executed at the 4th block - assert_ok!(Scheduler::do_schedule_sealed( - DispatchTime::At(4), - 127, - root(), - ciphertext, - )); + assert_ok!(Scheduler::do_schedule_sealed(DispatchTime::At(4), 127, root(), ciphertext,)); // `log` runtime call should not have executed yet run_to_block(3); @@ -2066,11 +2050,9 @@ fn timelock_undecodable_runtime_call_no_execution() { #[test] #[docify::export] fn timelock_cancel_works() { - let mut rng = ChaCha20Rng::from_seed([4;32]); + let mut rng = ChaCha20Rng::from_seed([4; 32]); - let ids = vec![ - 
4u64.to_string().as_bytes().to_vec(), - ]; + let ids = vec![4u64.to_string().as_bytes().to_vec()]; let t = 1; let ibe_pp: G2 = G2::generator().into(); @@ -2083,12 +2065,11 @@ fn timelock_cancel_works() { // Q: how can we mock the decryption trait so that we can d1o whatever? // probably don't really need to perform decryption here? new_test_ext().execute_with(|| { - let _ = Etf::set_ibe_params( - // RuntimeOrigin::root(), - &vec![], - &ibe_pp_bytes.into(), - &p_pub_bytes.into() + // RuntimeOrigin::root(), + &vec![], + &ibe_pp_bytes.into(), + &p_pub_bytes.into(), ); // Call to schedule @@ -2096,7 +2077,7 @@ fn timelock_cancel_works() { RuntimeCall::Logger(LoggerCall::log { i: 42, weight: Weight::from_parts(10, 0) }); // // then we convert to bytes and encrypt the call - let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = + let ct: etf_crypto_primitives::client::etf_client::AesIbeCt = DefaultEtfClient::::encrypt( ibe_pp_bytes.to_vec(), p_pub_bytes.to_vec(), @@ -2104,50 +2085,39 @@ fn timelock_cancel_works() { ids, t, &mut rng, - ).unwrap(); - + ) + .unwrap(); let mut bounded_ct: BoundedVec> = BoundedVec::new(); ct.aes_ct.ciphertext.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_ct.try_insert(idx, *i); + let _ = bounded_ct.try_insert(idx, *i); }); let mut bounded_nonce: BoundedVec> = BoundedVec::new(); ct.aes_ct.nonce.iter().enumerate().for_each(|(idx, i)| { - let _= bounded_nonce.try_insert(idx, *i); + let _ = bounded_nonce.try_insert(idx, *i); }); let mut bounded_capsule: BoundedVec> = BoundedVec::new(); // assumes we only care about a single point in the future ct.etf_ct[0].iter().enumerate().for_each(|(idx, i)| { - let _= bounded_capsule.try_insert(idx, *i); + let _ = bounded_capsule.try_insert(idx, *i); }); - let ciphertext = Ciphertext { - ciphertext: bounded_ct, - nonce: bounded_nonce, - capsule: bounded_capsule, - }; + let ciphertext = + Ciphertext { ciphertext: bounded_ct, nonce: bounded_nonce, capsule: bounded_capsule }; // Schedule call to be executed at the 4th block - assert_ok!(Scheduler::do_schedule_sealed( - DispatchTime::At(4), - 127, - root(), - ciphertext, - )); + assert_ok!(Scheduler::do_schedule_sealed(DispatchTime::At(4), 127, root(), ciphertext,)); // `log` runtime call should not have executed yet run_to_block(3); assert!(logger::log().is_empty()); - // now cancel - assert_ok!(Scheduler::do_cancel( - None, (4, 0), - )); + assert_ok!(Scheduler::do_cancel(None, (4, 0),)); run_to_block(4); assert!(logger::log().is_empty()); }); -} \ No newline at end of file +} diff --git a/primitives/consensus/beefy-etf/src/lib.rs b/primitives/consensus/beefy-etf/src/lib.rs index d450ee6..570dc27 100644 --- a/primitives/consensus/beefy-etf/src/lib.rs +++ b/primitives/consensus/beefy-etf/src/lib.rs @@ -16,7 +16,6 @@ // limitations under the License. #![cfg_attr(not(feature = "std"), no_std)] -#![warn(missing_docs)] //! Primitives for BEEFY protocol. //! @@ -209,9 +208,9 @@ pub mod ecdsa_bls_crypto { // TODO: I'm not actually using this currently, just doing this so it will compile.. false // this is the original code but I can't get it to compile for some reason... 
- // something about a missing panic handler appears when I try to enable the full_crypto feature - // for sp-core and sp-application-crypto - // not sure what I broke exactly to cause this, but we + // something about a missing panic handler appears when I try to enable the full_crypto + // feature for sp-core and sp-application-crypto + // not sure what I broke exactly to cause this, but we // EcdsaBlsPair::verify_with_hasher::( // signature.as_inner_ref(), // msg, @@ -246,7 +245,7 @@ pub type ValidatorSetId = u64; // #[cfg(feature = "bls-experimental")] #[derive(Decode, Encode, Debug, PartialEq, Clone, TypeInfo)] pub struct ValidatorSet { - /// Public keys of the validator set elements + /// Public keys of the validator set elements validators: Vec, /// Public round key commitments for the validators commitments: Vec, @@ -391,12 +390,12 @@ where // have different validator set ids, // or both votes have the same commitment, // --> the equivocation is invalid. - if first.id != second.id || - first.commitment.block_number != second.commitment.block_number || - first.commitment.validator_set_id != second.commitment.validator_set_id || - first.commitment.payload == second.commitment.payload + if first.id != second.id + || first.commitment.block_number != second.commitment.block_number + || first.commitment.validator_set_id != second.commitment.validator_set_id + || first.commitment.payload == second.commitment.payload { - return false + return false; } // check signatures on both votes are valid @@ -404,7 +403,7 @@ where let valid_second = check_commitment_signature(&second.commitment, &second.id, &second.signature); - return valid_first && valid_second + return valid_first && valid_second; } /// New BEEFY validator set notification hook. @@ -488,7 +487,7 @@ sp_api::decl_runtime_apis! { // #[cfg(feature = "bls-experimental")] /// Return a proof of knowledge for async secret sharing fn read_share(who: AuthorityId) -> Option>; - + fn submit_unsigned_pulse( signature_bytes: Vec>, block_number: NumberFor, @@ -513,11 +512,9 @@ mod tests { let alice = ecdsa::Pair::from_string("//Alice", None).unwrap(); let alice_stash = ecdsa::Pair::from_string("//AliceStash", None).unwrap(); let set_id = 0; - let validators = ValidatorSet::::new( - vec![alice.public()], - vec![alice_stash.public()], - set_id - ).unwrap(); + let validators = + ValidatorSet::::new(vec![alice.public()], vec![alice_stash.public()], set_id) + .unwrap(); assert_eq!(validators.id(), set_id); assert_eq!(validators.validators(), &vec![alice.public()]); diff --git a/primitives/consensus/beefy-etf/src/mmr.rs b/primitives/consensus/beefy-etf/src/mmr.rs index 7dc36d3..5e7d073 100644 --- a/primitives/consensus/beefy-etf/src/mmr.rs +++ b/primitives/consensus/beefy-etf/src/mmr.rs @@ -26,11 +26,11 @@ //! but we imagine they will be useful for other chains that either want to bridge with Polkadot //! or are completely standalone, but heavily inspired by Polkadot. 
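The equivocation hunk a little above only re-indents the boolean operators, but the predicate is worth restating: a proof counts as an equivocation only when both votes come from the same authority id, target the same block number and validator set id, carry different payloads, and each carries a valid signature. A toy restatement with plain structs, with signature verification stubbed to a boolean because the real `check_commitment_signature` needs the crypto types not shown in this diff:

// Toy restatement of the equivocation predicate formatted in the hunk above.
// Signature verification is stubbed with a plain bool; the real code calls
// `check_commitment_signature` for each vote.
struct ToyCommitment {
    block_number: u64,
    validator_set_id: u64,
    payload: Vec<u8>,
}

struct ToyVote {
    id: u32, // authority id
    commitment: ToyCommitment,
    signature_is_valid: bool,
}

fn is_equivocation(first: &ToyVote, second: &ToyVote) -> bool {
    // Different signers, different targets, or identical payloads: not an equivocation.
    if first.id != second.id
        || first.commitment.block_number != second.commitment.block_number
        || first.commitment.validator_set_id != second.commitment.validator_set_id
        || first.commitment.payload == second.commitment.payload
    {
        return false;
    }
    // Both conflicting votes must be genuinely signed to count as misbehaviour.
    first.signature_is_valid && second.signature_is_valid
}

fn main() {
    let base = |payload: &[u8]| ToyCommitment {
        block_number: 10,
        validator_set_id: 0,
        payload: payload.to_vec(),
    };
    let first = ToyVote { id: 1, commitment: base(&b"A"[..]), signature_is_valid: true };
    let second = ToyVote { id: 1, commitment: base(&b"B"[..]), signature_is_valid: true };
    assert!(is_equivocation(&first, &second)); // same target, conflicting payloads
}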
-use crate::{ConsensusLog, MmrRootHash, BEEFY_ENGINE_ID}; -#[cfg(not(feature = "bls-experimental"))] -use crate::ecdsa_crypto::AuthorityId; #[cfg(feature = "bls-experimental")] use crate::bls_crypto::AuthorityId; +#[cfg(not(feature = "bls-experimental"))] +use crate::ecdsa_crypto::AuthorityId; +use crate::{ConsensusLog, MmrRootHash, BEEFY_ENGINE_ID}; use alloc::vec::Vec; use codec::{Decode, Encode, MaxEncodedLen}; use scale_info::TypeInfo; diff --git a/primitives/consensus/beefy-etf/src/payload.rs b/primitives/consensus/beefy-etf/src/payload.rs index 1e87efa..bdce517 100644 --- a/primitives/consensus/beefy-etf/src/payload.rs +++ b/primitives/consensus/beefy-etf/src/payload.rs @@ -32,7 +32,6 @@ pub mod known_payloads { /// Encoded value should contain a [`crate::MmrRootHash`] type (i.e. 32-bytes hash). pub const MMR_ROOT_ID: BeefyPayloadId = *b"mh"; - /// A [`Payload`](super::Payload) identifier for empty ETF payloads. pub const ETF_SIGNATURE: BeefyPayloadId = *b"ef"; } diff --git a/primitives/consensus/beefy-etf/src/test_utils.rs b/primitives/consensus/beefy-etf/src/test_utils.rs index 00d5673..e95d5e2 100644 --- a/primitives/consensus/beefy-etf/src/test_utils.rs +++ b/primitives/consensus/beefy-etf/src/test_utils.rs @@ -143,7 +143,6 @@ pub fn test_generate_equivocation_proof( EquivocationProof { first, second } } - /// Create a new `EquivocationProof` based on given arguments. pub fn generate_equivocation_proof( vote1: (u64, Payload, ValidatorSetId, &Keyring),