diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index c7aaf9e58..b1a5f53d2 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -45,18 +45,34 @@ jobs: cargo update -p proptest --precise "1.2.0" --verbose # proptest 1.3.0 requires rustc 1.64.0 cargo update -p regex --precise "1.9.6" --verbose # regex 1.10.0 requires rustc 1.65.0 cargo update -p home --precise "0.5.5" --verbose # home v0.5.9 requires rustc 1.70 or newer - cargo update -p url --precise "2.5.0" --verbose # url v2.5.1 requires rustc 1.67 or newer + cargo update -p tokio --precise "1.38.1" --verbose # tokio v1.39.0 requires rustc 1.70 or newer + cargo update -p tokio-util --precise "0.7.11" --verbose # tokio-util v0.7.12 requires rustc 1.70 or newer - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == 'stable'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Download bitcoind/electrs and set environment variables - if: "matrix.platform != 'windows-latest'" + - name: Enable caching for bitcoind + id: cache-bitcoind + uses: actions/cache@v4 + with: + path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + key: bitcoind-${{ runner.os }}-${{ runner.arch }} + - name: Enable caching for electrs + id: cache-electrs + uses: actions/cache@v4 + with: + path: bin/electrs-${{ runner.os }}-${{ runner.arch }} + key: electrs-${{ runner.os }}-${{ runner.arch }} + - name: Download bitcoind/electrs + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./scripts/download_bitcoind_electrs.sh - cp "$BITCOIND_EXE" "$HOME"/bitcoind - cp "$ELECTRS_EXE" "$HOME"/electrs - echo "BITCOIND_EXE=$HOME/bitcoind" >> "$GITHUB_ENV" - echo "ELECTRS_EXE=$HOME/electrs" >> "$GITHUB_ENV" + mkdir bin + mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} + mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} + - name: Set bitcoind/electrs 
environment variables + run: | + echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" + echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Build on Rust ${{ matrix.toolchain }} run: cargo build --verbose --color always - name: Build with UniFFI support on Rust ${{ matrix.toolchain }} diff --git a/.github/workflows/vss-integration.yml b/.github/workflows/vss-integration.yml index df739abce..83544313b 100644 --- a/.github/workflows/vss-integration.yml +++ b/.github/workflows/vss-integration.yml @@ -68,14 +68,14 @@ jobs: ./gradlew --version ./gradlew build - docker cp app/build/libs/app-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war + docker cp app/build/libs/vss-1.0.war tomcat:/usr/local/tomcat/webapps/vss.war cd ../ - name: Run VSS Integration tests against vss-instance. run: | cd ldk-node export TEST_VSS_BASE_URL="http://localhost:8080/vss" - RUSTFLAGS="--cfg vss_test --cfg vss" cargo build --verbose --color always - RUSTFLAGS="--cfg vss_test --cfg vss" cargo test --test integration_tests_vss + RUSTFLAGS="--cfg vss_test" cargo build --verbose --color always + RUSTFLAGS="--cfg vss_test" cargo test --test integration_tests_vss - name: Cleanup run: | diff --git a/.gitignore b/.gitignore index 9d3368337..357af4883 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ # Generated by Cargo # will have compiled files and executables /target/ +/bindings/uniffi-bindgen/target/ # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html @@ -24,5 +25,7 @@ swift.swiftdoc /bindings/swift/LDKNodeFFI.xcframework /bindings/kotlin/ldk-node-android/lib/src/main/jniLibs /bindings/kotlin/ldk-node-android/lib/src/main/kotlin/org/lightningdevkit/ldknode/ldk_node.kt +/bindings/kotlin/ldk-node-jvm/lib/src/main/kotlin/org/lightningdevkit/ldknode/ldk_node.kt 
+/bindings/kotlin/ldk-node-jvm/lib/src/main/resources/ /ffi/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a3894899..353d4744f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,49 @@ +# 0.4.1 - Oct 18, 2024 + +Fixes a wallet syncing issue where full syncs were used instead of incremental syncs, and vice versa (#383). + +# 0.4.0 - Oct 17, 2024 + +Besides numerous API improvements and bugfixes this fourth minor release notably adds support for sourcing chain and fee rate data from a Bitcoin Core RPC backend, as well as experimental support for the [VSS] remote storage backend. + +## Feature and API updates +- Support for multiple chain sources has been added. To this end, Esplora-specific configuration options can now be given via `EsploraSyncConfig` to `Builder::set_chain_source_esplora`. Furthermore, all configuration objects (including the main `Config`) are now exposed via the `config` sub-module (#365). +- Support for sourcing chain and fee estimation data from a Bitcoin Core RPC backend has been added (#370). +- Initial experimental support for an encrypted [VSS] remote storage backend has been added (#369, #376, #378). + - **Caution**: VSS support is in **alpha** and is considered experimental. Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. +- Support for setting the `NodeAlias` in public node announcements has been added. We now ensure that announced channels can only be opened and accepted when the required configuration options to operate as a public forwarding node are set (listening addresses and node alias). As part of this `Node::connect_open_channel` was split into `open_channel` and `open_announced_channel` API methods. (#330, #366). 
+- The `Node` can now be started via a new `Node::start_with_runtime` call that allows reusing an outer `tokio` runtime context, avoiding runtime stacking when run in `async` environments (#319). +- Support for generating and paying unified QR codes has been added (#302). +- Support for `quantity` and `payer_note` fields when sending or receiving BOLT12 payments has been added (#327). +- Support for setting additional parameters when sending BOLT11 payments has been added (#336, #351). + +## Bug Fixes +- The `ChannelConfig` object has been refactored, now allowing querying the currently applied `MaxDustHTLCExposure` limit (#350). +- A bug potentially leading to panicking on shutdown when stacking `tokio` runtime contexts has been fixed (#373). +- We now no longer panic when hitting a persistence failure during event handling. Instead, events will be replayed until successful (#374). + +## Compatibility Notes +- The LDK dependency has been updated to version 0.0.125 (#358, #375). +- The BDK dependency has been updated to version 1.0-beta.4 (#358). + - Going forward, the BDK state will be persisted in the configured `KVStore` backend. + - **Note**: The old descriptor state will *not* be automatically migrated on upgrade, potentially leading to address reuse. Privacy-conscious users might want to manually advance the descriptor by requesting new addresses until it reaches the previously observed height. + - After the node has been successfully upgraded users may safely delete `bdk_wallet_*.sqlite` from the storage path. +- The `rust-bitcoin` dependency has been updated to version 0.32.2 (#358). +- The UniFFI dependency has been updated to version 0.27.3 (#379). +- The `bip21` dependency has been updated to version 0.5 (#358). +- The `rust-esplora-client` has been updated to version 0.9 (#358). 
+ +In total, this release features 55 files changed, 6134 insertions, 2184 deletions in 166 commits from 6 authors, in alphabetical order: + +- G8XSU +- Ian Slane +- jbesraa +- Elias Rohrer +- elnosh +- Enigbe Ochekliye + +[VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + # 0.3.0 - June 21, 2024 This third minor release notably adds support for BOLT12 payments, Anchor diff --git a/Cargo.toml b/Cargo.toml index b40daca08..3a2b05369 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ldk-node" -version = "0.3.0" +version = "0.4.1" authors = ["Elias Rohrer "] homepage = "https://lightningdevkit.org/" license = "MIT OR Apache-2.0" @@ -28,51 +28,78 @@ panic = 'abort' # Abort on panic default = [] [dependencies] -lightning = { version = "0.0.123", features = ["std"] } -lightning-invoice = { version = "0.31.0" } -lightning-net-tokio = { version = "0.0.123" } -lightning-persister = { version = "0.0.123" } -lightning-background-processor = { version = "0.0.123", features = ["futures"] } -lightning-rapid-gossip-sync = { version = "0.0.123" } -lightning-transaction-sync = { version = "0.0.123", features = ["esplora-async-https", "time"] } -lightning-liquidity = { version = "=0.1.0-alpha.4", features = ["std"] } - +lightning = { version = "0.0.125", features = ["std"] } +lightning-invoice = { version = "0.32.0" } +lightning-net-tokio = { version = "0.0.125" } +lightning-persister = { version = "0.0.125" } +lightning-background-processor = { version = "0.0.125", features = ["futures"] } +lightning-rapid-gossip-sync = { version = "0.0.125" } +lightning-block-sync = { version = "0.0.125", features = ["rpc-client", "tokio"] } +lightning-transaction-sync = { version = "0.0.125", features = ["esplora-async-https", "time"] } +lightning-liquidity = { version = "0.1.0-alpha.6", features = ["std"] } + +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std"] } +#lightning-invoice = { git = 
"https://github.com/lightningdevkit/rust-lightning", branch="main" } +#lightning-net-tokio = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } +#lightning-persister = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } +#lightning-background-processor = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["futures"] } +#lightning-rapid-gossip-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main" } +#lightning-transaction-sync = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["esplora-async"] } +#lightning-liquidity = { git = "https://github.com/lightningdevkit/lightning-liquidity", branch="main", features = ["std"] } + +#lightning = { path = "../rust-lightning/lightning", features = ["std"] } +#lightning-invoice = { path = "../rust-lightning/lightning-invoice" } +#lightning-net-tokio = { path = "../rust-lightning/lightning-net-tokio" } +#lightning-persister = { path = "../rust-lightning/lightning-persister" } +#lightning-background-processor = { path = "../rust-lightning/lightning-background-processor", features = ["futures"] } +#lightning-rapid-gossip-sync = { path = "../rust-lightning/lightning-rapid-gossip-sync" } +#lightning-transaction-sync = { path = "../rust-lightning/lightning-transaction-sync", features = ["esplora-async"] } +#lightning-liquidity = { path = "../lightning-liquidity", features = ["std"] } + +bdk_chain = { version = "=0.19.0", default-features = false, features = ["std"] } +bdk_esplora = { version = "=0.18.0", default-features = false, features = ["async-https-rustls"]} +bdk_wallet = { version = "=1.0.0-beta.4", default-features = false, features = ["std", "keys-bip39"]} + +# TODO: re-enable # Alby uses forked BDK version with CACHE_ADDR_BATCH_SIZE=1 to reduce unnecessary esplora calls and speed up wallet sync -bdk = { git = "https://github.com/getAlby/bdk.git", branch = "0.29-alby-v2", 
default-features = false, features = ["std", "async-interface", "use-esplora-async", "sqlite-bundled", "keys-bip39"]} -#bdk = { version = "0.29.0", default-features = false, features = ["std", "async-interface", "use-esplora-async", "sqlite-bundled", "keys-bip39"]} +#bdk = { git = "https://github.com/getAlby/bdk.git", branch = "0.29-alby-v2", default-features = false, features = ["std", "async-interface", "use-esplora-async", "sqlite-bundled", "keys-bip39"]} + reqwest = { version = "0.11", default-features = false, features = ["json", "rustls-tls"] } rusqlite = { version = "0.28.0", features = ["bundled"] } -bitcoin = "0.30.2" +bitcoin = "0.32.2" bip39 = "2.0.0" +bip21 = { version = "0.5", features = ["std"], default-features = false } +base64 = { version = "0.22.1", default-features = false, features = ["std"] } rand = "0.8.5" chrono = { version = "0.4", default-features = false, features = ["clock"] } -tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync" ] } -esplora-client = { version = "0.6", default-features = false } +tokio = { version = "1.37", default-features = false, features = [ "rt-multi-thread", "time", "sync", "macros" ] } +esplora-client = { version = "0.9", default-features = false } libc = "0.2" uniffi = { version = "0.25.3", features = ["build"], optional = true } +serde = { version = "1.0.210", default-features = false, features = ["std", "derive"] } +serde_json = { version = "1.0.128", default-features = false, features = ["std"] } -[target.'cfg(vss)'.dependencies] -vss-client = "0.2" +vss-client = "0.3" prost = { version = "0.11.6", default-features = false} [target.'cfg(windows)'.dependencies] winapi = { version = "0.3", features = ["winbase"] } [dev-dependencies] -lightning = { version = "0.0.123", features = ["std", "_test_utils"] } -#lightning = { git = "https://github.com/tnull/rust-lightning", branch = "2024-03-invoice-generated-event", features = ["std", "_test_utils"] } -electrum-client = 
{ version = "0.15.1", default-features = true } -bitcoincore-rpc = { version = "0.17.0", default-features = false } +lightning = { version = "0.0.125", features = ["std", "_test_utils"] } +#lightning = { git = "https://github.com/lightningdevkit/rust-lightning", branch="main", features = ["std", "_test_utils"] } +electrum-client = { version = "0.21.0", default-features = true } +bitcoincore-rpc = { version = "0.19.0", default-features = false } proptest = "1.0.0" regex = "1.5.6" [target.'cfg(not(no_download))'.dev-dependencies] -electrsd = { version = "0.26.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] } +electrsd = { version = "0.29.0", features = ["legacy", "esplora_a33e97e1", "bitcoind_25_0"] } [target.'cfg(no_download)'.dev-dependencies] -electrsd = { version = "0.26.0", features = ["legacy"] } +electrsd = { version = "0.29.0", features = ["legacy"] } [target.'cfg(cln_test)'.dev-dependencies] clightningrpc = { version = "0.3.0-beta.8", default-features = false } @@ -87,3 +114,13 @@ rpath = true [profile.dev] panic = "abort" + +[lints.rust.unexpected_cfgs] +level = "forbid" +# When adding a new cfg attribute, ensure that it is added to this list. +check-cfg = [ + "cfg(vss_test)", + "cfg(ldk_bench)", + "cfg(tokio_unstable)", + "cfg(cln_test)", +] diff --git a/Package.swift b/Package.swift index 67c02dd8b..253db6e68 100644 --- a/Package.swift +++ b/Package.swift @@ -3,8 +3,8 @@ import PackageDescription -let tag = "v0.3.0" -let checksum = "07c8741768956bf1a51d1c25f751b5e29d1ae9ee2fd786c4282031c9a8a92f0c" +let tag = "v0.4.0" +let checksum = "5dcdfdd6e3331062d649786fa6e758487227f6037d9881353fe0c293a3a4c7e0" let url = "https://github.com/lightningdevkit/ldk-node/releases/download/\(tag)/LDKNodeFFI.xcframework.zip" let package = Package( diff --git a/README.md b/README.md index 4078ce67b..02dcbf323 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,7 @@ A ready-to-go Lightning node library built using [LDK][ldk] and [BDK][bdk]. 
LDK Node is a self-custodial Lightning node in library form. Its central goal is to provide a small, simple, and straightforward interface that enables users to easily set up and run a Lightning node with an integrated on-chain wallet. While minimalism is at its core, LDK Node aims to be sufficiently modular and configurable to be useful for a variety of use cases. ## Getting Started -The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. `Node` can then be controlled via commands such as `start`, `stop`, `connect_open_channel`, `send`, etc. +The primary abstraction of the library is the [`Node`][api_docs_node], which can be retrieved by setting up and configuring a [`Builder`][api_docs_builder] to your liking and calling one of the `build` methods. `Node` can then be controlled via commands such as `start`, `stop`, `open_channel`, `send`, etc. 
```rust use ldk_node::Builder; @@ -24,7 +24,7 @@ use std::str::FromStr; fn main() { let mut builder = Builder::new(); builder.set_network(Network::Testnet); - builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); + builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); let node = builder.build().unwrap(); @@ -37,7 +37,7 @@ fn main() { let node_id = PublicKey::from_str("NODE_ID").unwrap(); let node_addr = SocketAddress::from_str("IP_ADDR:PORT").unwrap(); - node.connect_open_channel(node_id, node_addr, 10000, None, None, false).unwrap(); + node.open_channel(node_id, node_addr, 10000, None, None).unwrap(); let event = node.wait_next_event(); println!("EVENT: {:?}", event); @@ -55,7 +55,7 @@ fn main() { LDK Node currently comes with a decidedly opinionated set of design choices: - On-chain data is handled by the integrated [BDK][bdk] wallet. -- Chain data may currently be sourced from an [Esplora][esplora] server, while support for Electrum and `bitcoind` RPC will follow soon. +- Chain data may currently be sourced from the Bitcoin Core RPC interface or an [Esplora][esplora] server, while support for Electrum will follow soon. - Wallet and channel state may be persisted to an [SQLite][sqlite] database, to file system, or to a custom back-end to be implemented by the user. - Gossip data may be sourced via Lightning's peer-to-peer network or the [Rapid Gossip Sync](https://docs.rs/lightning-rapid-gossip-sync/*/lightning_rapid_gossip_sync/) protocol. - Entropy for the Lightning and on-chain wallets may be sourced from raw bytes or a [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) mnemonic. In addition, LDK Node offers the means to generate and persist the entropy bytes to disk. 
diff --git a/bindings/kotlin/ldk-node-android/gradle.properties b/bindings/kotlin/ldk-node-android/gradle.properties index 70f5823b6..c84f2c46c 100644 --- a/bindings/kotlin/ldk-node-android/gradle.properties +++ b/bindings/kotlin/ldk-node-android/gradle.properties @@ -2,4 +2,4 @@ org.gradle.jvmargs=-Xmx1536m android.useAndroidX=true android.enableJetifier=true kotlin.code.style=official -libraryVersion=0.3.0 +libraryVersion=0.4.0 diff --git a/bindings/kotlin/ldk-node-android/lib/build.gradle.kts b/bindings/kotlin/ldk-node-android/lib/build.gradle.kts index 5e6775cdc..69d126b54 100644 --- a/bindings/kotlin/ldk-node-android/lib/build.gradle.kts +++ b/bindings/kotlin/ldk-node-android/lib/build.gradle.kts @@ -16,7 +16,7 @@ repositories { } android { - compileSdk = 33 + compileSdk = 34 defaultConfig { minSdk = 21 diff --git a/bindings/kotlin/ldk-node-jvm/gradle.properties b/bindings/kotlin/ldk-node-jvm/gradle.properties index 4ed588117..a84d6e412 100644 --- a/bindings/kotlin/ldk-node-jvm/gradle.properties +++ b/bindings/kotlin/ldk-node-jvm/gradle.properties @@ -1,3 +1,3 @@ org.gradle.jvmargs=-Xmx1536m kotlin.code.style=official -libraryVersion=0.3.0 +libraryVersion=0.4.0 diff --git a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt index 55b3e310f..215fc8584 100644 --- a/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt +++ b/bindings/kotlin/ldk-node-jvm/lib/src/test/kotlin/org/lightningdevkit/ldknode/LibraryTest.kt @@ -130,9 +130,9 @@ class LibraryTest { println("Config 2: $config2") val builder1 = Builder.fromConfig(config1) - builder1.setEsploraServer(esploraEndpoint) + builder1.setChainSourceEsplora(esploraEndpoint, null) val builder2 = Builder.fromConfig(config2) - builder2.setEsploraServer(esploraEndpoint) + builder2.setChainSourceEsplora(esploraEndpoint, null) val node1 = 
builder1.build() val node2 = builder2.build() @@ -175,7 +175,7 @@ class LibraryTest { assertEquals(100000uL, totalBalance1) assertEquals(100000uL, totalBalance2) - node1.connectOpenChannel(nodeId2, listenAddress2, 50000u, null, null, true) + node1.openChannel(nodeId2, listenAddress2, 50000u, null, null) val channelPendingEvent1 = node1.waitNextEvent() println("Got event: $channelPendingEvent1") @@ -224,7 +224,7 @@ class LibraryTest { val invoice = node2.bolt11Payment().receive(2500000u, "asdf", 9217u) - node1.bolt11Payment().send(invoice) + node1.bolt11Payment().send(invoice, null) val paymentSuccessfulEvent = node1.waitNextEvent() println("Got event: $paymentSuccessfulEvent") diff --git a/bindings/ldk_node.udl b/bindings/ldk_node.udl index 463277571..ed4ada531 100644 --- a/bindings/ldk_node.udl +++ b/bindings/ldk_node.udl @@ -8,14 +8,12 @@ dictionary Config { string? log_dir_path; Network network; sequence? listening_addresses; - u32 default_cltv_expiry_delta; - u64 onchain_wallet_sync_interval_secs; - u64 wallet_sync_interval_secs; - u64 fee_rate_cache_update_interval_secs; + NodeAlias? node_alias; sequence trusted_peers_0conf; u64 probing_liquidity_limit_multiplier; LogLevel log_level; AnchorChannelsConfig? anchor_channels_config; + SendingParameters? sending_parameters; }; dictionary AnchorChannelsConfig { @@ -23,6 +21,12 @@ dictionary AnchorChannelsConfig { u64 per_channel_reserve_sats; }; +dictionary EsploraSyncConfig { + u64 onchain_wallet_sync_interval_secs; + u64 lightning_wallet_sync_interval_secs; + u64 fee_rate_cache_update_interval_secs; +}; + interface Builder { constructor(); [Name=from_config] @@ -31,7 +35,8 @@ interface Builder { [Throws=BuildError] void set_entropy_seed_bytes(sequence seed_bytes); void set_entropy_bip39_mnemonic(Mnemonic mnemonic, string? passphrase); - void set_esplora_server(string esplora_server_url); + void set_chain_source_esplora(string server_url, EsploraSyncConfig? 
config); + void set_chain_source_bitcoind_rpc(string rpc_host, u16 rpc_port, string rpc_user, string rpc_password); void set_gossip_source_p2p(); void set_gossip_source_rgs(string rgs_server_url); void set_liquidity_source_lsps2(SocketAddress address, PublicKey node_id, string? token); @@ -40,10 +45,18 @@ interface Builder { [Throws=BuildError] void set_listening_addresses(sequence listening_addresses); [Throws=BuildError] + void set_node_alias(string node_alias); + [Throws=BuildError] Node build(); [Throws=BuildError] Node build_with_fs_store(); void restore_encoded_channel_monitors(sequence monitors); + [Throws=BuildError] + Node build_with_vss_store(string vss_url, string store_id, string lnurl_auth_server_url, record fixed_headers); + [Throws=BuildError] + Node build_with_vss_store_and_fixed_headers(string vss_url, string store_id, record fixed_headers); + //[Throws=BuildError] + //Node build_with_vss_store_and_header_provider(string vss_url, string store_id, VssHeaderProvider header_provider); }; interface Node { @@ -60,20 +73,24 @@ interface Node { void event_handled(); PublicKey node_id(); sequence? listening_addresses(); + NodeAlias? node_alias(); Bolt11Payment bolt11_payment(); Bolt12Payment bolt12_payment(); SpontaneousPayment spontaneous_payment(); OnchainPayment onchain_payment(); + UnifiedQrPayment unified_qr_payment(); [Throws=NodeError] void connect(PublicKey node_id, SocketAddress address, boolean persist); [Throws=NodeError] void disconnect(PublicKey node_id); [Throws=NodeError] - UserChannelId connect_open_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config, boolean announce_channel); + UserChannelId open_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? 
channel_config); + [Throws=NodeError] + UserChannelId open_announced_channel(PublicKey node_id, SocketAddress address, u64 channel_amount_sats, u64? push_to_counterparty_msat, ChannelConfig? channel_config); [Throws=NodeError] void close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); [Throws=NodeError] - void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id); + void force_close_channel([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, string? reason); [Throws=NodeError] void update_channel_config([ByRef]UserChannelId user_channel_id, PublicKey counterparty_node_id, ChannelConfig channel_config); [Throws=NodeError] @@ -88,7 +105,6 @@ interface Node { sequence list_peers(); sequence list_channels(); NetworkGraph network_graph(); - [Throws=NodeError] string sign_message([ByRef]sequence msg); boolean verify_signature([ByRef]sequence msg, [ByRef]string sig, [ByRef]PublicKey pkey); [Throws=NodeError] @@ -98,9 +114,9 @@ interface Node { interface Bolt11Payment { [Throws=NodeError] - PaymentId send([ByRef]Bolt11Invoice invoice); + PaymentId send([ByRef]Bolt11Invoice invoice, SendingParameters? sending_parameters); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat); + PaymentId send_using_amount([ByRef]Bolt11Invoice invoice, u64 amount_msat, SendingParameters? sending_parameters); [Throws=NodeError] void send_probes([ByRef]Bolt11Invoice invoice); [Throws=NodeError] @@ -125,22 +141,22 @@ interface Bolt11Payment { interface Bolt12Payment { [Throws=NodeError] - PaymentId send([ByRef]Offer offer, string? payer_note); + PaymentId send([ByRef]Offer offer, u64? quantity, string? payer_note); [Throws=NodeError] - PaymentId send_using_amount([ByRef]Offer offer, string? payer_note, u64 amount_msat); + PaymentId send_using_amount([ByRef]Offer offer, u64 amount_msat, u64? quantity, string? 
payer_note); [Throws=NodeError] - Offer receive(u64 amount_msat, [ByRef]string description); + Offer receive(u64 amount_msat, [ByRef]string description, u32? expiry_secs, u64? quantity); [Throws=NodeError] - Offer receive_variable_amount([ByRef]string description); + Offer receive_variable_amount([ByRef]string description, u32? expiry_secs); [Throws=NodeError] Bolt12Invoice request_refund_payment([ByRef]Refund refund); [Throws=NodeError] - Refund initiate_refund(u64 amount_msat, u32 expiry_secs); + Refund initiate_refund(u64 amount_msat, u32 expiry_secs, u64? quantity, string? payer_note); }; interface SpontaneousPayment { [Throws=NodeError] - PaymentId send(u64 amount_msat, PublicKey node_id, sequence custom_tlvs, PaymentPreimage? preimage); + PaymentId send(u64 amount_msat, PublicKey node_id, SendingParameters? sending_parameters, sequence custom_tlvs, PaymentPreimage? preimage); [Throws=NodeError] void send_probes(u64 amount_msat, PublicKey node_id); }; @@ -154,6 +170,13 @@ interface OnchainPayment { Txid send_all_to_address([ByRef]Address address); }; +interface UnifiedQrPayment { + [Throws=NodeError] + string receive(u64 amount_sats, [ByRef]string message, u32 expiry_sec); + [Throws=NodeError] + QrPaymentResult send([ByRef]string uri_str); +}; + [Error] enum NodeError { "AlreadyRunning", @@ -175,12 +198,12 @@ enum NodeError { "WalletOperationFailed", "WalletOperationTimeout", "OnchainTxSigningFailed", - "MessageSigningFailed", "TxSyncFailed", "TxSyncTimeout", "GossipUpdateFailed", "GossipUpdateTimeout", "LiquidityRequestFailed", + "UriParameterParsingFailed", "InvalidAddress", "InvalidSocketAddress", "InvalidPublicKey", @@ -198,6 +221,9 @@ enum NodeError { "InvalidChannelId", "InvalidNetwork", "InvalidCustomTlv", + "InvalidUri", + "InvalidQuantity", + "InvalidNodeAlias", "DuplicatePayment", "UnsupportedCurrency", "InsufficientFunds", @@ -209,11 +235,12 @@ dictionary NodeStatus { boolean is_running; boolean is_listening; BestBlock current_best_block; - u64? 
latest_wallet_sync_timestamp; + u64? latest_lightning_wallet_sync_timestamp; u64? latest_onchain_wallet_sync_timestamp; u64? latest_fee_rate_cache_update_timestamp; u64? latest_rgs_snapshot_timestamp; u64? latest_node_announcement_broadcast_timestamp; + u32? latest_channel_monitor_archival_height; }; dictionary BestBlock { @@ -228,6 +255,7 @@ enum BuildError { "InvalidSystemTime", "InvalidChannelMonitor", "InvalidListeningAddresses", + "InvalidNodeAlias", "ReadFailed", "WriteFailed", "StoragePathAccessFailed", @@ -236,10 +264,24 @@ enum BuildError { "LoggerSetupFailed", }; +//[Trait] +//interface VssHeaderProvider { +// [Async, Throws=VssHeaderProviderError] +// record get_headers([ByRef]sequence request); +//}; + +[Error] +enum VssHeaderProviderError { + "InvalidData", + "RequestError", + "AuthorizationError", + "InternalError", +}; + [Enum] interface Event { PaymentSuccessful(PaymentId? payment_id, PaymentHash payment_hash, u64? fee_paid_msat); - PaymentFailed(PaymentId? payment_id, PaymentHash payment_hash, PaymentFailureReason? reason); + PaymentFailed(PaymentId? payment_id, PaymentHash? payment_hash, PaymentFailureReason? reason); PaymentReceived(PaymentId? payment_id, PaymentHash payment_hash, u64 amount_msat); PaymentClaimable(PaymentId payment_id, PaymentHash payment_hash, u64 claimable_amount_msat, u32? claim_deadline); ChannelPending(ChannelId channel_id, UserChannelId user_channel_id, ChannelId former_temporary_channel_id, PublicKey counterparty_node_id, OutPoint funding_txo); @@ -254,12 +296,15 @@ enum PaymentFailureReason { "PaymentExpired", "RouteNotFound", "UnexpectedError", + "UnknownRequiredFeatures", + "InvoiceRequestExpired", + "InvoiceRequestRejected", }; [Enum] interface ClosureReason { CounterpartyForceClosed(UntrustedString peer_msg); - HolderForceClosed(); + HolderForceClosed(boolean? 
broadcasted_latest_txn); LegacyCooperativeClosure(); CounterpartyInitiatedCooperativeClosure(); LocallyInitiatedCooperativeClosure(); @@ -271,6 +316,7 @@ interface ClosureReason { CounterpartyCoopClosedUnfundedChannel(); FundingBatchClosure(); HTLCsTimedOut(); + PeerFeerateTooLow(u32 peer_feerate_sat_per_kw, u32 required_feerate_sat_per_kw); }; [Enum] @@ -278,11 +324,18 @@ interface PaymentKind { Onchain(); Bolt11(PaymentHash hash, PaymentPreimage? preimage, PaymentSecret? secret, string? bolt11_invoice); Bolt11Jit(PaymentHash hash, PaymentPreimage? preimage, PaymentSecret? secret, LSPFeeLimits lsp_fee_limits); - Bolt12Offer(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, OfferId offer_id); - Bolt12Refund(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret); + Bolt12Offer(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, OfferId offer_id, UntrustedString? payer_note, u64? quantity); + Bolt12Refund(PaymentHash? hash, PaymentPreimage? preimage, PaymentSecret? secret, UntrustedString? payer_note, u64? quantity); Spontaneous(PaymentHash hash, PaymentPreimage? preimage, sequence custom_tlvs); }; +[Enum] +interface QrPaymentResult { + Onchain(Txid txid); + Bolt11(PaymentId payment_id); + Bolt12(PaymentId payment_id); +}; + enum PaymentDirection { "Inbound", "Outbound", @@ -312,6 +365,19 @@ dictionary PaymentDetails { u64 latest_update_timestamp; }; +dictionary SendingParameters { + MaxTotalRoutingFeeLimit? max_total_routing_fee_msat; + u32? max_total_cltv_expiry_delta; + u8? max_path_count; + u8? max_channel_saturation_power_of_half; +}; + +[Enum] +interface MaxTotalRoutingFeeLimit { + None (); + Some ( u64 amount_msat ); +}; + // [NonExhaustive] // enum Network { // "Bitcoin", @@ -349,7 +415,7 @@ dictionary ChannelDetails { boolean is_outbound; boolean is_channel_ready; boolean is_usable; - boolean is_public; + boolean is_announced; u16? cltv_expiry_delta; u64 counterparty_unspendable_punishment_reserve; u64? 
counterparty_outbound_htlc_minimum_msat; @@ -374,12 +440,58 @@ dictionary PeerDetails { [Enum] interface LightningBalance { - ClaimableOnChannelClose ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis ); - ClaimableAwaitingConfirmations ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 confirmation_height ); - ContentiousClaimable ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 timeout_height, PaymentHash payment_hash, PaymentPreimage payment_preimage ); - MaybeTimeoutClaimableHTLC ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 claimable_height, PaymentHash payment_hash); - MaybePreimageClaimableHTLC ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis, u32 expiry_height, PaymentHash payment_hash); - CounterpartyRevokedOutputClaimable ( ChannelId channel_id, PublicKey counterparty_node_id, u64 amount_satoshis ); + ClaimableOnChannelClose ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u64 transaction_fee_satoshis, + u64 outbound_payment_htlc_rounded_msat, + u64 outbound_forwarded_htlc_rounded_msat, + u64 inbound_claiming_htlc_rounded_msat, + u64 inbound_htlc_rounded_msat + ); + ClaimableAwaitingConfirmations ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 confirmation_height, + BalanceSource source + ); + ContentiousClaimable ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 timeout_height, + PaymentHash payment_hash, + PaymentPreimage payment_preimage + ); + MaybeTimeoutClaimableHTLC ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 claimable_height, + PaymentHash payment_hash, + boolean outbound_payment + ); + MaybePreimageClaimableHTLC ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis, + u32 expiry_height, + PaymentHash payment_hash + 
); + CounterpartyRevokedOutputClaimable ( + ChannelId channel_id, + PublicKey counterparty_node_id, + u64 amount_satoshis + ); +}; + +enum BalanceSource { + "HolderForceClosed", + "CounterpartyForceClosed", + "CoopClose", + "Htlc", }; [Enum] @@ -398,20 +510,19 @@ dictionary BalanceDetails { sequence pending_balances_from_channel_closures; }; -interface ChannelConfig { - constructor(); - u32 forwarding_fee_proportional_millionths(); - void set_forwarding_fee_proportional_millionths(u32 value); - u32 forwarding_fee_base_msat(); - void set_forwarding_fee_base_msat(u32 fee_msat); - u16 cltv_expiry_delta(); - void set_cltv_expiry_delta(u16 value); - u64 force_close_avoidance_max_fee_satoshis(); - void set_force_close_avoidance_max_fee_satoshis(u64 value_sat); - boolean accept_underpaying_htlcs(); - void set_accept_underpaying_htlcs(boolean value); - void set_max_dust_htlc_exposure_from_fixed_limit(u64 limit_msat); - void set_max_dust_htlc_exposure_from_fee_rate_multiplier(u64 multiplier); +dictionary ChannelConfig { + u32 forwarding_fee_proportional_millionths; + u32 forwarding_fee_base_msat; + u16 cltv_expiry_delta; + MaxDustHTLCExposure max_dust_htlc_exposure; + u64 force_close_avoidance_max_fee_satoshis; + boolean accept_underpaying_htlcs; +}; + +[Enum] +interface MaxDustHTLCExposure { + FixedLimit ( u64 limit_msat ); + FeeRateMultiplier ( u64 multiplier ); }; enum LogLevel { @@ -529,3 +640,6 @@ typedef string Mnemonic; [Custom] typedef string UntrustedString; + +[Custom] +typedef string NodeAlias; diff --git a/bindings/python/pyproject.toml b/bindings/python/pyproject.toml index c8ff0a79d..7d24d7884 100644 --- a/bindings/python/pyproject.toml +++ b/bindings/python/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "ldk_node" -version = "0.3.0" +version = "0.4.0" authors = [ { name="Elias Rohrer", email="dev@tnull.de" }, ] diff --git a/bindings/python/src/ldk_node/test_ldk_node.py b/bindings/python/src/ldk_node/test_ldk_node.py index 57269bb09..407eb21aa 100644 --- 
a/bindings/python/src/ldk_node/test_ldk_node.py +++ b/bindings/python/src/ldk_node/test_ldk_node.py @@ -84,7 +84,7 @@ def setup_node(tmp_dir, esplora_endpoint, listening_addresses): config = default_config() builder = Builder.from_config(config) builder.set_storage_dir_path(tmp_dir) - builder.set_esplora_server(esplora_endpoint) + builder.set_chain_source_esplora(esplora_endpoint, None) builder.set_network(DEFAULT_TEST_NETWORK) builder.set_listening_addresses(listening_addresses) return builder.build() @@ -155,7 +155,7 @@ def test_channel_full_cycle(self): print("TOTAL 2:", total_balance_2) self.assertEqual(total_balance_2, 100000) - node_1.connect_open_channel(node_id_2, listening_addresses_2[0], 50000, None, None, True) + node_1.open_channel(node_id_2, listening_addresses_2[0], 50000, None, None) channel_pending_event_1 = node_1.wait_next_event() assert isinstance(channel_pending_event_1, Event.CHANNEL_PENDING) @@ -186,7 +186,7 @@ def test_channel_full_cycle(self): node_2.event_handled() invoice = node_2.bolt11_payment().receive(2500000, "asdf", 9217) - node_1.bolt11_payment().send(invoice) + node_1.bolt11_payment().send(invoice, None) payment_successful_event_1 = node_1.wait_next_event() assert isinstance(payment_successful_event_1, Event.PAYMENT_SUCCESSFUL) diff --git a/bindings/swift/Sources/LDKNode/LDKNode.swift b/bindings/swift/Sources/LDKNode/LDKNode.swift index 5937c9050..835816b9f 100644 --- a/bindings/swift/Sources/LDKNode/LDKNode.swift +++ b/bindings/swift/Sources/LDKNode/LDKNode.swift @@ -1,7 +1,9 @@ // This file was autogenerated by some hot garbage in the `uniffi` crate. // Trust me, you don't want to mess with it! 
-import Foundation + import SystemConfiguration +// swiftlint:disable all +import Foundation // Depending on the consumer's build setup, the low-level FFI code // might be in a separate module, or it might be compiled inline into @@ -19,6 +21,10 @@ fileprivate extension RustBuffer { self.init(capacity: rbuf.capacity, len: rbuf.len, data: rbuf.data) } + static func empty() -> RustBuffer { + RustBuffer(capacity: 0, len:0, data: nil) + } + static func from(_ ptr: UnsafeBufferPointer) -> RustBuffer { try! rustCall { ffi_ldk_node_rustbuffer_from_bytes(ForeignBytes(bufferPointer: ptr), $0) } } @@ -221,9 +227,17 @@ fileprivate enum UniffiInternalError: LocalizedError { } } +fileprivate extension NSLock { + func withLock(f: () throws -> T) rethrows -> T { + self.lock() + defer { self.unlock() } + return try f() + } +} + fileprivate let CALL_SUCCESS: Int8 = 0 fileprivate let CALL_ERROR: Int8 = 1 -fileprivate let CALL_PANIC: Int8 = 2 +fileprivate let CALL_UNEXPECTED_ERROR: Int8 = 2 fileprivate let CALL_CANCELLED: Int8 = 3 fileprivate extension RustCallStatus { @@ -276,7 +290,7 @@ private func uniffiCheckCallStatus( throw UniffiInternalError.unexpectedRustCallError } - case CALL_PANIC: + case CALL_UNEXPECTED_ERROR: // When the rust code sees a panic, it tries to construct a RustBuffer // with the message. But if that code panics, then it just sends back // an empty buffer. 
@@ -295,6 +309,76 @@ private func uniffiCheckCallStatus( } } +private func uniffiTraitInterfaceCall( + callStatus: UnsafeMutablePointer, + makeCall: () throws -> T, + writeReturn: (T) -> () +) { + do { + try writeReturn(makeCall()) + } catch let error { + callStatus.pointee.code = CALL_UNEXPECTED_ERROR + callStatus.pointee.errorBuf = FfiConverterString.lower(String(describing: error)) + } +} + +private func uniffiTraitInterfaceCallWithError( + callStatus: UnsafeMutablePointer, + makeCall: () throws -> T, + writeReturn: (T) -> (), + lowerError: (E) -> RustBuffer +) { + do { + try writeReturn(makeCall()) + } catch let error as E { + callStatus.pointee.code = CALL_ERROR + callStatus.pointee.errorBuf = lowerError(error) + } catch { + callStatus.pointee.code = CALL_UNEXPECTED_ERROR + callStatus.pointee.errorBuf = FfiConverterString.lower(String(describing: error)) + } +} +fileprivate class UniffiHandleMap { + private var map: [UInt64: T] = [:] + private let lock = NSLock() + private var currentHandle: UInt64 = 1 + + func insert(obj: T) -> UInt64 { + lock.withLock { + let handle = currentHandle + currentHandle += 1 + map[handle] = obj + return handle + } + } + + func get(handle: UInt64) throws -> T { + try lock.withLock { + guard let obj = map[handle] else { + throw UniffiInternalError.unexpectedStaleHandle + } + return obj + } + } + + @discardableResult + func remove(handle: UInt64) throws -> T { + try lock.withLock { + guard let obj = map.removeValue(forKey: handle) else { + throw UniffiInternalError.unexpectedStaleHandle + } + return obj + } + } + + var count: Int { + get { + map.count + } + } +} + + // Public interface members begin here. @@ -430,168 +514,168 @@ public protocol Bolt11PaymentProtocol : AnyObject { func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?) 
throws -> Bolt11Invoice - func send(invoice: Bolt11Invoice) throws -> PaymentId + func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?) throws -> PaymentId func sendProbes(invoice: Bolt11Invoice) throws func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws - func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws -> PaymentId + func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?) throws -> PaymentId } -public class Bolt11Payment: +open class Bolt11Payment: Bolt11PaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt11payment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! 
rustCall { uniffi_ldk_node_fn_free_bolt11payment(pointer, $0) } } - - public func claimForHash(paymentHash: PaymentHash, claimableAmountMsat: UInt64, preimage: PaymentPreimage) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_claim_for_hash(self.uniffiClonePointer(), +open func claimForHash(paymentHash: PaymentHash, claimableAmountMsat: UInt64, preimage: PaymentPreimage)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_claim_for_hash(self.uniffiClonePointer(), FfiConverterTypePaymentHash.lower(paymentHash), FfiConverterUInt64.lower(claimableAmountMsat), FfiConverterTypePaymentPreimage.lower(preimage),$0 ) } - } - public func failForHash(paymentHash: PaymentHash) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_fail_for_hash(self.uniffiClonePointer(), +} + +open func failForHash(paymentHash: PaymentHash)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_fail_for_hash(self.uniffiClonePointer(), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) } - } - public func receive(amountMsat: UInt64, description: String, expirySecs: UInt32) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive(self.uniffiClonePointer(), +} + +open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func receiveForHash(amountMsat: UInt64, description: String, expirySecs: 
UInt32, paymentHash: PaymentHash) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_for_hash(self.uniffiClonePointer(), + +open func receiveForHash(amountMsat: UInt64, description: String, expirySecs: UInt32, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_for_hash(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) +}) } - ) - } - public func receiveVariableAmount(description: String, expirySecs: UInt32) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount(self.uniffiClonePointer(), + +open func receiveVariableAmount(description: String, expirySecs: UInt32)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func receiveVariableAmountForHash(description: String, expirySecs: UInt32, paymentHash: PaymentHash) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_for_hash(self.uniffiClonePointer(), + +open func receiveVariableAmountForHash(description: String, expirySecs: UInt32, paymentHash: PaymentHash)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try 
rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_for_hash(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterTypePaymentHash.lower(paymentHash),$0 ) +}) } - ) - } - public func receiveVariableAmountViaJitChannel(description: String, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?) throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_via_jit_channel(self.uniffiClonePointer(), + +open func receiveVariableAmountViaJitChannel(description: String, expirySecs: UInt32, maxProportionalLspFeeLimitPpmMsat: UInt64?)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_variable_amount_via_jit_channel(self.uniffiClonePointer(), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterOptionUInt64.lower(maxProportionalLspFeeLimitPpmMsat),$0 ) +}) } - ) - } - public func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?) 
throws -> Bolt11Invoice { - return try FfiConverterTypeBolt11Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel(self.uniffiClonePointer(), + +open func receiveViaJitChannel(amountMsat: UInt64, description: String, expirySecs: UInt32, maxLspFeeLimitMsat: UInt64?)throws -> Bolt11Invoice { + return try FfiConverterTypeBolt11Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_receive_via_jit_channel(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterString.lower(description), FfiConverterUInt32.lower(expirySecs), FfiConverterOptionUInt64.lower(maxLspFeeLimitMsat),$0 ) +}) } - ) - } - public func send(invoice: Bolt11Invoice) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send(self.uniffiClonePointer(), - FfiConverterTypeBolt11Invoice.lower(invoice),$0 + +open func send(invoice: Bolt11Invoice, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send(self.uniffiClonePointer(), + FfiConverterTypeBolt11Invoice.lower(invoice), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } - public func sendProbes(invoice: Bolt11Invoice) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_probes(self.uniffiClonePointer(), + +open func sendProbes(invoice: Bolt11Invoice)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_probes(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice),$0 ) } - } - public func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws { 
- try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_probes_using_amount(self.uniffiClonePointer(), +} + +open func sendProbesUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_probes_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), FfiConverterUInt64.lower(amountMsat),$0 ) } - } - public func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt11payment_send_using_amount(self.uniffiClonePointer(), +} + +open func sendUsingAmount(invoice: Bolt11Invoice, amountMsat: UInt64, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt11payment_send_using_amount(self.uniffiClonePointer(), FfiConverterTypeBolt11Invoice.lower(invoice), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } + } @@ -627,6 +711,8 @@ public struct FfiConverterTypeBolt11Payment: FfiConverter { } + + public func FfiConverterTypeBolt11Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt11Payment { return try FfiConverterTypeBolt11Payment.lift(pointer) } @@ -640,108 +726,121 @@ public func FfiConverterTypeBolt11Payment_lower(_ value: Bolt11Payment) -> Unsaf public protocol Bolt12PaymentProtocol : AnyObject { - func initiateRefund(amountMsat: UInt64, expirySecs: UInt32) throws -> Refund + func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?) 
throws -> Refund - func receive(amountMsat: UInt64, description: String) throws -> Offer + func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?) throws -> Offer - func receiveVariableAmount(description: String) throws -> Offer + func receiveVariableAmount(description: String, expirySecs: UInt32?) throws -> Offer func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice - func send(offer: Offer, payerNote: String?) throws -> PaymentId + func send(offer: Offer, quantity: UInt64?, payerNote: String?) throws -> PaymentId - func sendUsingAmount(offer: Offer, payerNote: String?, amountMsat: UInt64) throws -> PaymentId + func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?) throws -> PaymentId } -public class Bolt12Payment: +open class Bolt12Payment: Bolt12PaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. 
+ public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_bolt12payment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_bolt12payment(pointer, $0) } } - - public func initiateRefund(amountMsat: UInt64, expirySecs: UInt32) throws -> Refund { - return try FfiConverterTypeRefund.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), +open func initiateRefund(amountMsat: UInt64, expirySecs: UInt32, quantity: UInt64?, payerNote: String?)throws -> Refund { + return try FfiConverterTypeRefund.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_initiate_refund(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterUInt32.lower(expirySecs),$0 + FfiConverterUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } - public func receive(amountMsat: UInt64, description: String) throws -> Offer { - return try FfiConverterTypeOffer.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), + +open func receive(amountMsat: UInt64, description: String, expirySecs: UInt32?, quantity: UInt64?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterString.lower(description),$0 + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs), + FfiConverterOptionUInt64.lower(quantity),$0 ) +}) } - ) - } - public func 
receiveVariableAmount(description: String) throws -> Offer { - return try FfiConverterTypeOffer.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), - FfiConverterString.lower(description),$0 + +open func receiveVariableAmount(description: String, expirySecs: UInt32?)throws -> Offer { + return try FfiConverterTypeOffer.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_receive_variable_amount(self.uniffiClonePointer(), + FfiConverterString.lower(description), + FfiConverterOptionUInt32.lower(expirySecs),$0 ) +}) } - ) - } - public func requestRefundPayment(refund: Refund) throws -> Bolt12Invoice { - return try FfiConverterTypeBolt12Invoice.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), + +open func requestRefundPayment(refund: Refund)throws -> Bolt12Invoice { + return try FfiConverterTypeBolt12Invoice.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_request_refund_payment(self.uniffiClonePointer(), FfiConverterTypeRefund.lower(refund),$0 ) +}) } - ) - } - public func send(offer: Offer, payerNote: String?) 
throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), + +open func send(offer: Offer, quantity: UInt64?, payerNote: String?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send(self.uniffiClonePointer(), FfiConverterTypeOffer.lower(offer), + FfiConverterOptionUInt64.lower(quantity), FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } - public func sendUsingAmount(offer: Offer, payerNote: String?, amountMsat: UInt64) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), + +open func sendUsingAmount(offer: Offer, amountMsat: UInt64, quantity: UInt64?, payerNote: String?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_bolt12payment_send_using_amount(self.uniffiClonePointer(), FfiConverterTypeOffer.lower(offer), - FfiConverterOptionString.lower(payerNote), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountMsat), + FfiConverterOptionUInt64.lower(quantity), + FfiConverterOptionString.lower(payerNote),$0 ) +}) } - ) - } + } @@ -777,6 +876,8 @@ public struct FfiConverterTypeBolt12Payment: FfiConverter { } + + public func FfiConverterTypeBolt12Payment_lift(_ pointer: UnsafeMutableRawPointer) throws -> Bolt12Payment { return try FfiConverterTypeBolt12Payment.lift(pointer) } @@ -794,14 +895,22 @@ public protocol BuilderProtocol : AnyObject { func buildWithFsStore() throws -> Node + func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String]) throws -> Node + + func 
buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String]) throws -> Node + + func buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider) throws -> Node + + func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) + + func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) + func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) func setEntropySeedBytes(seedBytes: [UInt8]) throws func setEntropySeedPath(seedPath: String) - func setEsploraServer(esploraServerUrl: String) - func setGossipSourceP2p() func setGossipSourceRgs(rgsServerUrl: String) @@ -812,154 +921,203 @@ public protocol BuilderProtocol : AnyObject { func setNetwork(network: Network) + func setNodeAlias(nodeAlias: String) throws + func setStorageDirPath(storageDirPath: String) } -public class Builder: +open class Builder: BuilderProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. 
Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_builder(self.pointer, $0) } } - public convenience init() { - self.init(unsafeFromRawPointer: try! rustCall() { - uniffi_ldk_node_fn_constructor_builder_new($0) -}) - } +public convenience init() { + let pointer = + try! rustCall() { + uniffi_ldk_node_fn_constructor_builder_new($0 + ) +} + self.init(unsafeFromRawPointer: pointer) +} deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_builder(pointer, $0) } } - public static func fromConfig(config: Config) -> Builder { - return Builder(unsafeFromRawPointer: try! rustCall() { +public static func fromConfig(config: Config) -> Builder { + return try! FfiConverterTypeBuilder.lift(try! rustCall() { uniffi_ldk_node_fn_constructor_builder_from_config( - FfiConverterTypeConfig.lower(config),$0) + FfiConverterTypeConfig.lower(config),$0 + ) }) - } - +} +open func build()throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build(self.uniffiClonePointer(),$0 + ) +}) +} - public func build() throws -> Node { - return try FfiConverterTypeNode.lift( - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_build(self.uniffiClonePointer(), $0 +open func buildWithFsStore()throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_fs_store(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func buildWithFsStore() throws -> Node { - return try FfiConverterTypeNode.lift( - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - 
uniffi_ldk_node_fn_method_builder_build_with_fs_store(self.uniffiClonePointer(), $0 + +open func buildWithVssStore(vssUrl: String, storeId: String, lnurlAuthServerUrl: String, fixedHeaders: [String: String])throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterString.lower(lnurlAuthServerUrl), + FfiConverterDictionaryStringString.lower(fixedHeaders),$0 ) +}) +} + +open func buildWithVssStoreAndFixedHeaders(vssUrl: String, storeId: String, fixedHeaders: [String: String])throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store_and_fixed_headers(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterDictionaryStringString.lower(fixedHeaders),$0 + ) +}) +} + +open func buildWithVssStoreAndHeaderProvider(vssUrl: String, storeId: String, headerProvider: VssHeaderProvider)throws -> Node { + return try FfiConverterTypeNode.lift(try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_build_with_vss_store_and_header_provider(self.uniffiClonePointer(), + FfiConverterString.lower(vssUrl), + FfiConverterString.lower(storeId), + FfiConverterTypeVssHeaderProvider.lower(headerProvider),$0 + ) +}) +} + +open func setChainSourceBitcoindRpc(rpcHost: String, rpcPort: UInt16, rpcUser: String, rpcPassword: String) {try! 
rustCall() { + uniffi_ldk_node_fn_method_builder_set_chain_source_bitcoind_rpc(self.uniffiClonePointer(), + FfiConverterString.lower(rpcHost), + FfiConverterUInt16.lower(rpcPort), + FfiConverterString.lower(rpcUser), + FfiConverterString.lower(rpcPassword),$0 + ) +} } - ) - } - public func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) { - try! - rustCall() { - uniffi_ldk_node_fn_method_builder_set_entropy_bip39_mnemonic(self.uniffiClonePointer(), +open func setChainSourceEsplora(serverUrl: String, config: EsploraSyncConfig?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_chain_source_esplora(self.uniffiClonePointer(), + FfiConverterString.lower(serverUrl), + FfiConverterOptionTypeEsploraSyncConfig.lower(config),$0 + ) +} +} + +open func setEntropyBip39Mnemonic(mnemonic: Mnemonic, passphrase: String?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_entropy_bip39_mnemonic(self.uniffiClonePointer(), FfiConverterTypeMnemonic.lower(mnemonic), FfiConverterOptionString.lower(passphrase),$0 ) } - } - public func setEntropySeedBytes(seedBytes: [UInt8]) throws { - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_set_entropy_seed_bytes(self.uniffiClonePointer(), +} + +open func setEntropySeedBytes(seedBytes: [UInt8])throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_entropy_seed_bytes(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(seedBytes),$0 ) } - } - public func setEntropySeedPath(seedPath: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_entropy_seed_path(self.uniffiClonePointer(), +open func setEntropySeedPath(seedPath: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_entropy_seed_path(self.uniffiClonePointer(), FfiConverterString.lower(seedPath),$0 ) } - } - public func setEsploraServer(esploraServerUrl: String) { - try! 
- rustCall() { - - uniffi_ldk_node_fn_method_builder_set_esplora_server(self.uniffiClonePointer(), - FfiConverterString.lower(esploraServerUrl),$0 - ) } - } - public func setGossipSourceP2p() { - try! - rustCall() { - uniffi_ldk_node_fn_method_builder_set_gossip_source_p2p(self.uniffiClonePointer(), $0 +open func setGossipSourceP2p() {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_gossip_source_p2p(self.uniffiClonePointer(),$0 ) } - } - public func setGossipSourceRgs(rgsServerUrl: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_gossip_source_rgs(self.uniffiClonePointer(), +open func setGossipSourceRgs(rgsServerUrl: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_gossip_source_rgs(self.uniffiClonePointer(), FfiConverterString.lower(rgsServerUrl),$0 ) } - } - public func setLiquiditySourceLsps2(address: SocketAddress, nodeId: PublicKey, token: String?) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_liquidity_source_lsps2(self.uniffiClonePointer(), +open func setLiquiditySourceLsps2(address: SocketAddress, nodeId: PublicKey, token: String?) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_liquidity_source_lsps2(self.uniffiClonePointer(), FfiConverterTypeSocketAddress.lower(address), FfiConverterTypePublicKey.lower(nodeId), FfiConverterOptionString.lower(token),$0 ) } - } - public func setListeningAddresses(listeningAddresses: [SocketAddress]) throws { - try - rustCallWithError(FfiConverterTypeBuildError.lift) { - uniffi_ldk_node_fn_method_builder_set_listening_addresses(self.uniffiClonePointer(), +} + +open func setListeningAddresses(listeningAddresses: [SocketAddress])throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_listening_addresses(self.uniffiClonePointer(), FfiConverterSequenceTypeSocketAddress.lower(listeningAddresses),$0 ) } - } - public func setNetwork(network: Network) { - try! 
- rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_network(self.uniffiClonePointer(), +open func setNetwork(network: Network) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_network(self.uniffiClonePointer(), FfiConverterTypeNetwork.lower(network),$0 ) } - } - public func setStorageDirPath(storageDirPath: String) { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_builder_set_storage_dir_path(self.uniffiClonePointer(), +open func setNodeAlias(nodeAlias: String)throws {try rustCallWithError(FfiConverterTypeBuildError.lift) { + uniffi_ldk_node_fn_method_builder_set_node_alias(self.uniffiClonePointer(), + FfiConverterString.lower(nodeAlias),$0 + ) +} +} + +open func setStorageDirPath(storageDirPath: String) {try! rustCall() { + uniffi_ldk_node_fn_method_builder_set_storage_dir_path(self.uniffiClonePointer(), FfiConverterString.lower(storageDirPath),$0 ) } - } +} + } @@ -995,6 +1153,8 @@ public struct FfiConverterTypeBuilder: FfiConverter { } + + public func FfiConverterTypeBuilder_lift(_ pointer: UnsafeMutableRawPointer) throws -> Builder { return try FfiConverterTypeBuilder.lift(pointer) } @@ -1006,299 +1166,90 @@ public func FfiConverterTypeBuilder_lower(_ value: Builder) -> UnsafeMutableRawP -public protocol ChannelConfigProtocol : AnyObject { - - func acceptUnderpayingHtlcs() -> Bool - - func cltvExpiryDelta() -> UInt16 - - func forceCloseAvoidanceMaxFeeSatoshis() -> UInt64 - - func forwardingFeeBaseMsat() -> UInt32 - - func forwardingFeeProportionalMillionths() -> UInt32 - - func setAcceptUnderpayingHtlcs(value: Bool) - - func setCltvExpiryDelta(value: UInt16) - - func setForceCloseAvoidanceMaxFeeSatoshis(valueSat: UInt64) +public protocol NetworkGraphProtocol : AnyObject { - func setForwardingFeeBaseMsat(feeMsat: UInt32) + func channel(shortChannelId: UInt64) -> ChannelInfo? 
- func setForwardingFeeProportionalMillionths(value: UInt32) + func listChannels() -> [UInt64] - func setMaxDustHtlcExposureFromFeeRateMultiplier(multiplier: UInt64) + func listNodes() -> [NodeId] - func setMaxDustHtlcExposureFromFixedLimit(limitMsat: UInt64) + func node(nodeId: NodeId) -> NodeInfo? } -public class ChannelConfig: - ChannelConfigProtocol { - fileprivate let pointer: UnsafeMutableRawPointer +open class NetworkGraph: + NetworkGraphProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } - public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_channelconfig(self.pointer, $0) } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil } - public convenience init() { - self.init(unsafeFromRawPointer: try! rustCall() { - uniffi_ldk_node_fn_constructor_channelconfig_new($0) -}) + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! 
rustCall { uniffi_ldk_node_fn_clone_networkgraph(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { - try! rustCall { uniffi_ldk_node_fn_free_channelconfig(pointer, $0) } + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_networkgraph(pointer, $0) } } - - public func acceptUnderpayingHtlcs() -> Bool { - return try! FfiConverterBool.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_accept_underpaying_htlcs(self.uniffiClonePointer(), $0 +open func channel(shortChannelId: UInt64) -> ChannelInfo? { + return try! FfiConverterOptionTypeChannelInfo.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_channel(self.uniffiClonePointer(), + FfiConverterUInt64.lower(shortChannelId),$0 ) +}) } - ) - } - public func cltvExpiryDelta() -> UInt16 { - return try! FfiConverterUInt16.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_cltv_expiry_delta(self.uniffiClonePointer(), $0 +open func listChannels() -> [UInt64] { + return try! FfiConverterSequenceUInt64.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_list_channels(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func forceCloseAvoidanceMaxFeeSatoshis() -> UInt64 { - return try! FfiConverterUInt64.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_force_close_avoidance_max_fee_satoshis(self.uniffiClonePointer(), $0 +open func listNodes() -> [NodeId] { + return try! FfiConverterSequenceTypeNodeId.lift(try! rustCall() { + uniffi_ldk_node_fn_method_networkgraph_list_nodes(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func forwardingFeeBaseMsat() -> UInt32 { - return try! FfiConverterUInt32.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_forwarding_fee_base_msat(self.uniffiClonePointer(), $0 +open func node(nodeId: NodeId) -> NodeInfo? { + return try! FfiConverterOptionTypeNodeInfo.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_networkgraph_node(self.uniffiClonePointer(), + FfiConverterTypeNodeId.lower(nodeId),$0 ) +}) } - ) - } - public func forwardingFeeProportionalMillionths() -> UInt32 { - return try! FfiConverterUInt32.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_channelconfig_forwarding_fee_proportional_millionths(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func setAcceptUnderpayingHtlcs(value: Bool) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_accept_underpaying_htlcs(self.uniffiClonePointer(), - FfiConverterBool.lower(value),$0 - ) -} - } - public func setCltvExpiryDelta(value: UInt16) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_cltv_expiry_delta(self.uniffiClonePointer(), - FfiConverterUInt16.lower(value),$0 - ) -} - } - public func setForceCloseAvoidanceMaxFeeSatoshis(valueSat: UInt64) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_force_close_avoidance_max_fee_satoshis(self.uniffiClonePointer(), - FfiConverterUInt64.lower(valueSat),$0 - ) -} - } - public func setForwardingFeeBaseMsat(feeMsat: UInt32) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_forwarding_fee_base_msat(self.uniffiClonePointer(), - FfiConverterUInt32.lower(feeMsat),$0 - ) -} - } - public func setForwardingFeeProportionalMillionths(value: UInt32) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_forwarding_fee_proportional_millionths(self.uniffiClonePointer(), - FfiConverterUInt32.lower(value),$0 - ) -} - } - public func setMaxDustHtlcExposureFromFeeRateMultiplier(multiplier: UInt64) { - try! - rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_max_dust_htlc_exposure_from_fee_rate_multiplier(self.uniffiClonePointer(), - FfiConverterUInt64.lower(multiplier),$0 - ) -} - } - public func setMaxDustHtlcExposureFromFixedLimit(limitMsat: UInt64) { - try! 
- rustCall() { - - uniffi_ldk_node_fn_method_channelconfig_set_max_dust_htlc_exposure_from_fixed_limit(self.uniffiClonePointer(), - FfiConverterUInt64.lower(limitMsat),$0 - ) -} - } - -} - -public struct FfiConverterTypeChannelConfig: FfiConverter { - - typealias FfiType = UnsafeMutableRawPointer - typealias SwiftType = ChannelConfig - - public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> ChannelConfig { - return ChannelConfig(unsafeFromRawPointer: pointer) - } - - public static func lower(_ value: ChannelConfig) -> UnsafeMutableRawPointer { - return value.uniffiClonePointer() - } - - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelConfig { - let v: UInt64 = try readInt(&buf) - // The Rust code won't compile if a pointer won't fit in a UInt64. - // We have to go via `UInt` because that's the thing that's the size of a pointer. - let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) - if (ptr == nil) { - throw UniffiInternalError.unexpectedNullPointer - } - return try lift(ptr!) - } - - public static func write(_ value: ChannelConfig, into buf: inout [UInt8]) { - // This fiddling is because `Int` is the thing that's the same size as a pointer. - // The Rust code won't compile if a pointer won't fit in a `UInt64`. - writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) - } -} - - -public func FfiConverterTypeChannelConfig_lift(_ pointer: UnsafeMutableRawPointer) throws -> ChannelConfig { - return try FfiConverterTypeChannelConfig.lift(pointer) -} - -public func FfiConverterTypeChannelConfig_lower(_ value: ChannelConfig) -> UnsafeMutableRawPointer { - return FfiConverterTypeChannelConfig.lower(value) -} - - - - -public protocol NetworkGraphProtocol : AnyObject { - - func channel(shortChannelId: UInt64) -> ChannelInfo? - - func listChannels() -> [UInt64] - - func listNodes() -> [NodeId] - - func node(nodeId: NodeId) -> NodeInfo? 
- -} - -public class NetworkGraph: - NetworkGraphProtocol { - fileprivate let pointer: UnsafeMutableRawPointer - - // TODO: We'd like this to be `private` but for Swifty reasons, - // we can't implement `FfiConverter` without making this `required` and we can't - // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { - self.pointer = pointer - } - - public func uniffiClonePointer() -> UnsafeMutableRawPointer { - return try! rustCall { uniffi_ldk_node_fn_clone_networkgraph(self.pointer, $0) } - } - - deinit { - try! rustCall { uniffi_ldk_node_fn_free_networkgraph(pointer, $0) } - } - - - - - - public func channel(shortChannelId: UInt64) -> ChannelInfo? { - return try! FfiConverterOptionTypeChannelInfo.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_channel(self.uniffiClonePointer(), - FfiConverterUInt64.lower(shortChannelId),$0 - ) -} - ) - } - public func listChannels() -> [UInt64] { - return try! FfiConverterSequenceUInt64.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_list_channels(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func listNodes() -> [NodeId] { - return try! FfiConverterSequenceTypeNodeId.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_list_nodes(self.uniffiClonePointer(), $0 - ) -} - ) - } - public func node(nodeId: NodeId) -> NodeInfo? { - return try! FfiConverterOptionTypeNodeInfo.lift( - try! 
- rustCall() { - - uniffi_ldk_node_fn_method_networkgraph_node(self.uniffiClonePointer(), - FfiConverterTypeNodeId.lower(nodeId),$0 - ) -} - ) - } - + } public struct FfiConverterTypeNetworkGraph: FfiConverter { @@ -1333,6 +1284,8 @@ public struct FfiConverterTypeNetworkGraph: FfiConverter { } + + public func FfiConverterTypeNetworkGraph_lift(_ pointer: UnsafeMutableRawPointer) throws -> NetworkGraph { return try FfiConverterTypeNetworkGraph.lift(pointer) } @@ -1356,13 +1309,11 @@ public protocol NodeProtocol : AnyObject { func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool) throws - func connectOpenChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?, announceChannel: Bool) throws -> UserChannelId - func disconnect(nodeId: PublicKey) throws func eventHandled() - func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws + func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, reason: String?) throws func listBalances() -> BalanceDetails @@ -1380,15 +1331,21 @@ public protocol NodeProtocol : AnyObject { func nextEventAsync() async -> Event + func nodeAlias() -> NodeAlias? + func nodeId() -> PublicKey func onchainPayment() -> OnchainPayment + func openAnnouncedChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?) throws -> UserChannelId + + func openChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?) throws -> UserChannelId + func payment(paymentId: PaymentId) -> PaymentDetails? 
func removePayment(paymentId: PaymentId) throws - func signMessage(msg: [UInt8]) throws -> String + func signMessage(msg: [UInt8]) -> String func spontaneousPayment() -> SpontaneousPayment @@ -1400,6 +1357,8 @@ public protocol NodeProtocol : AnyObject { func syncWallets() throws + func unifiedQrPayment() -> UnifiedQrPayment + func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig) throws func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool @@ -1408,193 +1367,163 @@ public protocol NodeProtocol : AnyObject { } -public class Node: +open class Node: NodeProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. - required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_node(self.pointer, $0) } } + // No primary constructor declared for this class. 
deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_node(pointer, $0) } } - - public func bolt11Payment() -> Bolt11Payment { - return try! FfiConverterTypeBolt11Payment.lift( - try! - rustCall() { - - uniffi_ldk_node_fn_method_node_bolt11_payment(self.uniffiClonePointer(), $0 +open func bolt11Payment() -> Bolt11Payment { + return try! FfiConverterTypeBolt11Payment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_bolt11_payment(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func bolt12Payment() -> Bolt12Payment { - return try! FfiConverterTypeBolt12Payment.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_bolt12_payment(self.uniffiClonePointer(), $0 +open func bolt12Payment() -> Bolt12Payment { + return try! FfiConverterTypeBolt12Payment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_bolt12_payment(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func closeChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_close_channel(self.uniffiClonePointer(), + +open func closeChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_close_channel(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), FfiConverterTypePublicKey.lower(counterpartyNodeId),$0 ) } - } - public func config() -> Config { - return try! FfiConverterTypeConfig.lift( - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_config(self.uniffiClonePointer(), $0 +open func config() -> Config { + return try! FfiConverterTypeConfig.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_config(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_connect(self.uniffiClonePointer(), + +open func connect(nodeId: PublicKey, address: SocketAddress, persist: Bool)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_connect(self.uniffiClonePointer(), FfiConverterTypePublicKey.lower(nodeId), FfiConverterTypeSocketAddress.lower(address), FfiConverterBool.lower(persist),$0 ) } - } - public func connectOpenChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?, announceChannel: Bool) throws -> UserChannelId { - return try FfiConverterTypeUserChannelId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_connect_open_channel(self.uniffiClonePointer(), - FfiConverterTypePublicKey.lower(nodeId), - FfiConverterTypeSocketAddress.lower(address), - FfiConverterUInt64.lower(channelAmountSats), - FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), - FfiConverterOptionTypeChannelConfig.lower(channelConfig), - FfiConverterBool.lower(announceChannel),$0 - ) } - ) - } - public func disconnect(nodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_disconnect(self.uniffiClonePointer(), + +open func disconnect(nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_disconnect(self.uniffiClonePointer(), FfiConverterTypePublicKey.lower(nodeId),$0 ) } - } - public func eventHandled() { - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_event_handled(self.uniffiClonePointer(), $0 +open func eventHandled() {try! 
rustCall() { + uniffi_ldk_node_fn_method_node_event_handled(self.uniffiClonePointer(),$0 ) } - } - public func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_force_close_channel(self.uniffiClonePointer(), +} + +open func forceCloseChannel(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, reason: String?)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_force_close_channel(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), - FfiConverterTypePublicKey.lower(counterpartyNodeId),$0 + FfiConverterTypePublicKey.lower(counterpartyNodeId), + FfiConverterOptionString.lower(reason),$0 ) } - } - public func listBalances() -> BalanceDetails { - return try! FfiConverterTypeBalanceDetails.lift( - try! - rustCall() { +} - uniffi_ldk_node_fn_method_node_list_balances(self.uniffiClonePointer(), $0 +open func listBalances() -> BalanceDetails { + return try! FfiConverterTypeBalanceDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_balances(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listChannels() -> [ChannelDetails] { - return try! FfiConverterSequenceTypeChannelDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_list_channels(self.uniffiClonePointer(), $0 +open func listChannels() -> [ChannelDetails] { + return try! FfiConverterSequenceTypeChannelDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_channels(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listPayments() -> [PaymentDetails] { - return try! FfiConverterSequenceTypePaymentDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_list_payments(self.uniffiClonePointer(), $0 +open func listPayments() -> [PaymentDetails] { + return try! FfiConverterSequenceTypePaymentDetails.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_list_payments(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listPeers() -> [PeerDetails] { - return try! FfiConverterSequenceTypePeerDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_list_peers(self.uniffiClonePointer(), $0 +open func listPeers() -> [PeerDetails] { + return try! FfiConverterSequenceTypePeerDetails.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_list_peers(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func listeningAddresses() -> [SocketAddress]? { - return try! FfiConverterOptionSequenceTypeSocketAddress.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_listening_addresses(self.uniffiClonePointer(), $0 +open func listeningAddresses() -> [SocketAddress]? { + return try! FfiConverterOptionSequenceTypeSocketAddress.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_listening_addresses(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func networkGraph() -> NetworkGraph { - return try! FfiConverterTypeNetworkGraph.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_network_graph(self.uniffiClonePointer(), $0 +open func networkGraph() -> NetworkGraph { + return try! FfiConverterTypeNetworkGraph.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_network_graph(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func nextEvent() -> Event? { - return try! FfiConverterOptionTypeEvent.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_next_event(self.uniffiClonePointer(), $0 +open func nextEvent() -> Event? { + return try! FfiConverterOptionTypeEvent.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_next_event(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func nextEventAsync() async -> Event { - return try! await uniffiRustCallAsync( + +open func nextEventAsync()async -> Event { + return + try! 
await uniffiRustCallAsync( rustFutureFunc: { uniffi_ldk_node_fn_method_node_next_event_async( self.uniffiClonePointer() + ) }, pollFunc: ffi_ldk_node_rust_future_poll_rust_buffer, @@ -1604,132 +1533,141 @@ public class Node: errorHandler: nil ) - } - +} - public func nodeId() -> PublicKey { - return try! FfiConverterTypePublicKey.lift( - try! - rustCall() { +open func nodeAlias() -> NodeAlias? { + return try! FfiConverterOptionTypeNodeAlias.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_node_alias(self.uniffiClonePointer(),$0 + ) +}) +} - uniffi_ldk_node_fn_method_node_node_id(self.uniffiClonePointer(), $0 +open func nodeId() -> PublicKey { + return try! FfiConverterTypePublicKey.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_node_id(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func onchainPayment() -> OnchainPayment { - return try! FfiConverterTypeOnchainPayment.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_onchain_payment(self.uniffiClonePointer(), $0 +open func onchainPayment() -> OnchainPayment { + return try! FfiConverterTypeOnchainPayment.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_onchain_payment(self.uniffiClonePointer(),$0 ) +}) +} + +open func openAnnouncedChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?)throws -> UserChannelId { + return try FfiConverterTypeUserChannelId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_open_announced_channel(self.uniffiClonePointer(), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterTypeSocketAddress.lower(address), + FfiConverterUInt64.lower(channelAmountSats), + FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), + FfiConverterOptionTypeChannelConfig.lower(channelConfig),$0 + ) +}) +} + +open func openChannel(nodeId: PublicKey, address: SocketAddress, channelAmountSats: UInt64, pushToCounterpartyMsat: UInt64?, channelConfig: ChannelConfig?)throws -> UserChannelId { + return try FfiConverterTypeUserChannelId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_open_channel(self.uniffiClonePointer(), + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterTypeSocketAddress.lower(address), + FfiConverterUInt64.lower(channelAmountSats), + FfiConverterOptionUInt64.lower(pushToCounterpartyMsat), + FfiConverterOptionTypeChannelConfig.lower(channelConfig),$0 + ) +}) } - ) - } - public func payment(paymentId: PaymentId) -> PaymentDetails? { - return try! FfiConverterOptionTypePaymentDetails.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_payment(self.uniffiClonePointer(), +open func payment(paymentId: PaymentId) -> PaymentDetails? { + return try! FfiConverterOptionTypePaymentDetails.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_payment(self.uniffiClonePointer(), FfiConverterTypePaymentId.lower(paymentId),$0 ) +}) } - ) - } - public func removePayment(paymentId: PaymentId) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_remove_payment(self.uniffiClonePointer(), + +open func removePayment(paymentId: PaymentId)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_remove_payment(self.uniffiClonePointer(), FfiConverterTypePaymentId.lower(paymentId),$0 ) } - } - public func signMessage(msg: [UInt8]) throws -> String { - return try FfiConverterString.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_sign_message(self.uniffiClonePointer(), +} + +open func signMessage(msg: [UInt8]) -> String { + return try! FfiConverterString.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_sign_message(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(msg),$0 ) +}) +} + +open func spontaneousPayment() -> SpontaneousPayment { + return try! FfiConverterTypeSpontaneousPayment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_spontaneous_payment(self.uniffiClonePointer(),$0 + ) +}) } - ) - } - public func spontaneousPayment() -> SpontaneousPayment { - return try! FfiConverterTypeSpontaneousPayment.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_spontaneous_payment(self.uniffiClonePointer(), $0 +open func start()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_start(self.uniffiClonePointer(),$0 ) } - ) - } - public func start() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_start(self.uniffiClonePointer(), $0 +} + +open func status() -> NodeStatus { + return try! FfiConverterTypeNodeStatus.lift(try! 
rustCall() { + uniffi_ldk_node_fn_method_node_status(self.uniffiClonePointer(),$0 ) +}) } - } - public func status() -> NodeStatus { - return try! FfiConverterTypeNodeStatus.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_status(self.uniffiClonePointer(), $0 +open func stop()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_stop(self.uniffiClonePointer(),$0 ) } - ) - } - public func stop() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_stop(self.uniffiClonePointer(), $0 +} + +open func syncWallets()throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_sync_wallets(self.uniffiClonePointer(),$0 ) } - } - public func syncWallets() throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_sync_wallets(self.uniffiClonePointer(), $0 +} + +open func unifiedQrPayment() -> UnifiedQrPayment { + return try! FfiConverterTypeUnifiedQrPayment.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_unified_qr_payment(self.uniffiClonePointer(),$0 ) +}) } - } - public func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_node_update_channel_config(self.uniffiClonePointer(), + +open func updateChannelConfig(userChannelId: UserChannelId, counterpartyNodeId: PublicKey, channelConfig: ChannelConfig)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_node_update_channel_config(self.uniffiClonePointer(), FfiConverterTypeUserChannelId.lower(userChannelId), FfiConverterTypePublicKey.lower(counterpartyNodeId), FfiConverterTypeChannelConfig.lower(channelConfig),$0 ) } - } - public func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool { - return try! FfiConverterBool.lift( - try! 
- rustCall() { +} - uniffi_ldk_node_fn_method_node_verify_signature(self.uniffiClonePointer(), +open func verifySignature(msg: [UInt8], sig: String, pkey: PublicKey) -> Bool { + return try! FfiConverterBool.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_verify_signature(self.uniffiClonePointer(), FfiConverterSequenceUInt8.lower(msg), FfiConverterString.lower(sig), FfiConverterTypePublicKey.lower(pkey),$0 ) +}) } - ) - } - public func waitNextEvent() -> Event { - return try! FfiConverterTypeEvent.lift( - try! - rustCall() { - uniffi_ldk_node_fn_method_node_wait_next_event(self.uniffiClonePointer(), $0 +open func waitNextEvent() -> Event { + return try! FfiConverterTypeEvent.lift(try! rustCall() { + uniffi_ldk_node_fn_method_node_wait_next_event(self.uniffiClonePointer(),$0 ) +}) } - ) - } + } @@ -1765,6 +1703,8 @@ public struct FfiConverterTypeNode: FfiConverter { } + + public func FfiConverterTypeNode_lift(_ pointer: UnsafeMutableRawPointer) throws -> Node { return try FfiConverterTypeNode.lift(pointer) } @@ -1782,63 +1722,75 @@ public protocol OnchainPaymentProtocol : AnyObject { func sendAllToAddress(address: Address) throws -> Txid - func sendToAddress(address: Address, amountMsat: UInt64) throws -> Txid + func sendToAddress(address: Address, amountSats: UInt64) throws -> Txid } -public class OnchainPayment: +open class OnchainPayment: OnchainPaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. 
- required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_onchainpayment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_onchainpayment(pointer, $0) } } - - public func newAddress() throws -> Address { - return try FfiConverterTypeAddress.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(), $0 +open func newAddress()throws -> Address { + return try FfiConverterTypeAddress.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_new_address(self.uniffiClonePointer(),$0 ) +}) } - ) - } - public func sendAllToAddress(address: Address) throws -> Txid { - return try FfiConverterTypeTxid.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), + +open func sendAllToAddress(address: Address)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_all_to_address(self.uniffiClonePointer(), 
FfiConverterTypeAddress.lower(address),$0 ) +}) } - ) - } - public func sendToAddress(address: Address, amountMsat: UInt64) throws -> Txid { - return try FfiConverterTypeTxid.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), + +open func sendToAddress(address: Address, amountSats: UInt64)throws -> Txid { + return try FfiConverterTypeTxid.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_onchainpayment_send_to_address(self.uniffiClonePointer(), FfiConverterTypeAddress.lower(address), - FfiConverterUInt64.lower(amountMsat),$0 + FfiConverterUInt64.lower(amountSats),$0 ) +}) } - ) - } + } @@ -1874,6 +1826,8 @@ public struct FfiConverterTypeOnchainPayment: FfiConverter { } + + public func FfiConverterTypeOnchainPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> OnchainPayment { return try FfiConverterTypeOnchainPayment.lift(pointer) } @@ -1887,55 +1841,71 @@ public func FfiConverterTypeOnchainPayment_lower(_ value: OnchainPayment) -> Uns public protocol SpontaneousPaymentProtocol : AnyObject { - func send(amountMsat: UInt64, nodeId: PublicKey) throws -> PaymentId + func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?) throws -> PaymentId func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws } -public class SpontaneousPayment: +open class SpontaneousPayment: SpontaneousPaymentProtocol { - fileprivate let pointer: UnsafeMutableRawPointer + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } // TODO: We'd like this to be `private` but for Swifty reasons, // we can't implement `FfiConverter` without making this `required` and we can't // make it `required` without making it `public`. 
- required init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { self.pointer = pointer } + /// This constructor can be used to instantiate a fake object. + /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + public func uniffiClonePointer() -> UnsafeMutableRawPointer { return try! rustCall { uniffi_ldk_node_fn_clone_spontaneouspayment(self.pointer, $0) } } + // No primary constructor declared for this class. deinit { + guard let pointer = pointer else { + return + } + try! rustCall { uniffi_ldk_node_fn_free_spontaneouspayment(pointer, $0) } } - - public func send(amountMsat: UInt64, nodeId: PublicKey) throws -> PaymentId { - return try FfiConverterTypePaymentId.lift( - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), +open func send(amountMsat: UInt64, nodeId: PublicKey, sendingParameters: SendingParameters?)throws -> PaymentId { + return try FfiConverterTypePaymentId.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), - FfiConverterTypePublicKey.lower(nodeId),$0 + FfiConverterTypePublicKey.lower(nodeId), + FfiConverterOptionTypeSendingParameters.lower(sendingParameters),$0 ) +}) } - ) - } - public func sendProbes(amountMsat: UInt64, nodeId: PublicKey) throws { - try - rustCallWithError(FfiConverterTypeNodeError.lift) { - 
uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), + +open func sendProbes(amountMsat: UInt64, nodeId: PublicKey)throws {try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_spontaneouspayment_send_probes(self.uniffiClonePointer(), FfiConverterUInt64.lower(amountMsat), FfiConverterTypePublicKey.lower(nodeId),$0 ) } - } +} + } @@ -1971,12 +1941,241 @@ public struct FfiConverterTypeSpontaneousPayment: FfiConverter { } -public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { - return try FfiConverterTypeSpontaneousPayment.lift(pointer) + + +public func FfiConverterTypeSpontaneousPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> SpontaneousPayment { + return try FfiConverterTypeSpontaneousPayment.lift(pointer) +} + +public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeSpontaneousPayment.lower(value) +} + + + + +public protocol UnifiedQrPaymentProtocol : AnyObject { + + func receive(amountSats: UInt64, message: String, expirySec: UInt32) throws -> String + + func send(uriStr: String) throws -> QrPaymentResult + +} + +open class UnifiedQrPayment: + UnifiedQrPaymentProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + /// This constructor can be used to instantiate a fake object. 
+ /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_unifiedqrpayment(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_unifiedqrpayment(pointer, $0) } + } + + + + +open func receive(amountSats: UInt64, message: String, expirySec: UInt32)throws -> String { + return try FfiConverterString.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_receive(self.uniffiClonePointer(), + FfiConverterUInt64.lower(amountSats), + FfiConverterString.lower(message), + FfiConverterUInt32.lower(expirySec),$0 + ) +}) +} + +open func send(uriStr: String)throws -> QrPaymentResult { + return try FfiConverterTypeQrPaymentResult.lift(try rustCallWithError(FfiConverterTypeNodeError.lift) { + uniffi_ldk_node_fn_method_unifiedqrpayment_send(self.uniffiClonePointer(), + FfiConverterString.lower(uriStr),$0 + ) +}) +} + + +} + +public struct FfiConverterTypeUnifiedQrPayment: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = UnifiedQrPayment + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> UnifiedQrPayment { + return UnifiedQrPayment(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: UnifiedQrPayment) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: 
Data.Index)) throws -> UnifiedQrPayment { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: UnifiedQrPayment, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +public func FfiConverterTypeUnifiedQrPayment_lift(_ pointer: UnsafeMutableRawPointer) throws -> UnifiedQrPayment { + return try FfiConverterTypeUnifiedQrPayment.lift(pointer) +} + +public func FfiConverterTypeUnifiedQrPayment_lower(_ value: UnifiedQrPayment) -> UnsafeMutableRawPointer { + return FfiConverterTypeUnifiedQrPayment.lower(value) +} + + + + +public protocol VssHeaderProviderProtocol : AnyObject { + + func getHeaders(request: [UInt8]) async throws -> [String: String] + +} + +open class VssHeaderProvider: + VssHeaderProviderProtocol { + fileprivate let pointer: UnsafeMutableRawPointer! + + /// Used to instantiate a [FFIObject] without an actual pointer, for fakes in tests, mostly. + public struct NoPointer { + public init() {} + } + + // TODO: We'd like this to be `private` but for Swifty reasons, + // we can't implement `FfiConverter` without making this `required` and we can't + // make it `required` without making it `public`. + required public init(unsafeFromRawPointer pointer: UnsafeMutableRawPointer) { + self.pointer = pointer + } + + /// This constructor can be used to instantiate a fake object. 
+ /// - Parameter noPointer: Placeholder value so we can have a constructor separate from the default empty one that may be implemented for classes extending [FFIObject]. + /// + /// - Warning: + /// Any object instantiated with this constructor cannot be passed to an actual Rust-backed object. Since there isn't a backing [Pointer] the FFI lower functions will crash. + public init(noPointer: NoPointer) { + self.pointer = nil + } + + public func uniffiClonePointer() -> UnsafeMutableRawPointer { + return try! rustCall { uniffi_ldk_node_fn_clone_vssheaderprovider(self.pointer, $0) } + } + // No primary constructor declared for this class. + + deinit { + guard let pointer = pointer else { + return + } + + try! rustCall { uniffi_ldk_node_fn_free_vssheaderprovider(pointer, $0) } + } + + + + +open func getHeaders(request: [UInt8])async throws -> [String: String] { + return + try await uniffiRustCallAsync( + rustFutureFunc: { + uniffi_ldk_node_fn_method_vssheaderprovider_get_headers( + self.uniffiClonePointer(), + FfiConverterSequenceUInt8.lower(request) + ) + }, + pollFunc: ffi_ldk_node_rust_future_poll_rust_buffer, + completeFunc: ffi_ldk_node_rust_future_complete_rust_buffer, + freeFunc: ffi_ldk_node_rust_future_free_rust_buffer, + liftFunc: FfiConverterDictionaryStringString.lift, + errorHandler: FfiConverterTypeVssHeaderProviderError.lift + ) +} + + +} + +public struct FfiConverterTypeVssHeaderProvider: FfiConverter { + + typealias FfiType = UnsafeMutableRawPointer + typealias SwiftType = VssHeaderProvider + + public static func lift(_ pointer: UnsafeMutableRawPointer) throws -> VssHeaderProvider { + return VssHeaderProvider(unsafeFromRawPointer: pointer) + } + + public static func lower(_ value: VssHeaderProvider) -> UnsafeMutableRawPointer { + return value.uniffiClonePointer() + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> VssHeaderProvider { + let v: UInt64 = try readInt(&buf) + // The Rust code won't compile if a 
pointer won't fit in a UInt64. + // We have to go via `UInt` because that's the thing that's the size of a pointer. + let ptr = UnsafeMutableRawPointer(bitPattern: UInt(truncatingIfNeeded: v)) + if (ptr == nil) { + throw UniffiInternalError.unexpectedNullPointer + } + return try lift(ptr!) + } + + public static func write(_ value: VssHeaderProvider, into buf: inout [UInt8]) { + // This fiddling is because `Int` is the thing that's the same size as a pointer. + // The Rust code won't compile if a pointer won't fit in a `UInt64`. + writeInt(&buf, UInt64(bitPattern: Int64(Int(bitPattern: lower(value))))) + } +} + + + + +public func FfiConverterTypeVssHeaderProvider_lift(_ pointer: UnsafeMutableRawPointer) throws -> VssHeaderProvider { + return try FfiConverterTypeVssHeaderProvider.lift(pointer) } -public func FfiConverterTypeSpontaneousPayment_lower(_ value: SpontaneousPayment) -> UnsafeMutableRawPointer { - return FfiConverterTypeSpontaneousPayment.lower(value) +public func FfiConverterTypeVssHeaderProvider_lower(_ value: VssHeaderProvider) -> UnsafeMutableRawPointer { + return FfiConverterTypeVssHeaderProvider.lower(value) } @@ -1986,15 +2185,14 @@ public struct AnchorChannelsConfig { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - trustedPeersNoReserve: [PublicKey], - perChannelReserveSats: UInt64) { + public init(trustedPeersNoReserve: [PublicKey], perChannelReserveSats: UInt64) { self.trustedPeersNoReserve = trustedPeersNoReserve self.perChannelReserveSats = perChannelReserveSats } } + extension AnchorChannelsConfig: Equatable, Hashable { public static func ==(lhs: AnchorChannelsConfig, rhs: AnchorChannelsConfig) -> Bool { if lhs.trustedPeersNoReserve != rhs.trustedPeersNoReserve { @@ -2048,13 +2246,7 @@ public struct BalanceDetails { // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - totalOnchainBalanceSats: UInt64, - spendableOnchainBalanceSats: UInt64, - totalAnchorChannelsReserveSats: UInt64, - totalLightningBalanceSats: UInt64, - lightningBalances: [LightningBalance], - pendingBalancesFromChannelClosures: [PendingSweepBalance]) { + public init(totalOnchainBalanceSats: UInt64, spendableOnchainBalanceSats: UInt64, totalAnchorChannelsReserveSats: UInt64, totalLightningBalanceSats: UInt64, lightningBalances: [LightningBalance], pendingBalancesFromChannelClosures: [PendingSweepBalance]) { self.totalOnchainBalanceSats = totalOnchainBalanceSats self.spendableOnchainBalanceSats = spendableOnchainBalanceSats self.totalAnchorChannelsReserveSats = totalAnchorChannelsReserveSats @@ -2065,6 +2257,7 @@ public struct BalanceDetails { } + extension BalanceDetails: Equatable, Hashable { public static func ==(lhs: BalanceDetails, rhs: BalanceDetails) -> Bool { if lhs.totalOnchainBalanceSats != rhs.totalOnchainBalanceSats { @@ -2138,15 +2331,14 @@ public struct BestBlock { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - blockHash: BlockHash, - height: UInt32) { + public init(blockHash: BlockHash, height: UInt32) { self.blockHash = blockHash self.height = height } } + extension BestBlock: Equatable, Hashable { public static func ==(lhs: BestBlock, rhs: BestBlock) -> Bool { if lhs.blockHash != rhs.blockHash { @@ -2190,6 +2382,95 @@ public func FfiConverterTypeBestBlock_lower(_ value: BestBlock) -> RustBuffer { } +public struct ChannelConfig { + public var forwardingFeeProportionalMillionths: UInt32 + public var forwardingFeeBaseMsat: UInt32 + public var cltvExpiryDelta: UInt16 + public var maxDustHtlcExposure: MaxDustHtlcExposure + public var forceCloseAvoidanceMaxFeeSatoshis: UInt64 + public var acceptUnderpayingHtlcs: Bool + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(forwardingFeeProportionalMillionths: UInt32, forwardingFeeBaseMsat: UInt32, cltvExpiryDelta: UInt16, maxDustHtlcExposure: MaxDustHtlcExposure, forceCloseAvoidanceMaxFeeSatoshis: UInt64, acceptUnderpayingHtlcs: Bool) { + self.forwardingFeeProportionalMillionths = forwardingFeeProportionalMillionths + self.forwardingFeeBaseMsat = forwardingFeeBaseMsat + self.cltvExpiryDelta = cltvExpiryDelta + self.maxDustHtlcExposure = maxDustHtlcExposure + self.forceCloseAvoidanceMaxFeeSatoshis = forceCloseAvoidanceMaxFeeSatoshis + self.acceptUnderpayingHtlcs = acceptUnderpayingHtlcs + } +} + + + +extension ChannelConfig: Equatable, Hashable { + public static func ==(lhs: ChannelConfig, rhs: ChannelConfig) -> Bool { + if lhs.forwardingFeeProportionalMillionths != rhs.forwardingFeeProportionalMillionths { + return false + } + if lhs.forwardingFeeBaseMsat != rhs.forwardingFeeBaseMsat { + return false + } + if lhs.cltvExpiryDelta != rhs.cltvExpiryDelta { + return false + } + if lhs.maxDustHtlcExposure != rhs.maxDustHtlcExposure { + return false + } + if lhs.forceCloseAvoidanceMaxFeeSatoshis != rhs.forceCloseAvoidanceMaxFeeSatoshis { + return false + } + if lhs.acceptUnderpayingHtlcs != rhs.acceptUnderpayingHtlcs { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(forwardingFeeProportionalMillionths) + hasher.combine(forwardingFeeBaseMsat) + hasher.combine(cltvExpiryDelta) + hasher.combine(maxDustHtlcExposure) + hasher.combine(forceCloseAvoidanceMaxFeeSatoshis) + hasher.combine(acceptUnderpayingHtlcs) + } +} + + +public struct FfiConverterTypeChannelConfig: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelConfig { + return + try ChannelConfig( + forwardingFeeProportionalMillionths: FfiConverterUInt32.read(from: &buf), + forwardingFeeBaseMsat: FfiConverterUInt32.read(from: &buf), + cltvExpiryDelta: FfiConverterUInt16.read(from: &buf), + 
maxDustHtlcExposure: FfiConverterTypeMaxDustHTLCExposure.read(from: &buf), + forceCloseAvoidanceMaxFeeSatoshis: FfiConverterUInt64.read(from: &buf), + acceptUnderpayingHtlcs: FfiConverterBool.read(from: &buf) + ) + } + + public static func write(_ value: ChannelConfig, into buf: inout [UInt8]) { + FfiConverterUInt32.write(value.forwardingFeeProportionalMillionths, into: &buf) + FfiConverterUInt32.write(value.forwardingFeeBaseMsat, into: &buf) + FfiConverterUInt16.write(value.cltvExpiryDelta, into: &buf) + FfiConverterTypeMaxDustHTLCExposure.write(value.maxDustHtlcExposure, into: &buf) + FfiConverterUInt64.write(value.forceCloseAvoidanceMaxFeeSatoshis, into: &buf) + FfiConverterBool.write(value.acceptUnderpayingHtlcs, into: &buf) + } +} + + +public func FfiConverterTypeChannelConfig_lift(_ buf: RustBuffer) throws -> ChannelConfig { + return try FfiConverterTypeChannelConfig.lift(buf) +} + +public func FfiConverterTypeChannelConfig_lower(_ value: ChannelConfig) -> RustBuffer { + return FfiConverterTypeChannelConfig.lower(value) +} + + public struct ChannelDetails { public var channelId: ChannelId public var counterpartyNodeId: PublicKey @@ -2205,7 +2486,7 @@ public struct ChannelDetails { public var isOutbound: Bool public var isChannelReady: Bool public var isUsable: Bool - public var isPublic: Bool + public var isAnnounced: Bool public var cltvExpiryDelta: UInt16? public var counterpartyUnspendablePunishmentReserve: UInt64 public var counterpartyOutboundHtlcMinimumMsat: UInt64? @@ -2222,35 +2503,7 @@ public struct ChannelDetails { // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - fundingTxo: OutPoint?, - channelValueSats: UInt64, - unspendablePunishmentReserve: UInt64?, - userChannelId: UserChannelId, - feerateSatPer1000Weight: UInt32, - outboundCapacityMsat: UInt64, - inboundCapacityMsat: UInt64, - confirmationsRequired: UInt32?, - confirmations: UInt32?, - isOutbound: Bool, - isChannelReady: Bool, - isUsable: Bool, - isPublic: Bool, - cltvExpiryDelta: UInt16?, - counterpartyUnspendablePunishmentReserve: UInt64, - counterpartyOutboundHtlcMinimumMsat: UInt64?, - counterpartyOutboundHtlcMaximumMsat: UInt64?, - counterpartyForwardingInfoFeeBaseMsat: UInt32?, - counterpartyForwardingInfoFeeProportionalMillionths: UInt32?, - counterpartyForwardingInfoCltvExpiryDelta: UInt16?, - nextOutboundHtlcLimitMsat: UInt64, - nextOutboundHtlcMinimumMsat: UInt64, - forceCloseSpendDelay: UInt16?, - inboundHtlcMinimumMsat: UInt64, - inboundHtlcMaximumMsat: UInt64?, - config: ChannelConfig) { + public init(channelId: ChannelId, counterpartyNodeId: PublicKey, fundingTxo: OutPoint?, channelValueSats: UInt64, unspendablePunishmentReserve: UInt64?, userChannelId: UserChannelId, feerateSatPer1000Weight: UInt32, outboundCapacityMsat: UInt64, inboundCapacityMsat: UInt64, confirmationsRequired: UInt32?, confirmations: UInt32?, isOutbound: Bool, isChannelReady: Bool, isUsable: Bool, isAnnounced: Bool, cltvExpiryDelta: UInt16?, counterpartyUnspendablePunishmentReserve: UInt64, counterpartyOutboundHtlcMinimumMsat: UInt64?, counterpartyOutboundHtlcMaximumMsat: UInt64?, counterpartyForwardingInfoFeeBaseMsat: UInt32?, counterpartyForwardingInfoFeeProportionalMillionths: UInt32?, counterpartyForwardingInfoCltvExpiryDelta: UInt16?, nextOutboundHtlcLimitMsat: UInt64, nextOutboundHtlcMinimumMsat: UInt64, forceCloseSpendDelay: UInt16?, inboundHtlcMinimumMsat: UInt64, inboundHtlcMaximumMsat: UInt64?, config: ChannelConfig) { self.channelId = channelId self.counterpartyNodeId = counterpartyNodeId 
self.fundingTxo = fundingTxo @@ -2265,7 +2518,7 @@ public struct ChannelDetails { self.isOutbound = isOutbound self.isChannelReady = isChannelReady self.isUsable = isUsable - self.isPublic = isPublic + self.isAnnounced = isAnnounced self.cltvExpiryDelta = cltvExpiryDelta self.counterpartyUnspendablePunishmentReserve = counterpartyUnspendablePunishmentReserve self.counterpartyOutboundHtlcMinimumMsat = counterpartyOutboundHtlcMinimumMsat @@ -2284,6 +2537,128 @@ public struct ChannelDetails { +extension ChannelDetails: Equatable, Hashable { + public static func ==(lhs: ChannelDetails, rhs: ChannelDetails) -> Bool { + if lhs.channelId != rhs.channelId { + return false + } + if lhs.counterpartyNodeId != rhs.counterpartyNodeId { + return false + } + if lhs.fundingTxo != rhs.fundingTxo { + return false + } + if lhs.channelValueSats != rhs.channelValueSats { + return false + } + if lhs.unspendablePunishmentReserve != rhs.unspendablePunishmentReserve { + return false + } + if lhs.userChannelId != rhs.userChannelId { + return false + } + if lhs.feerateSatPer1000Weight != rhs.feerateSatPer1000Weight { + return false + } + if lhs.outboundCapacityMsat != rhs.outboundCapacityMsat { + return false + } + if lhs.inboundCapacityMsat != rhs.inboundCapacityMsat { + return false + } + if lhs.confirmationsRequired != rhs.confirmationsRequired { + return false + } + if lhs.confirmations != rhs.confirmations { + return false + } + if lhs.isOutbound != rhs.isOutbound { + return false + } + if lhs.isChannelReady != rhs.isChannelReady { + return false + } + if lhs.isUsable != rhs.isUsable { + return false + } + if lhs.isAnnounced != rhs.isAnnounced { + return false + } + if lhs.cltvExpiryDelta != rhs.cltvExpiryDelta { + return false + } + if lhs.counterpartyUnspendablePunishmentReserve != rhs.counterpartyUnspendablePunishmentReserve { + return false + } + if lhs.counterpartyOutboundHtlcMinimumMsat != rhs.counterpartyOutboundHtlcMinimumMsat { + return false + } + if 
lhs.counterpartyOutboundHtlcMaximumMsat != rhs.counterpartyOutboundHtlcMaximumMsat { + return false + } + if lhs.counterpartyForwardingInfoFeeBaseMsat != rhs.counterpartyForwardingInfoFeeBaseMsat { + return false + } + if lhs.counterpartyForwardingInfoFeeProportionalMillionths != rhs.counterpartyForwardingInfoFeeProportionalMillionths { + return false + } + if lhs.counterpartyForwardingInfoCltvExpiryDelta != rhs.counterpartyForwardingInfoCltvExpiryDelta { + return false + } + if lhs.nextOutboundHtlcLimitMsat != rhs.nextOutboundHtlcLimitMsat { + return false + } + if lhs.nextOutboundHtlcMinimumMsat != rhs.nextOutboundHtlcMinimumMsat { + return false + } + if lhs.forceCloseSpendDelay != rhs.forceCloseSpendDelay { + return false + } + if lhs.inboundHtlcMinimumMsat != rhs.inboundHtlcMinimumMsat { + return false + } + if lhs.inboundHtlcMaximumMsat != rhs.inboundHtlcMaximumMsat { + return false + } + if lhs.config != rhs.config { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(channelId) + hasher.combine(counterpartyNodeId) + hasher.combine(fundingTxo) + hasher.combine(channelValueSats) + hasher.combine(unspendablePunishmentReserve) + hasher.combine(userChannelId) + hasher.combine(feerateSatPer1000Weight) + hasher.combine(outboundCapacityMsat) + hasher.combine(inboundCapacityMsat) + hasher.combine(confirmationsRequired) + hasher.combine(confirmations) + hasher.combine(isOutbound) + hasher.combine(isChannelReady) + hasher.combine(isUsable) + hasher.combine(isAnnounced) + hasher.combine(cltvExpiryDelta) + hasher.combine(counterpartyUnspendablePunishmentReserve) + hasher.combine(counterpartyOutboundHtlcMinimumMsat) + hasher.combine(counterpartyOutboundHtlcMaximumMsat) + hasher.combine(counterpartyForwardingInfoFeeBaseMsat) + hasher.combine(counterpartyForwardingInfoFeeProportionalMillionths) + hasher.combine(counterpartyForwardingInfoCltvExpiryDelta) + hasher.combine(nextOutboundHtlcLimitMsat) + 
hasher.combine(nextOutboundHtlcMinimumMsat) + hasher.combine(forceCloseSpendDelay) + hasher.combine(inboundHtlcMinimumMsat) + hasher.combine(inboundHtlcMaximumMsat) + hasher.combine(config) + } +} + + public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> ChannelDetails { return @@ -2302,7 +2677,7 @@ public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { isOutbound: FfiConverterBool.read(from: &buf), isChannelReady: FfiConverterBool.read(from: &buf), isUsable: FfiConverterBool.read(from: &buf), - isPublic: FfiConverterBool.read(from: &buf), + isAnnounced: FfiConverterBool.read(from: &buf), cltvExpiryDelta: FfiConverterOptionUInt16.read(from: &buf), counterpartyUnspendablePunishmentReserve: FfiConverterUInt64.read(from: &buf), counterpartyOutboundHtlcMinimumMsat: FfiConverterOptionUInt64.read(from: &buf), @@ -2334,7 +2709,7 @@ public struct FfiConverterTypeChannelDetails: FfiConverterRustBuffer { FfiConverterBool.write(value.isOutbound, into: &buf) FfiConverterBool.write(value.isChannelReady, into: &buf) FfiConverterBool.write(value.isUsable, into: &buf) - FfiConverterBool.write(value.isPublic, into: &buf) + FfiConverterBool.write(value.isAnnounced, into: &buf) FfiConverterOptionUInt16.write(value.cltvExpiryDelta, into: &buf) FfiConverterUInt64.write(value.counterpartyUnspendablePunishmentReserve, into: &buf) FfiConverterOptionUInt64.write(value.counterpartyOutboundHtlcMinimumMsat, into: &buf) @@ -2370,12 +2745,7 @@ public struct ChannelInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - nodeOne: NodeId, - oneToTwo: ChannelUpdateInfo?, - nodeTwo: NodeId, - twoToOne: ChannelUpdateInfo?, - capacitySats: UInt64?) { + public init(nodeOne: NodeId, oneToTwo: ChannelUpdateInfo?, nodeTwo: NodeId, twoToOne: ChannelUpdateInfo?, capacitySats: UInt64?) 
{ self.nodeOne = nodeOne self.oneToTwo = oneToTwo self.nodeTwo = nodeTwo @@ -2385,6 +2755,7 @@ public struct ChannelInfo { } + extension ChannelInfo: Equatable, Hashable { public static func ==(lhs: ChannelInfo, rhs: ChannelInfo) -> Bool { if lhs.nodeOne != rhs.nodeOne { @@ -2456,13 +2827,7 @@ public struct ChannelUpdateInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - lastUpdate: UInt32, - enabled: Bool, - cltvExpiryDelta: UInt16, - htlcMinimumMsat: UInt64, - htlcMaximumMsat: UInt64, - fees: RoutingFees) { + public init(lastUpdate: UInt32, enabled: Bool, cltvExpiryDelta: UInt16, htlcMinimumMsat: UInt64, htlcMaximumMsat: UInt64, fees: RoutingFees) { self.lastUpdate = lastUpdate self.enabled = enabled self.cltvExpiryDelta = cltvExpiryDelta @@ -2473,6 +2838,7 @@ public struct ChannelUpdateInfo { } + extension ChannelUpdateInfo: Equatable, Hashable { public static func ==(lhs: ChannelUpdateInfo, rhs: ChannelUpdateInfo) -> Bool { if lhs.lastUpdate != rhs.lastUpdate { @@ -2545,46 +2911,31 @@ public struct Config { public var logDirPath: String? public var network: Network public var listeningAddresses: [SocketAddress]? - public var defaultCltvExpiryDelta: UInt32 - public var onchainWalletSyncIntervalSecs: UInt64 - public var walletSyncIntervalSecs: UInt64 - public var feeRateCacheUpdateIntervalSecs: UInt64 + public var nodeAlias: NodeAlias? public var trustedPeers0conf: [PublicKey] public var probingLiquidityLimitMultiplier: UInt64 public var logLevel: LogLevel public var anchorChannelsConfig: AnchorChannelsConfig? + public var sendingParameters: SendingParameters? // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - storageDirPath: String, - logDirPath: String?, - network: Network, - listeningAddresses: [SocketAddress]?, - defaultCltvExpiryDelta: UInt32, - onchainWalletSyncIntervalSecs: UInt64, - walletSyncIntervalSecs: UInt64, - feeRateCacheUpdateIntervalSecs: UInt64, - trustedPeers0conf: [PublicKey], - probingLiquidityLimitMultiplier: UInt64, - logLevel: LogLevel, - anchorChannelsConfig: AnchorChannelsConfig?) { + public init(storageDirPath: String, logDirPath: String?, network: Network, listeningAddresses: [SocketAddress]?, nodeAlias: NodeAlias?, trustedPeers0conf: [PublicKey], probingLiquidityLimitMultiplier: UInt64, logLevel: LogLevel, anchorChannelsConfig: AnchorChannelsConfig?, sendingParameters: SendingParameters?) { self.storageDirPath = storageDirPath self.logDirPath = logDirPath self.network = network self.listeningAddresses = listeningAddresses - self.defaultCltvExpiryDelta = defaultCltvExpiryDelta - self.onchainWalletSyncIntervalSecs = onchainWalletSyncIntervalSecs - self.walletSyncIntervalSecs = walletSyncIntervalSecs - self.feeRateCacheUpdateIntervalSecs = feeRateCacheUpdateIntervalSecs + self.nodeAlias = nodeAlias self.trustedPeers0conf = trustedPeers0conf self.probingLiquidityLimitMultiplier = probingLiquidityLimitMultiplier self.logLevel = logLevel self.anchorChannelsConfig = anchorChannelsConfig + self.sendingParameters = sendingParameters } } + extension Config: Equatable, Hashable { public static func ==(lhs: Config, rhs: Config) -> Bool { if lhs.storageDirPath != rhs.storageDirPath { @@ -2599,16 +2950,7 @@ extension Config: Equatable, Hashable { if lhs.listeningAddresses != rhs.listeningAddresses { return false } - if lhs.defaultCltvExpiryDelta != rhs.defaultCltvExpiryDelta { - return false - } - if lhs.onchainWalletSyncIntervalSecs != rhs.onchainWalletSyncIntervalSecs { - return false - } - if lhs.walletSyncIntervalSecs != rhs.walletSyncIntervalSecs { - return false - } - if lhs.feeRateCacheUpdateIntervalSecs != 
rhs.feeRateCacheUpdateIntervalSecs { + if lhs.nodeAlias != rhs.nodeAlias { return false } if lhs.trustedPeers0conf != rhs.trustedPeers0conf { @@ -2623,6 +2965,9 @@ extension Config: Equatable, Hashable { if lhs.anchorChannelsConfig != rhs.anchorChannelsConfig { return false } + if lhs.sendingParameters != rhs.sendingParameters { + return false + } return true } @@ -2631,14 +2976,12 @@ extension Config: Equatable, Hashable { hasher.combine(logDirPath) hasher.combine(network) hasher.combine(listeningAddresses) - hasher.combine(defaultCltvExpiryDelta) - hasher.combine(onchainWalletSyncIntervalSecs) - hasher.combine(walletSyncIntervalSecs) - hasher.combine(feeRateCacheUpdateIntervalSecs) + hasher.combine(nodeAlias) hasher.combine(trustedPeers0conf) hasher.combine(probingLiquidityLimitMultiplier) hasher.combine(logLevel) hasher.combine(anchorChannelsConfig) + hasher.combine(sendingParameters) } } @@ -2651,14 +2994,12 @@ public struct FfiConverterTypeConfig: FfiConverterRustBuffer { logDirPath: FfiConverterOptionString.read(from: &buf), network: FfiConverterTypeNetwork.read(from: &buf), listeningAddresses: FfiConverterOptionSequenceTypeSocketAddress.read(from: &buf), - defaultCltvExpiryDelta: FfiConverterUInt32.read(from: &buf), - onchainWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), - walletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), - feeRateCacheUpdateIntervalSecs: FfiConverterUInt64.read(from: &buf), + nodeAlias: FfiConverterOptionTypeNodeAlias.read(from: &buf), trustedPeers0conf: FfiConverterSequenceTypePublicKey.read(from: &buf), probingLiquidityLimitMultiplier: FfiConverterUInt64.read(from: &buf), logLevel: FfiConverterTypeLogLevel.read(from: &buf), - anchorChannelsConfig: FfiConverterOptionTypeAnchorChannelsConfig.read(from: &buf) + anchorChannelsConfig: FfiConverterOptionTypeAnchorChannelsConfig.read(from: &buf), + sendingParameters: FfiConverterOptionTypeSendingParameters.read(from: &buf) ) } @@ -2667,14 +3008,12 @@ public struct 
FfiConverterTypeConfig: FfiConverterRustBuffer { FfiConverterOptionString.write(value.logDirPath, into: &buf) FfiConverterTypeNetwork.write(value.network, into: &buf) FfiConverterOptionSequenceTypeSocketAddress.write(value.listeningAddresses, into: &buf) - FfiConverterUInt32.write(value.defaultCltvExpiryDelta, into: &buf) - FfiConverterUInt64.write(value.onchainWalletSyncIntervalSecs, into: &buf) - FfiConverterUInt64.write(value.walletSyncIntervalSecs, into: &buf) - FfiConverterUInt64.write(value.feeRateCacheUpdateIntervalSecs, into: &buf) + FfiConverterOptionTypeNodeAlias.write(value.nodeAlias, into: &buf) FfiConverterSequenceTypePublicKey.write(value.trustedPeers0conf, into: &buf) FfiConverterUInt64.write(value.probingLiquidityLimitMultiplier, into: &buf) FfiConverterTypeLogLevel.write(value.logLevel, into: &buf) FfiConverterOptionTypeAnchorChannelsConfig.write(value.anchorChannelsConfig, into: &buf) + FfiConverterOptionTypeSendingParameters.write(value.sendingParameters, into: &buf) } } @@ -2688,21 +3027,85 @@ public func FfiConverterTypeConfig_lower(_ value: Config) -> RustBuffer { } +public struct EsploraSyncConfig { + public var onchainWalletSyncIntervalSecs: UInt64 + public var lightningWalletSyncIntervalSecs: UInt64 + public var feeRateCacheUpdateIntervalSecs: UInt64 + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(onchainWalletSyncIntervalSecs: UInt64, lightningWalletSyncIntervalSecs: UInt64, feeRateCacheUpdateIntervalSecs: UInt64) { + self.onchainWalletSyncIntervalSecs = onchainWalletSyncIntervalSecs + self.lightningWalletSyncIntervalSecs = lightningWalletSyncIntervalSecs + self.feeRateCacheUpdateIntervalSecs = feeRateCacheUpdateIntervalSecs + } +} + + + +extension EsploraSyncConfig: Equatable, Hashable { + public static func ==(lhs: EsploraSyncConfig, rhs: EsploraSyncConfig) -> Bool { + if lhs.onchainWalletSyncIntervalSecs != rhs.onchainWalletSyncIntervalSecs { + return false + } + if lhs.lightningWalletSyncIntervalSecs != rhs.lightningWalletSyncIntervalSecs { + return false + } + if lhs.feeRateCacheUpdateIntervalSecs != rhs.feeRateCacheUpdateIntervalSecs { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(onchainWalletSyncIntervalSecs) + hasher.combine(lightningWalletSyncIntervalSecs) + hasher.combine(feeRateCacheUpdateIntervalSecs) + } +} + + +public struct FfiConverterTypeEsploraSyncConfig: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> EsploraSyncConfig { + return + try EsploraSyncConfig( + onchainWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), + lightningWalletSyncIntervalSecs: FfiConverterUInt64.read(from: &buf), + feeRateCacheUpdateIntervalSecs: FfiConverterUInt64.read(from: &buf) + ) + } + + public static func write(_ value: EsploraSyncConfig, into buf: inout [UInt8]) { + FfiConverterUInt64.write(value.onchainWalletSyncIntervalSecs, into: &buf) + FfiConverterUInt64.write(value.lightningWalletSyncIntervalSecs, into: &buf) + FfiConverterUInt64.write(value.feeRateCacheUpdateIntervalSecs, into: &buf) + } +} + + +public func FfiConverterTypeEsploraSyncConfig_lift(_ buf: RustBuffer) throws -> EsploraSyncConfig { + return try FfiConverterTypeEsploraSyncConfig.lift(buf) +} + +public func FfiConverterTypeEsploraSyncConfig_lower(_ 
value: EsploraSyncConfig) -> RustBuffer { + return FfiConverterTypeEsploraSyncConfig.lower(value) +} + + public struct LspFeeLimits { public var maxTotalOpeningFeeMsat: UInt64? public var maxProportionalOpeningFeePpmMsat: UInt64? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - maxTotalOpeningFeeMsat: UInt64?, - maxProportionalOpeningFeePpmMsat: UInt64?) { + public init(maxTotalOpeningFeeMsat: UInt64?, maxProportionalOpeningFeePpmMsat: UInt64?) { self.maxTotalOpeningFeeMsat = maxTotalOpeningFeeMsat self.maxProportionalOpeningFeePpmMsat = maxProportionalOpeningFeePpmMsat } } + extension LspFeeLimits: Equatable, Hashable { public static func ==(lhs: LspFeeLimits, rhs: LspFeeLimits) -> Bool { if lhs.maxTotalOpeningFeeMsat != rhs.maxTotalOpeningFeeMsat { @@ -2753,10 +3156,7 @@ public struct NodeAnnouncementInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - lastUpdate: UInt32, - alias: String, - addresses: [SocketAddress]) { + public init(lastUpdate: UInt32, alias: String, addresses: [SocketAddress]) { self.lastUpdate = lastUpdate self.alias = alias self.addresses = addresses @@ -2764,6 +3164,7 @@ public struct NodeAnnouncementInfo { } + extension NodeAnnouncementInfo: Equatable, Hashable { public static func ==(lhs: NodeAnnouncementInfo, rhs: NodeAnnouncementInfo) -> Bool { if lhs.lastUpdate != rhs.lastUpdate { @@ -2819,15 +3220,14 @@ public struct NodeInfo { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - channels: [UInt64], - announcementInfo: NodeAnnouncementInfo?) { + public init(channels: [UInt64], announcementInfo: NodeAnnouncementInfo?) 
{ self.channels = channels self.announcementInfo = announcementInfo } } + extension NodeInfo: Equatable, Hashable { public static func ==(lhs: NodeInfo, rhs: NodeInfo) -> Bool { if lhs.channels != rhs.channels { @@ -2875,35 +3275,30 @@ public struct NodeStatus { public var isRunning: Bool public var isListening: Bool public var currentBestBlock: BestBlock - public var latestWalletSyncTimestamp: UInt64? + public var latestLightningWalletSyncTimestamp: UInt64? public var latestOnchainWalletSyncTimestamp: UInt64? public var latestFeeRateCacheUpdateTimestamp: UInt64? public var latestRgsSnapshotTimestamp: UInt64? public var latestNodeAnnouncementBroadcastTimestamp: UInt64? + public var latestChannelMonitorArchivalHeight: UInt32? // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - isRunning: Bool, - isListening: Bool, - currentBestBlock: BestBlock, - latestWalletSyncTimestamp: UInt64?, - latestOnchainWalletSyncTimestamp: UInt64?, - latestFeeRateCacheUpdateTimestamp: UInt64?, - latestRgsSnapshotTimestamp: UInt64?, - latestNodeAnnouncementBroadcastTimestamp: UInt64?) { + public init(isRunning: Bool, isListening: Bool, currentBestBlock: BestBlock, latestLightningWalletSyncTimestamp: UInt64?, latestOnchainWalletSyncTimestamp: UInt64?, latestFeeRateCacheUpdateTimestamp: UInt64?, latestRgsSnapshotTimestamp: UInt64?, latestNodeAnnouncementBroadcastTimestamp: UInt64?, latestChannelMonitorArchivalHeight: UInt32?) 
{ self.isRunning = isRunning self.isListening = isListening self.currentBestBlock = currentBestBlock - self.latestWalletSyncTimestamp = latestWalletSyncTimestamp + self.latestLightningWalletSyncTimestamp = latestLightningWalletSyncTimestamp self.latestOnchainWalletSyncTimestamp = latestOnchainWalletSyncTimestamp self.latestFeeRateCacheUpdateTimestamp = latestFeeRateCacheUpdateTimestamp self.latestRgsSnapshotTimestamp = latestRgsSnapshotTimestamp self.latestNodeAnnouncementBroadcastTimestamp = latestNodeAnnouncementBroadcastTimestamp + self.latestChannelMonitorArchivalHeight = latestChannelMonitorArchivalHeight } } + extension NodeStatus: Equatable, Hashable { public static func ==(lhs: NodeStatus, rhs: NodeStatus) -> Bool { if lhs.isRunning != rhs.isRunning { @@ -2915,7 +3310,7 @@ extension NodeStatus: Equatable, Hashable { if lhs.currentBestBlock != rhs.currentBestBlock { return false } - if lhs.latestWalletSyncTimestamp != rhs.latestWalletSyncTimestamp { + if lhs.latestLightningWalletSyncTimestamp != rhs.latestLightningWalletSyncTimestamp { return false } if lhs.latestOnchainWalletSyncTimestamp != rhs.latestOnchainWalletSyncTimestamp { @@ -2930,6 +3325,9 @@ extension NodeStatus: Equatable, Hashable { if lhs.latestNodeAnnouncementBroadcastTimestamp != rhs.latestNodeAnnouncementBroadcastTimestamp { return false } + if lhs.latestChannelMonitorArchivalHeight != rhs.latestChannelMonitorArchivalHeight { + return false + } return true } @@ -2937,11 +3335,12 @@ extension NodeStatus: Equatable, Hashable { hasher.combine(isRunning) hasher.combine(isListening) hasher.combine(currentBestBlock) - hasher.combine(latestWalletSyncTimestamp) + hasher.combine(latestLightningWalletSyncTimestamp) hasher.combine(latestOnchainWalletSyncTimestamp) hasher.combine(latestFeeRateCacheUpdateTimestamp) hasher.combine(latestRgsSnapshotTimestamp) hasher.combine(latestNodeAnnouncementBroadcastTimestamp) + hasher.combine(latestChannelMonitorArchivalHeight) } } @@ -2953,11 +3352,12 @@ public 
struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { isRunning: FfiConverterBool.read(from: &buf), isListening: FfiConverterBool.read(from: &buf), currentBestBlock: FfiConverterTypeBestBlock.read(from: &buf), - latestWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestLightningWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestOnchainWalletSyncTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestFeeRateCacheUpdateTimestamp: FfiConverterOptionUInt64.read(from: &buf), latestRgsSnapshotTimestamp: FfiConverterOptionUInt64.read(from: &buf), - latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf) + latestNodeAnnouncementBroadcastTimestamp: FfiConverterOptionUInt64.read(from: &buf), + latestChannelMonitorArchivalHeight: FfiConverterOptionUInt32.read(from: &buf) ) } @@ -2965,11 +3365,12 @@ public struct FfiConverterTypeNodeStatus: FfiConverterRustBuffer { FfiConverterBool.write(value.isRunning, into: &buf) FfiConverterBool.write(value.isListening, into: &buf) FfiConverterTypeBestBlock.write(value.currentBestBlock, into: &buf) - FfiConverterOptionUInt64.write(value.latestWalletSyncTimestamp, into: &buf) + FfiConverterOptionUInt64.write(value.latestLightningWalletSyncTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestOnchainWalletSyncTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestFeeRateCacheUpdateTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestRgsSnapshotTimestamp, into: &buf) FfiConverterOptionUInt64.write(value.latestNodeAnnouncementBroadcastTimestamp, into: &buf) + FfiConverterOptionUInt32.write(value.latestChannelMonitorArchivalHeight, into: &buf) } } @@ -2989,15 +3390,14 @@ public struct OutPoint { // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - txid: Txid, - vout: UInt32) { + public init(txid: Txid, vout: UInt32) { self.txid = txid self.vout = vout } } + extension OutPoint: Equatable, Hashable { public static func ==(lhs: OutPoint, rhs: OutPoint) -> Bool { if lhs.txid != rhs.txid { @@ -3051,13 +3451,7 @@ public struct PaymentDetails { // Default memberwise initializers are never public by default, so we // declare one manually. - public init( - id: PaymentId, - kind: PaymentKind, - amountMsat: UInt64?, - direction: PaymentDirection, - status: PaymentStatus, - latestUpdateTimestamp: UInt64) { + public init(id: PaymentId, kind: PaymentKind, amountMsat: UInt64?, direction: PaymentDirection, status: PaymentStatus, latestUpdateTimestamp: UInt64) { self.id = id self.kind = kind self.amountMsat = amountMsat @@ -3068,6 +3462,7 @@ public struct PaymentDetails { } + extension PaymentDetails: Equatable, Hashable { public static func ==(lhs: PaymentDetails, rhs: PaymentDetails) -> Bool { if lhs.id != rhs.id { @@ -3143,11 +3538,7 @@ public struct PeerDetails { // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - nodeId: PublicKey, - address: SocketAddress, - isPersisted: Bool, - isConnected: Bool) { + public init(nodeId: PublicKey, address: SocketAddress, isPersisted: Bool, isConnected: Bool) { self.nodeId = nodeId self.address = address self.isPersisted = isPersisted @@ -3156,6 +3547,7 @@ public struct PeerDetails { } + extension PeerDetails: Equatable, Hashable { public static func ==(lhs: PeerDetails, rhs: PeerDetails) -> Bool { if lhs.nodeId != rhs.nodeId { @@ -3206,69 +3598,210 @@ public func FfiConverterTypePeerDetails_lift(_ buf: RustBuffer) throws -> PeerDe return try FfiConverterTypePeerDetails.lift(buf) } -public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffer { - return FfiConverterTypePeerDetails.lower(value) +public func FfiConverterTypePeerDetails_lower(_ value: PeerDetails) -> RustBuffer { + return FfiConverterTypePeerDetails.lower(value) +} + + +public struct RoutingFees { + public var baseMsat: UInt32 + public var proportionalMillionths: UInt32 + + // Default memberwise initializers are never public by default, so we + // declare one manually. 
+ public init(baseMsat: UInt32, proportionalMillionths: UInt32) { + self.baseMsat = baseMsat + self.proportionalMillionths = proportionalMillionths + } +} + + + +extension RoutingFees: Equatable, Hashable { + public static func ==(lhs: RoutingFees, rhs: RoutingFees) -> Bool { + if lhs.baseMsat != rhs.baseMsat { + return false + } + if lhs.proportionalMillionths != rhs.proportionalMillionths { + return false + } + return true + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(baseMsat) + hasher.combine(proportionalMillionths) + } +} + + +public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RoutingFees { + return + try RoutingFees( + baseMsat: FfiConverterUInt32.read(from: &buf), + proportionalMillionths: FfiConverterUInt32.read(from: &buf) + ) + } + + public static func write(_ value: RoutingFees, into buf: inout [UInt8]) { + FfiConverterUInt32.write(value.baseMsat, into: &buf) + FfiConverterUInt32.write(value.proportionalMillionths, into: &buf) + } +} + + +public func FfiConverterTypeRoutingFees_lift(_ buf: RustBuffer) throws -> RoutingFees { + return try FfiConverterTypeRoutingFees.lift(buf) +} + +public func FfiConverterTypeRoutingFees_lower(_ value: RoutingFees) -> RustBuffer { + return FfiConverterTypeRoutingFees.lower(value) } -public struct RoutingFees { - public var baseMsat: UInt32 - public var proportionalMillionths: UInt32 +public struct SendingParameters { + public var maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit? + public var maxTotalCltvExpiryDelta: UInt32? + public var maxPathCount: UInt8? + public var maxChannelSaturationPowerOfHalf: UInt8? // Default memberwise initializers are never public by default, so we // declare one manually. 
- public init( - baseMsat: UInt32, - proportionalMillionths: UInt32) { - self.baseMsat = baseMsat - self.proportionalMillionths = proportionalMillionths + public init(maxTotalRoutingFeeMsat: MaxTotalRoutingFeeLimit?, maxTotalCltvExpiryDelta: UInt32?, maxPathCount: UInt8?, maxChannelSaturationPowerOfHalf: UInt8?) { + self.maxTotalRoutingFeeMsat = maxTotalRoutingFeeMsat + self.maxTotalCltvExpiryDelta = maxTotalCltvExpiryDelta + self.maxPathCount = maxPathCount + self.maxChannelSaturationPowerOfHalf = maxChannelSaturationPowerOfHalf } } -extension RoutingFees: Equatable, Hashable { - public static func ==(lhs: RoutingFees, rhs: RoutingFees) -> Bool { - if lhs.baseMsat != rhs.baseMsat { + +extension SendingParameters: Equatable, Hashable { + public static func ==(lhs: SendingParameters, rhs: SendingParameters) -> Bool { + if lhs.maxTotalRoutingFeeMsat != rhs.maxTotalRoutingFeeMsat { return false } - if lhs.proportionalMillionths != rhs.proportionalMillionths { + if lhs.maxTotalCltvExpiryDelta != rhs.maxTotalCltvExpiryDelta { + return false + } + if lhs.maxPathCount != rhs.maxPathCount { + return false + } + if lhs.maxChannelSaturationPowerOfHalf != rhs.maxChannelSaturationPowerOfHalf { return false } return true } public func hash(into hasher: inout Hasher) { - hasher.combine(baseMsat) - hasher.combine(proportionalMillionths) + hasher.combine(maxTotalRoutingFeeMsat) + hasher.combine(maxTotalCltvExpiryDelta) + hasher.combine(maxPathCount) + hasher.combine(maxChannelSaturationPowerOfHalf) } } -public struct FfiConverterTypeRoutingFees: FfiConverterRustBuffer { - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> RoutingFees { +public struct FfiConverterTypeSendingParameters: FfiConverterRustBuffer { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SendingParameters { return - try RoutingFees( - baseMsat: FfiConverterUInt32.read(from: &buf), - proportionalMillionths: FfiConverterUInt32.read(from: &buf) 
+ try SendingParameters( + maxTotalRoutingFeeMsat: FfiConverterOptionTypeMaxTotalRoutingFeeLimit.read(from: &buf), + maxTotalCltvExpiryDelta: FfiConverterOptionUInt32.read(from: &buf), + maxPathCount: FfiConverterOptionUInt8.read(from: &buf), + maxChannelSaturationPowerOfHalf: FfiConverterOptionUInt8.read(from: &buf) ) } - public static func write(_ value: RoutingFees, into buf: inout [UInt8]) { - FfiConverterUInt32.write(value.baseMsat, into: &buf) - FfiConverterUInt32.write(value.proportionalMillionths, into: &buf) + public static func write(_ value: SendingParameters, into buf: inout [UInt8]) { + FfiConverterOptionTypeMaxTotalRoutingFeeLimit.write(value.maxTotalRoutingFeeMsat, into: &buf) + FfiConverterOptionUInt32.write(value.maxTotalCltvExpiryDelta, into: &buf) + FfiConverterOptionUInt8.write(value.maxPathCount, into: &buf) + FfiConverterOptionUInt8.write(value.maxChannelSaturationPowerOfHalf, into: &buf) } } -public func FfiConverterTypeRoutingFees_lift(_ buf: RustBuffer) throws -> RoutingFees { - return try FfiConverterTypeRoutingFees.lift(buf) +public func FfiConverterTypeSendingParameters_lift(_ buf: RustBuffer) throws -> SendingParameters { + return try FfiConverterTypeSendingParameters.lift(buf) } -public func FfiConverterTypeRoutingFees_lower(_ value: RoutingFees) -> RustBuffer { - return FfiConverterTypeRoutingFees.lower(value) +public func FfiConverterTypeSendingParameters_lower(_ value: SendingParameters) -> RustBuffer { + return FfiConverterTypeSendingParameters.lower(value) +} + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ +public enum BalanceSource { + + case holderForceClosed + case counterpartyForceClosed + case coopClose + case htlc +} + + +public struct FfiConverterTypeBalanceSource: FfiConverterRustBuffer { + typealias SwiftType = BalanceSource + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> BalanceSource { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .holderForceClosed + + case 2: return .counterpartyForceClosed + + case 3: return .coopClose + + case 4: return .htlc + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: BalanceSource, into buf: inout [UInt8]) { + switch value { + + + case .holderForceClosed: + writeInt(&buf, Int32(1)) + + + case .counterpartyForceClosed: + writeInt(&buf, Int32(2)) + + + case .coopClose: + writeInt(&buf, Int32(3)) + + + case .htlc: + writeInt(&buf, Int32(4)) + + } + } +} + + +public func FfiConverterTypeBalanceSource_lift(_ buf: RustBuffer) throws -> BalanceSource { + return try FfiConverterTypeBalanceSource.lift(buf) +} + +public func FfiConverterTypeBalanceSource_lower(_ value: BalanceSource) -> RustBuffer { + return FfiConverterTypeBalanceSource.lower(value) } + +extension BalanceSource: Equatable, Hashable {} + + + + public enum BuildError { @@ -3283,6 +3816,8 @@ public enum BuildError { case InvalidListeningAddresses(message: String) + case InvalidNodeAlias(message: String) + case ReadFailed(message: String) case WriteFailed(message: String) @@ -3295,10 +3830,6 @@ public enum BuildError { case LoggerSetupFailed(message: String) - - fileprivate static func uniffiErrorHandler(_ error: RustBuffer) throws -> Error { - return try FfiConverterTypeBuildError.lift(error) - } } @@ -3332,27 +3863,31 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 6: return .ReadFailed( + case 6: return .InvalidNodeAlias( message: try FfiConverterString.read(from: 
&buf) ) - case 7: return .WriteFailed( + case 7: return .ReadFailed( message: try FfiConverterString.read(from: &buf) ) - case 8: return .StoragePathAccessFailed( + case 8: return .WriteFailed( message: try FfiConverterString.read(from: &buf) ) - case 9: return .KvStoreSetupFailed( + case 9: return .StoragePathAccessFailed( message: try FfiConverterString.read(from: &buf) ) - case 10: return .WalletSetupFailed( + case 10: return .KvStoreSetupFailed( message: try FfiConverterString.read(from: &buf) ) - case 11: return .LoggerSetupFailed( + case 11: return .WalletSetupFailed( + message: try FfiConverterString.read(from: &buf) + ) + + case 12: return .LoggerSetupFailed( message: try FfiConverterString.read(from: &buf) ) @@ -3377,18 +3912,20 @@ public struct FfiConverterTypeBuildError: FfiConverterRustBuffer { writeInt(&buf, Int32(4)) case .InvalidListeningAddresses(_ /* message is ignored*/): writeInt(&buf, Int32(5)) - case .ReadFailed(_ /* message is ignored*/): + case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(6)) - case .WriteFailed(_ /* message is ignored*/): + case .ReadFailed(_ /* message is ignored*/): writeInt(&buf, Int32(7)) - case .StoragePathAccessFailed(_ /* message is ignored*/): + case .WriteFailed(_ /* message is ignored*/): writeInt(&buf, Int32(8)) - case .KvStoreSetupFailed(_ /* message is ignored*/): + case .StoragePathAccessFailed(_ /* message is ignored*/): writeInt(&buf, Int32(9)) - case .WalletSetupFailed(_ /* message is ignored*/): + case .KvStoreSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(10)) - case .LoggerSetupFailed(_ /* message is ignored*/): + case .WalletSetupFailed(_ /* message is ignored*/): writeInt(&buf, Int32(11)) + case .LoggerSetupFailed(_ /* message is ignored*/): + writeInt(&buf, Int32(12)) } @@ -3402,27 +3939,30 @@ extension BuildError: Error { } // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ public enum ClosureReason { - case counterpartyForceClosed( - peerMsg: UntrustedString + case counterpartyForceClosed(peerMsg: UntrustedString + ) + case holderForceClosed(broadcastedLatestTxn: Bool? ) - case holderForceClosed case legacyCooperativeClosure case counterpartyInitiatedCooperativeClosure case locallyInitiatedCooperativeClosure case commitmentTxConfirmed case fundingTimedOut - case processingError( - err: String + case processingError(err: String ) case disconnectedPeer case outdatedChannelManager case counterpartyCoopClosedUnfundedChannel case fundingBatchClosure case htlCsTimedOut + case peerFeerateTooLow(peerFeerateSatPerKw: UInt32, requiredFeerateSatPerKw: UInt32 + ) } + public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { typealias SwiftType = ClosureReason @@ -3430,11 +3970,11 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .counterpartyForceClosed( - peerMsg: try FfiConverterTypeUntrustedString.read(from: &buf) + case 1: return .counterpartyForceClosed(peerMsg: try FfiConverterTypeUntrustedString.read(from: &buf) ) - case 2: return .holderForceClosed + case 2: return .holderForceClosed(broadcastedLatestTxn: try FfiConverterOptionBool.read(from: &buf) + ) case 3: return .legacyCooperativeClosure @@ -3446,8 +3986,7 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 7: return .fundingTimedOut - case 8: return .processingError( - err: try FfiConverterString.read(from: &buf) + case 8: return .processingError(err: try FfiConverterString.read(from: &buf) ) case 9: return .disconnectedPeer @@ -3460,6 +3999,9 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case 13: return .htlCsTimedOut + case 14: return .peerFeerateTooLow(peerFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf), requiredFeerateSatPerKw: try FfiConverterUInt32.read(from: &buf) + ) + default: throw 
UniffiInternalError.unexpectedEnumCase } } @@ -3473,9 +4015,10 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { FfiConverterTypeUntrustedString.write(peerMsg, into: &buf) - case .holderForceClosed: + case let .holderForceClosed(broadcastedLatestTxn): writeInt(&buf, Int32(2)) - + FfiConverterOptionBool.write(broadcastedLatestTxn, into: &buf) + case .legacyCooperativeClosure: writeInt(&buf, Int32(3)) @@ -3521,6 +4064,12 @@ public struct FfiConverterTypeClosureReason: FfiConverterRustBuffer { case .htlCsTimedOut: writeInt(&buf, Int32(13)) + + case let .peerFeerateTooLow(peerFeerateSatPerKw,requiredFeerateSatPerKw): + writeInt(&buf, Int32(14)) + FfiConverterUInt32.write(peerFeerateSatPerKw, into: &buf) + FfiConverterUInt32.write(requiredFeerateSatPerKw, into: &buf) + } } } @@ -3535,55 +4084,33 @@ public func FfiConverterTypeClosureReason_lower(_ value: ClosureReason) -> RustB } + extension ClosureReason: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum Event { - case paymentSuccessful( - paymentId: PaymentId?, - paymentHash: PaymentHash, - feePaidMsat: UInt64? + case paymentSuccessful(paymentId: PaymentId?, paymentHash: PaymentHash, feePaidMsat: UInt64? ) - case paymentFailed( - paymentId: PaymentId?, - paymentHash: PaymentHash, - reason: PaymentFailureReason? + case paymentFailed(paymentId: PaymentId?, paymentHash: PaymentHash?, reason: PaymentFailureReason? ) - case paymentReceived( - paymentId: PaymentId?, - paymentHash: PaymentHash, - amountMsat: UInt64 + case paymentReceived(paymentId: PaymentId?, paymentHash: PaymentHash, amountMsat: UInt64 ) - case paymentClaimable( - paymentId: PaymentId, - paymentHash: PaymentHash, - claimableAmountMsat: UInt64, - claimDeadline: UInt32? + case paymentClaimable(paymentId: PaymentId, paymentHash: PaymentHash, claimableAmountMsat: UInt64, claimDeadline: UInt32? 
) - case channelPending( - channelId: ChannelId, - userChannelId: UserChannelId, - formerTemporaryChannelId: ChannelId, - counterpartyNodeId: PublicKey, - fundingTxo: OutPoint + case channelPending(channelId: ChannelId, userChannelId: UserChannelId, formerTemporaryChannelId: ChannelId, counterpartyNodeId: PublicKey, fundingTxo: OutPoint ) - case channelReady( - channelId: ChannelId, - userChannelId: UserChannelId, - counterpartyNodeId: PublicKey? + case channelReady(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey? ) - case channelClosed( - channelId: ChannelId, - userChannelId: UserChannelId, - counterpartyNodeId: PublicKey?, - reason: ClosureReason? + case channelClosed(channelId: ChannelId, userChannelId: UserChannelId, counterpartyNodeId: PublicKey?, reason: ClosureReason? ) } + public struct FfiConverterTypeEvent: FfiConverterRustBuffer { typealias SwiftType = Event @@ -3591,50 +4118,25 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .paymentSuccessful( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - feePaidMsat: try FfiConverterOptionUInt64.read(from: &buf) + case 1: return .paymentSuccessful(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), feePaidMsat: try FfiConverterOptionUInt64.read(from: &buf) ) - case 2: return .paymentFailed( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - reason: try FfiConverterOptionTypePaymentFailureReason.read(from: &buf) + case 2: return .paymentFailed(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterOptionTypePaymentHash.read(from: &buf), reason: try FfiConverterOptionTypePaymentFailureReason.read(from: &buf) ) - case 
3: return .paymentReceived( - paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - amountMsat: try FfiConverterUInt64.read(from: &buf) + case 3: return .paymentReceived(paymentId: try FfiConverterOptionTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), amountMsat: try FfiConverterUInt64.read(from: &buf) ) - case 4: return .paymentClaimable( - paymentId: try FfiConverterTypePaymentId.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - claimableAmountMsat: try FfiConverterUInt64.read(from: &buf), - claimDeadline: try FfiConverterOptionUInt32.read(from: &buf) + case 4: return .paymentClaimable(paymentId: try FfiConverterTypePaymentId.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), claimableAmountMsat: try FfiConverterUInt64.read(from: &buf), claimDeadline: try FfiConverterOptionUInt32.read(from: &buf) ) - case 5: return .channelPending( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - formerTemporaryChannelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - fundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) + case 5: return .channelPending(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), formerTemporaryChannelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), fundingTxo: try FfiConverterTypeOutPoint.read(from: &buf) ) - case 6: return .channelReady( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf) + case 6: return 
.channelReady(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf) ) - case 7: return .channelClosed( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), - reason: try FfiConverterOptionTypeClosureReason.read(from: &buf) + case 7: return .channelClosed(channelId: try FfiConverterTypeChannelId.read(from: &buf), userChannelId: try FfiConverterTypeUserChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterOptionTypePublicKey.read(from: &buf), reason: try FfiConverterOptionTypeClosureReason.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -3655,7 +4157,7 @@ public struct FfiConverterTypeEvent: FfiConverterRustBuffer { case let .paymentFailed(paymentId,paymentHash,reason): writeInt(&buf, Int32(2)) FfiConverterOptionTypePaymentId.write(paymentId, into: &buf) - FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + FfiConverterOptionTypePaymentHash.write(paymentHash, into: &buf) FfiConverterOptionTypePaymentFailureReason.write(reason, into: &buf) @@ -3711,54 +4213,31 @@ public func FfiConverterTypeEvent_lower(_ value: Event) -> RustBuffer { } + extension Event: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ public enum LightningBalance { - case claimableOnChannelClose( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64 + case claimableOnChannelClose(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, transactionFeeSatoshis: UInt64, outboundPaymentHtlcRoundedMsat: UInt64, outboundForwardedHtlcRoundedMsat: UInt64, inboundClaimingHtlcRoundedMsat: UInt64, inboundHtlcRoundedMsat: UInt64 ) - case claimableAwaitingConfirmations( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - confirmationHeight: UInt32 + case claimableAwaitingConfirmations(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, confirmationHeight: UInt32, source: BalanceSource ) - case contentiousClaimable( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - timeoutHeight: UInt32, - paymentHash: PaymentHash, - paymentPreimage: PaymentPreimage + case contentiousClaimable(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, timeoutHeight: UInt32, paymentHash: PaymentHash, paymentPreimage: PaymentPreimage ) - case maybeTimeoutClaimableHtlc( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - claimableHeight: UInt32, - paymentHash: PaymentHash + case maybeTimeoutClaimableHtlc(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, claimableHeight: UInt32, paymentHash: PaymentHash, outboundPayment: Bool ) - case maybePreimageClaimableHtlc( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64, - expiryHeight: UInt32, - paymentHash: PaymentHash + case maybePreimageClaimableHtlc(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64, expiryHeight: UInt32, paymentHash: PaymentHash ) - case counterpartyRevokedOutputClaimable( - channelId: ChannelId, - counterpartyNodeId: PublicKey, - amountSatoshis: UInt64 + case 
counterpartyRevokedOutputClaimable(channelId: ChannelId, counterpartyNodeId: PublicKey, amountSatoshis: UInt64 ) } + public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { typealias SwiftType = LightningBalance @@ -3766,48 +4245,22 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .claimableOnChannelClose( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 1: return .claimableOnChannelClose(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), transactionFeeSatoshis: try FfiConverterUInt64.read(from: &buf), outboundPaymentHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), outboundForwardedHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), inboundClaimingHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf), inboundHtlcRoundedMsat: try FfiConverterUInt64.read(from: &buf) ) - case 2: return .claimableAwaitingConfirmations( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - confirmationHeight: try FfiConverterUInt32.read(from: &buf) + case 2: return .claimableAwaitingConfirmations(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), confirmationHeight: try FfiConverterUInt32.read(from: &buf), source: try FfiConverterTypeBalanceSource.read(from: &buf) ) - case 3: return .contentiousClaimable( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try 
FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - timeoutHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), - paymentPreimage: try FfiConverterTypePaymentPreimage.read(from: &buf) + case 3: return .contentiousClaimable(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), timeoutHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), paymentPreimage: try FfiConverterTypePaymentPreimage.read(from: &buf) ) - case 4: return .maybeTimeoutClaimableHtlc( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - claimableHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) + case 4: return .maybeTimeoutClaimableHtlc(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), claimableHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf), outboundPayment: try FfiConverterBool.read(from: &buf) ) - case 5: return .maybePreimageClaimableHtlc( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf), - expiryHeight: try FfiConverterUInt32.read(from: &buf), - paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) + case 5: return .maybePreimageClaimableHtlc(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: 
&buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf), expiryHeight: try FfiConverterUInt32.read(from: &buf), paymentHash: try FfiConverterTypePaymentHash.read(from: &buf) ) - case 6: return .counterpartyRevokedOutputClaimable( - channelId: try FfiConverterTypeChannelId.read(from: &buf), - counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 6: return .counterpartyRevokedOutputClaimable(channelId: try FfiConverterTypeChannelId.read(from: &buf), counterpartyNodeId: try FfiConverterTypePublicKey.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -3818,19 +4271,25 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { switch value { - case let .claimableOnChannelClose(channelId,counterpartyNodeId,amountSatoshis): + case let .claimableOnChannelClose(channelId,counterpartyNodeId,amountSatoshis,transactionFeeSatoshis,outboundPaymentHtlcRoundedMsat,outboundForwardedHtlcRoundedMsat,inboundClaimingHtlcRoundedMsat,inboundHtlcRoundedMsat): writeInt(&buf, Int32(1)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) + FfiConverterUInt64.write(transactionFeeSatoshis, into: &buf) + FfiConverterUInt64.write(outboundPaymentHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(outboundForwardedHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(inboundClaimingHtlcRoundedMsat, into: &buf) + FfiConverterUInt64.write(inboundHtlcRoundedMsat, into: &buf) - case let .claimableAwaitingConfirmations(channelId,counterpartyNodeId,amountSatoshis,confirmationHeight): + case let .claimableAwaitingConfirmations(channelId,counterpartyNodeId,amountSatoshis,confirmationHeight,source): writeInt(&buf, Int32(2)) FfiConverterTypeChannelId.write(channelId, into: &buf) 
FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) FfiConverterUInt32.write(confirmationHeight, into: &buf) + FfiConverterTypeBalanceSource.write(source, into: &buf) case let .contentiousClaimable(channelId,counterpartyNodeId,amountSatoshis,timeoutHeight,paymentHash,paymentPreimage): @@ -3843,130 +4302,255 @@ public struct FfiConverterTypeLightningBalance: FfiConverterRustBuffer { FfiConverterTypePaymentPreimage.write(paymentPreimage, into: &buf) - case let .maybeTimeoutClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,claimableHeight,paymentHash): + case let .maybeTimeoutClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,claimableHeight,paymentHash,outboundPayment): writeInt(&buf, Int32(4)) FfiConverterTypeChannelId.write(channelId, into: &buf) FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) FfiConverterUInt64.write(amountSatoshis, into: &buf) FfiConverterUInt32.write(claimableHeight, into: &buf) FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + FfiConverterBool.write(outboundPayment, into: &buf) + + + case let .maybePreimageClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,expiryHeight,paymentHash): + writeInt(&buf, Int32(5)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + FfiConverterUInt32.write(expiryHeight, into: &buf) + FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + + + case let .counterpartyRevokedOutputClaimable(channelId,counterpartyNodeId,amountSatoshis): + writeInt(&buf, Int32(6)) + FfiConverterTypeChannelId.write(channelId, into: &buf) + FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + } + } +} + + +public func FfiConverterTypeLightningBalance_lift(_ buf: RustBuffer) throws -> LightningBalance { + return try 
FfiConverterTypeLightningBalance.lift(buf) +} + +public func FfiConverterTypeLightningBalance_lower(_ value: LightningBalance) -> RustBuffer { + return FfiConverterTypeLightningBalance.lower(value) +} + + + +extension LightningBalance: Equatable, Hashable {} + + + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum LogLevel { + + case gossip + case trace + case debug + case info + case warn + case error +} + + +public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { + typealias SwiftType = LogLevel + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LogLevel { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .gossip + + case 2: return .trace + + case 3: return .debug + + case 4: return .info + + case 5: return .warn + + case 6: return .error + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: LogLevel, into buf: inout [UInt8]) { + switch value { + + + case .gossip: + writeInt(&buf, Int32(1)) + + + case .trace: + writeInt(&buf, Int32(2)) + + + case .debug: + writeInt(&buf, Int32(3)) + + + case .info: + writeInt(&buf, Int32(4)) + + + case .warn: + writeInt(&buf, Int32(5)) + + + case .error: + writeInt(&buf, Int32(6)) + + } + } +} + + +public func FfiConverterTypeLogLevel_lift(_ buf: RustBuffer) throws -> LogLevel { + return try FfiConverterTypeLogLevel.lift(buf) +} + +public func FfiConverterTypeLogLevel_lower(_ value: LogLevel) -> RustBuffer { + return FfiConverterTypeLogLevel.lower(value) +} + + + +extension LogLevel: Equatable, Hashable {} + + + +// Note that we don't yet support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ +public enum MaxDustHtlcExposure { + + case fixedLimit(limitMsat: UInt64 + ) + case feeRateMultiplier(multiplier: UInt64 + ) +} + + +public struct FfiConverterTypeMaxDustHTLCExposure: FfiConverterRustBuffer { + typealias SwiftType = MaxDustHtlcExposure + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> MaxDustHtlcExposure { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .fixedLimit(limitMsat: try FfiConverterUInt64.read(from: &buf) + ) + + case 2: return .feeRateMultiplier(multiplier: try FfiConverterUInt64.read(from: &buf) + ) + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: MaxDustHtlcExposure, into buf: inout [UInt8]) { + switch value { - case let .maybePreimageClaimableHtlc(channelId,counterpartyNodeId,amountSatoshis,expiryHeight,paymentHash): - writeInt(&buf, Int32(5)) - FfiConverterTypeChannelId.write(channelId, into: &buf) - FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) - FfiConverterUInt32.write(expiryHeight, into: &buf) - FfiConverterTypePaymentHash.write(paymentHash, into: &buf) + + case let .fixedLimit(limitMsat): + writeInt(&buf, Int32(1)) + FfiConverterUInt64.write(limitMsat, into: &buf) - case let .counterpartyRevokedOutputClaimable(channelId,counterpartyNodeId,amountSatoshis): - writeInt(&buf, Int32(6)) - FfiConverterTypeChannelId.write(channelId, into: &buf) - FfiConverterTypePublicKey.write(counterpartyNodeId, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) + case let .feeRateMultiplier(multiplier): + writeInt(&buf, Int32(2)) + FfiConverterUInt64.write(multiplier, into: &buf) } } } -public func FfiConverterTypeLightningBalance_lift(_ buf: RustBuffer) throws -> LightningBalance { - return try FfiConverterTypeLightningBalance.lift(buf) +public func FfiConverterTypeMaxDustHTLCExposure_lift(_ buf: RustBuffer) throws -> MaxDustHtlcExposure 
{ + return try FfiConverterTypeMaxDustHTLCExposure.lift(buf) } -public func FfiConverterTypeLightningBalance_lower(_ value: LightningBalance) -> RustBuffer { - return FfiConverterTypeLightningBalance.lower(value) +public func FfiConverterTypeMaxDustHTLCExposure_lower(_ value: MaxDustHtlcExposure) -> RustBuffer { + return FfiConverterTypeMaxDustHTLCExposure.lower(value) } -extension LightningBalance: Equatable, Hashable {} + +extension MaxDustHtlcExposure: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. -public enum LogLevel { + +public enum MaxTotalRoutingFeeLimit { - case gossip - case trace - case debug - case info - case warn - case error + case none + case some(amountMsat: UInt64 + ) } -public struct FfiConverterTypeLogLevel: FfiConverterRustBuffer { - typealias SwiftType = LogLevel - public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> LogLevel { +public struct FfiConverterTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { + typealias SwiftType = MaxTotalRoutingFeeLimit + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> MaxTotalRoutingFeeLimit { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .gossip - - case 2: return .trace - - case 3: return .debug - - case 4: return .info - - case 5: return .warn + case 1: return .none - case 6: return .error + case 2: return .some(amountMsat: try FfiConverterUInt64.read(from: &buf) + ) default: throw UniffiInternalError.unexpectedEnumCase } } - public static func write(_ value: LogLevel, into buf: inout [UInt8]) { + public static func write(_ value: MaxTotalRoutingFeeLimit, into buf: inout [UInt8]) { switch value { - case .gossip: + case .none: writeInt(&buf, Int32(1)) - case .trace: + case let .some(amountMsat): writeInt(&buf, Int32(2)) - - - case .debug: - writeInt(&buf, Int32(3)) - - - case .info: - writeInt(&buf, 
Int32(4)) - - - case .warn: - writeInt(&buf, Int32(5)) - - - case .error: - writeInt(&buf, Int32(6)) - + FfiConverterUInt64.write(amountMsat, into: &buf) + } } } -public func FfiConverterTypeLogLevel_lift(_ buf: RustBuffer) throws -> LogLevel { - return try FfiConverterTypeLogLevel.lift(buf) +public func FfiConverterTypeMaxTotalRoutingFeeLimit_lift(_ buf: RustBuffer) throws -> MaxTotalRoutingFeeLimit { + return try FfiConverterTypeMaxTotalRoutingFeeLimit.lift(buf) } -public func FfiConverterTypeLogLevel_lower(_ value: LogLevel) -> RustBuffer { - return FfiConverterTypeLogLevel.lower(value) +public func FfiConverterTypeMaxTotalRoutingFeeLimit_lower(_ value: MaxTotalRoutingFeeLimit) -> RustBuffer { + return FfiConverterTypeMaxTotalRoutingFeeLimit.lower(value) } -extension LogLevel: Equatable, Hashable {} + +extension MaxTotalRoutingFeeLimit: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ public enum Network { case bitcoin @@ -3975,6 +4559,7 @@ public enum Network { case regtest } + public struct FfiConverterTypeNetwork: FfiConverterRustBuffer { typealias SwiftType = Network @@ -4027,6 +4612,7 @@ public func FfiConverterTypeNetwork_lower(_ value: Network) -> RustBuffer { } + extension Network: Equatable, Hashable {} @@ -4074,8 +4660,6 @@ public enum NodeError { case OnchainTxSigningFailed(message: String) - case MessageSigningFailed(message: String) - case TxSyncFailed(message: String) case TxSyncTimeout(message: String) @@ -4086,6 +4670,8 @@ public enum NodeError { case LiquidityRequestFailed(message: String) + case UriParameterParsingFailed(message: String) + case InvalidAddress(message: String) case InvalidSocketAddress(message: String) @@ -4118,6 +4704,12 @@ public enum NodeError { case InvalidNetwork(message: String) + case InvalidUri(message: String) + + case InvalidQuantity(message: String) + + case InvalidNodeAlias(message: String) + case DuplicatePayment(message: String) case UnsupportedCurrency(message: String) @@ -4128,10 +4720,6 @@ public enum NodeError { case LiquidityFeeTooHigh(message: String) - - fileprivate static func uniffiErrorHandler(_ error: RustBuffer) throws -> Error { - return try FfiConverterTypeNodeError.lift(error) - } } @@ -4221,27 +4809,27 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 20: return .MessageSigningFailed( + case 20: return .TxSyncFailed( message: try FfiConverterString.read(from: &buf) ) - case 21: return .TxSyncFailed( + case 21: return .TxSyncTimeout( message: try FfiConverterString.read(from: &buf) ) - case 22: return .TxSyncTimeout( + case 22: return .GossipUpdateFailed( message: try FfiConverterString.read(from: &buf) ) - case 23: return .GossipUpdateFailed( + case 23: return .GossipUpdateTimeout( message: try FfiConverterString.read(from: &buf) ) - case 24: return .GossipUpdateTimeout( + case 24: return 
.LiquidityRequestFailed( message: try FfiConverterString.read(from: &buf) ) - case 25: return .LiquidityRequestFailed( + case 25: return .UriParameterParsingFailed( message: try FfiConverterString.read(from: &buf) ) @@ -4309,23 +4897,35 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { message: try FfiConverterString.read(from: &buf) ) - case 42: return .DuplicatePayment( + case 42: return .InvalidUri( + message: try FfiConverterString.read(from: &buf) + ) + + case 43: return .InvalidQuantity( + message: try FfiConverterString.read(from: &buf) + ) + + case 44: return .InvalidNodeAlias( + message: try FfiConverterString.read(from: &buf) + ) + + case 45: return .DuplicatePayment( message: try FfiConverterString.read(from: &buf) ) - case 43: return .UnsupportedCurrency( + case 46: return .UnsupportedCurrency( message: try FfiConverterString.read(from: &buf) ) - case 44: return .InsufficientFunds( + case 47: return .InsufficientFunds( message: try FfiConverterString.read(from: &buf) ) - case 45: return .LiquiditySourceUnavailable( + case 48: return .LiquiditySourceUnavailable( message: try FfiConverterString.read(from: &buf) ) - case 46: return .LiquidityFeeTooHigh( + case 49: return .LiquidityFeeTooHigh( message: try FfiConverterString.read(from: &buf) ) @@ -4378,17 +4978,17 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { writeInt(&buf, Int32(18)) case .OnchainTxSigningFailed(_ /* message is ignored*/): writeInt(&buf, Int32(19)) - case .MessageSigningFailed(_ /* message is ignored*/): - writeInt(&buf, Int32(20)) case .TxSyncFailed(_ /* message is ignored*/): - writeInt(&buf, Int32(21)) + writeInt(&buf, Int32(20)) case .TxSyncTimeout(_ /* message is ignored*/): - writeInt(&buf, Int32(22)) + writeInt(&buf, Int32(21)) case .GossipUpdateFailed(_ /* message is ignored*/): - writeInt(&buf, Int32(23)) + writeInt(&buf, Int32(22)) case .GossipUpdateTimeout(_ /* message is ignored*/): - writeInt(&buf, Int32(24)) + writeInt(&buf, 
Int32(23)) case .LiquidityRequestFailed(_ /* message is ignored*/): + writeInt(&buf, Int32(24)) + case .UriParameterParsingFailed(_ /* message is ignored*/): writeInt(&buf, Int32(25)) case .InvalidAddress(_ /* message is ignored*/): writeInt(&buf, Int32(26)) @@ -4422,16 +5022,22 @@ public struct FfiConverterTypeNodeError: FfiConverterRustBuffer { writeInt(&buf, Int32(40)) case .InvalidNetwork(_ /* message is ignored*/): writeInt(&buf, Int32(41)) - case .DuplicatePayment(_ /* message is ignored*/): + case .InvalidUri(_ /* message is ignored*/): writeInt(&buf, Int32(42)) - case .UnsupportedCurrency(_ /* message is ignored*/): + case .InvalidQuantity(_ /* message is ignored*/): writeInt(&buf, Int32(43)) - case .InsufficientFunds(_ /* message is ignored*/): + case .InvalidNodeAlias(_ /* message is ignored*/): writeInt(&buf, Int32(44)) - case .LiquiditySourceUnavailable(_ /* message is ignored*/): + case .DuplicatePayment(_ /* message is ignored*/): writeInt(&buf, Int32(45)) - case .LiquidityFeeTooHigh(_ /* message is ignored*/): + case .UnsupportedCurrency(_ /* message is ignored*/): writeInt(&buf, Int32(46)) + case .InsufficientFunds(_ /* message is ignored*/): + writeInt(&buf, Int32(47)) + case .LiquiditySourceUnavailable(_ /* message is ignored*/): + writeInt(&buf, Int32(48)) + case .LiquidityFeeTooHigh(_ /* message is ignored*/): + writeInt(&buf, Int32(49)) } @@ -4445,12 +5051,14 @@ extension NodeError: Error { } // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentDirection { case inbound case outbound } + public struct FfiConverterTypePaymentDirection: FfiConverterRustBuffer { typealias SwiftType = PaymentDirection @@ -4491,12 +5099,14 @@ public func FfiConverterTypePaymentDirection_lower(_ value: PaymentDirection) -> } + extension PaymentDirection: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. 
// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentFailureReason { case recipientRejected @@ -4505,8 +5115,12 @@ public enum PaymentFailureReason { case paymentExpired case routeNotFound case unexpectedError + case unknownRequiredFeatures + case invoiceRequestExpired + case invoiceRequestRejected } + public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { typealias SwiftType = PaymentFailureReason @@ -4526,6 +5140,12 @@ public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { case 6: return .unexpectedError + case 7: return .unknownRequiredFeatures + + case 8: return .invoiceRequestExpired + + case 9: return .invoiceRequestRejected + default: throw UniffiInternalError.unexpectedEnumCase } } @@ -4557,6 +5177,18 @@ public struct FfiConverterTypePaymentFailureReason: FfiConverterRustBuffer { case .unexpectedError: writeInt(&buf, Int32(6)) + + case .unknownRequiredFeatures: + writeInt(&buf, Int32(7)) + + + case .invoiceRequestExpired: + writeInt(&buf, Int32(8)) + + + case .invoiceRequestRejected: + writeInt(&buf, Int32(9)) + } } } @@ -4571,43 +5203,30 @@ public func FfiConverterTypePaymentFailureReason_lower(_ value: PaymentFailureRe } + extension PaymentFailureReason: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentKind { case onchain - case bolt11( - hash: PaymentHash, - preimage: PaymentPreimage?, - secret: PaymentSecret? + case bolt11(hash: PaymentHash, preimage: PaymentPreimage?, secret: PaymentSecret? 
) - case bolt11Jit( - hash: PaymentHash, - preimage: PaymentPreimage?, - secret: PaymentSecret?, - lspFeeLimits: LspFeeLimits + case bolt11Jit(hash: PaymentHash, preimage: PaymentPreimage?, secret: PaymentSecret?, lspFeeLimits: LspFeeLimits ) - case bolt12Offer( - hash: PaymentHash?, - preimage: PaymentPreimage?, - secret: PaymentSecret?, - offerId: OfferId + case bolt12Offer(hash: PaymentHash?, preimage: PaymentPreimage?, secret: PaymentSecret?, offerId: OfferId, payerNote: UntrustedString?, quantity: UInt64? ) - case bolt12Refund( - hash: PaymentHash?, - preimage: PaymentPreimage?, - secret: PaymentSecret? + case bolt12Refund(hash: PaymentHash?, preimage: PaymentPreimage?, secret: PaymentSecret?, payerNote: UntrustedString?, quantity: UInt64? ) - case spontaneous( - hash: PaymentHash, - preimage: PaymentPreimage? + case spontaneous(hash: PaymentHash, preimage: PaymentPreimage? ) } + public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { typealias SwiftType = PaymentKind @@ -4617,35 +5236,19 @@ public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { case 1: return .onchain - case 2: return .bolt11( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) + case 2: return .bolt11(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) ) - case 3: return .bolt11Jit( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), - lspFeeLimits: try FfiConverterTypeLSPFeeLimits.read(from: &buf) + case 3: return .bolt11Jit(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: 
&buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), lspFeeLimits: try FfiConverterTypeLSPFeeLimits.read(from: &buf) ) - case 4: return .bolt12Offer( - hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), - offerId: try FfiConverterTypeOfferId.read(from: &buf) + case 4: return .bolt12Offer(hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), offerId: try FfiConverterTypeOfferId.read(from: &buf), payerNote: try FfiConverterOptionTypeUntrustedString.read(from: &buf), quantity: try FfiConverterOptionUInt64.read(from: &buf) ) - case 5: return .bolt12Refund( - hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), - secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf) + case 5: return .bolt12Refund(hash: try FfiConverterOptionTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf), secret: try FfiConverterOptionTypePaymentSecret.read(from: &buf), payerNote: try FfiConverterOptionTypeUntrustedString.read(from: &buf), quantity: try FfiConverterOptionUInt64.read(from: &buf) ) - case 6: return .spontaneous( - hash: try FfiConverterTypePaymentHash.read(from: &buf), - preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf) + case 6: return .spontaneous(hash: try FfiConverterTypePaymentHash.read(from: &buf), preimage: try FfiConverterOptionTypePaymentPreimage.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -4675,19 +5278,23 @@ public struct FfiConverterTypePaymentKind: FfiConverterRustBuffer { FfiConverterTypeLSPFeeLimits.write(lspFeeLimits, into: &buf) - case let 
.bolt12Offer(hash,preimage,secret,offerId): + case let .bolt12Offer(hash,preimage,secret,offerId,payerNote,quantity): writeInt(&buf, Int32(4)) FfiConverterOptionTypePaymentHash.write(hash, into: &buf) FfiConverterOptionTypePaymentPreimage.write(preimage, into: &buf) FfiConverterOptionTypePaymentSecret.write(secret, into: &buf) FfiConverterTypeOfferId.write(offerId, into: &buf) + FfiConverterOptionTypeUntrustedString.write(payerNote, into: &buf) + FfiConverterOptionUInt64.write(quantity, into: &buf) - case let .bolt12Refund(hash,preimage,secret): + case let .bolt12Refund(hash,preimage,secret,payerNote,quantity): writeInt(&buf, Int32(5)) FfiConverterOptionTypePaymentHash.write(hash, into: &buf) FfiConverterOptionTypePaymentPreimage.write(preimage, into: &buf) FfiConverterOptionTypePaymentSecret.write(secret, into: &buf) + FfiConverterOptionTypeUntrustedString.write(payerNote, into: &buf) + FfiConverterOptionUInt64.write(quantity, into: &buf) case let .spontaneous(hash,preimage): @@ -4709,12 +5316,14 @@ public func FfiConverterTypePaymentKind_lower(_ value: PaymentKind) -> RustBuffe } + extension PaymentKind: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + public enum PaymentStatus { case pending @@ -4722,6 +5331,7 @@ public enum PaymentStatus { case failed } + public struct FfiConverterTypePaymentStatus: FfiConverterRustBuffer { typealias SwiftType = PaymentStatus @@ -4768,33 +5378,25 @@ public func FfiConverterTypePaymentStatus_lower(_ value: PaymentStatus) -> RustB } + extension PaymentStatus: Equatable, Hashable {} // Note that we don't yet support `indirect` for enums. // See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. 
+ public enum PendingSweepBalance { - case pendingBroadcast( - channelId: ChannelId?, - amountSatoshis: UInt64 + case pendingBroadcast(channelId: ChannelId?, amountSatoshis: UInt64 ) - case broadcastAwaitingConfirmation( - channelId: ChannelId?, - latestBroadcastHeight: UInt32, - latestSpendingTxid: Txid, - amountSatoshis: UInt64 + case broadcastAwaitingConfirmation(channelId: ChannelId?, latestBroadcastHeight: UInt32, latestSpendingTxid: Txid, amountSatoshis: UInt64 ) - case awaitingThresholdConfirmations( - channelId: ChannelId?, - latestSpendingTxid: Txid, - confirmationHash: BlockHash, - confirmationHeight: UInt32, - amountSatoshis: UInt64 + case awaitingThresholdConfirmations(channelId: ChannelId?, latestSpendingTxid: Txid, confirmationHash: BlockHash, confirmationHeight: UInt32, amountSatoshis: UInt64 ) } + public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { typealias SwiftType = PendingSweepBalance @@ -4802,24 +5404,13 @@ public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { let variant: Int32 = try readInt(&buf) switch variant { - case 1: return .pendingBroadcast( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 1: return .pendingBroadcast(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) - case 2: return .broadcastAwaitingConfirmation( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - latestBroadcastHeight: try FfiConverterUInt32.read(from: &buf), - latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 2: return .broadcastAwaitingConfirmation(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), latestBroadcastHeight: try FfiConverterUInt32.read(from: &buf), latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), amountSatoshis: try 
FfiConverterUInt64.read(from: &buf) ) - case 3: return .awaitingThresholdConfirmations( - channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), - latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), - confirmationHash: try FfiConverterTypeBlockHash.read(from: &buf), - confirmationHeight: try FfiConverterUInt32.read(from: &buf), - amountSatoshis: try FfiConverterUInt64.read(from: &buf) + case 3: return .awaitingThresholdConfirmations(channelId: try FfiConverterOptionTypeChannelId.read(from: &buf), latestSpendingTxid: try FfiConverterTypeTxid.read(from: &buf), confirmationHash: try FfiConverterTypeBlockHash.read(from: &buf), confirmationHeight: try FfiConverterUInt32.read(from: &buf), amountSatoshis: try FfiConverterUInt64.read(from: &buf) ) default: throw UniffiInternalError.unexpectedEnumCase @@ -4844,31 +5435,196 @@ public struct FfiConverterTypePendingSweepBalance: FfiConverterRustBuffer { FfiConverterUInt64.write(amountSatoshis, into: &buf) - case let .awaitingThresholdConfirmations(channelId,latestSpendingTxid,confirmationHash,confirmationHeight,amountSatoshis): + case let .awaitingThresholdConfirmations(channelId,latestSpendingTxid,confirmationHash,confirmationHeight,amountSatoshis): + writeInt(&buf, Int32(3)) + FfiConverterOptionTypeChannelId.write(channelId, into: &buf) + FfiConverterTypeTxid.write(latestSpendingTxid, into: &buf) + FfiConverterTypeBlockHash.write(confirmationHash, into: &buf) + FfiConverterUInt32.write(confirmationHeight, into: &buf) + FfiConverterUInt64.write(amountSatoshis, into: &buf) + + } + } +} + + +public func FfiConverterTypePendingSweepBalance_lift(_ buf: RustBuffer) throws -> PendingSweepBalance { + return try FfiConverterTypePendingSweepBalance.lift(buf) +} + +public func FfiConverterTypePendingSweepBalance_lower(_ value: PendingSweepBalance) -> RustBuffer { + return FfiConverterTypePendingSweepBalance.lower(value) +} + + + +extension PendingSweepBalance: Equatable, Hashable {} + + + +// Note that we don't yet 
support `indirect` for enums. +// See https://github.com/mozilla/uniffi-rs/issues/396 for further discussion. + +public enum QrPaymentResult { + + case onchain(txid: Txid + ) + case bolt11(paymentId: PaymentId + ) + case bolt12(paymentId: PaymentId + ) +} + + +public struct FfiConverterTypeQrPaymentResult: FfiConverterRustBuffer { + typealias SwiftType = QrPaymentResult + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> QrPaymentResult { + let variant: Int32 = try readInt(&buf) + switch variant { + + case 1: return .onchain(txid: try FfiConverterTypeTxid.read(from: &buf) + ) + + case 2: return .bolt11(paymentId: try FfiConverterTypePaymentId.read(from: &buf) + ) + + case 3: return .bolt12(paymentId: try FfiConverterTypePaymentId.read(from: &buf) + ) + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: QrPaymentResult, into buf: inout [UInt8]) { + switch value { + + + case let .onchain(txid): + writeInt(&buf, Int32(1)) + FfiConverterTypeTxid.write(txid, into: &buf) + + + case let .bolt11(paymentId): + writeInt(&buf, Int32(2)) + FfiConverterTypePaymentId.write(paymentId, into: &buf) + + + case let .bolt12(paymentId): + writeInt(&buf, Int32(3)) + FfiConverterTypePaymentId.write(paymentId, into: &buf) + + } + } +} + + +public func FfiConverterTypeQrPaymentResult_lift(_ buf: RustBuffer) throws -> QrPaymentResult { + return try FfiConverterTypeQrPaymentResult.lift(buf) +} + +public func FfiConverterTypeQrPaymentResult_lower(_ value: QrPaymentResult) -> RustBuffer { + return FfiConverterTypeQrPaymentResult.lower(value) +} + + + +extension QrPaymentResult: Equatable, Hashable {} + + + + +public enum VssHeaderProviderError { + + + + case InvalidData(message: String) + + case RequestError(message: String) + + case AuthorizationError(message: String) + + case InternalError(message: String) + +} + + +public struct FfiConverterTypeVssHeaderProviderError: FfiConverterRustBuffer { + typealias 
SwiftType = VssHeaderProviderError + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> VssHeaderProviderError { + let variant: Int32 = try readInt(&buf) + switch variant { + + + + + case 1: return .InvalidData( + message: try FfiConverterString.read(from: &buf) + ) + + case 2: return .RequestError( + message: try FfiConverterString.read(from: &buf) + ) + + case 3: return .AuthorizationError( + message: try FfiConverterString.read(from: &buf) + ) + + case 4: return .InternalError( + message: try FfiConverterString.read(from: &buf) + ) + + + default: throw UniffiInternalError.unexpectedEnumCase + } + } + + public static func write(_ value: VssHeaderProviderError, into buf: inout [UInt8]) { + switch value { + + + + + case .InvalidData(_ /* message is ignored*/): + writeInt(&buf, Int32(1)) + case .RequestError(_ /* message is ignored*/): + writeInt(&buf, Int32(2)) + case .AuthorizationError(_ /* message is ignored*/): writeInt(&buf, Int32(3)) - FfiConverterOptionTypeChannelId.write(channelId, into: &buf) - FfiConverterTypeTxid.write(latestSpendingTxid, into: &buf) - FfiConverterTypeBlockHash.write(confirmationHash, into: &buf) - FfiConverterUInt32.write(confirmationHeight, into: &buf) - FfiConverterUInt64.write(amountSatoshis, into: &buf) - + case .InternalError(_ /* message is ignored*/): + writeInt(&buf, Int32(4)) + + } } } -public func FfiConverterTypePendingSweepBalance_lift(_ buf: RustBuffer) throws -> PendingSweepBalance { - return try FfiConverterTypePendingSweepBalance.lift(buf) -} - -public func FfiConverterTypePendingSweepBalance_lower(_ value: PendingSweepBalance) -> RustBuffer { - return FfiConverterTypePendingSweepBalance.lower(value) -} +extension VssHeaderProviderError: Equatable, Hashable {} +extension VssHeaderProviderError: Error { } -extension PendingSweepBalance: Equatable, Hashable {} +fileprivate struct FfiConverterOptionUInt8: FfiConverterRustBuffer { + typealias SwiftType = UInt8? 
+ public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterUInt8.write(value, into: &buf) + } + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterUInt8.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} fileprivate struct FfiConverterOptionUInt16: FfiConverterRustBuffer { typealias SwiftType = UInt16? @@ -4933,8 +5689,8 @@ fileprivate struct FfiConverterOptionUInt64: FfiConverterRustBuffer { } } -fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { - typealias SwiftType = String? +fileprivate struct FfiConverterOptionBool: FfiConverterRustBuffer { + typealias SwiftType = Bool? public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -4942,20 +5698,20 @@ fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterString.write(value, into: &buf) + FfiConverterBool.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterString.read(from: &buf) + case 1: return try FfiConverterBool.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } } -fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { - typealias SwiftType = ChannelConfig? +fileprivate struct FfiConverterOptionString: FfiConverterRustBuffer { + typealias SwiftType = String? 
public static func write(_ value: SwiftType, into buf: inout [UInt8]) { guard let value = value else { @@ -4963,13 +5719,13 @@ fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { return } writeInt(&buf, Int8(1)) - FfiConverterTypeChannelConfig.write(value, into: &buf) + FfiConverterString.write(value, into: &buf) } public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { switch try readInt(&buf) as Int8 { case 0: return nil - case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) + case 1: return try FfiConverterString.read(from: &buf) default: throw UniffiInternalError.unexpectedOptionalTag } } @@ -4996,6 +5752,27 @@ fileprivate struct FfiConverterOptionTypeAnchorChannelsConfig: FfiConverterRustB } } +fileprivate struct FfiConverterOptionTypeChannelConfig: FfiConverterRustBuffer { + typealias SwiftType = ChannelConfig? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeChannelConfig.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeChannelConfig.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeChannelInfo: FfiConverterRustBuffer { typealias SwiftType = ChannelInfo? @@ -5038,6 +5815,27 @@ fileprivate struct FfiConverterOptionTypeChannelUpdateInfo: FfiConverterRustBuff } } +fileprivate struct FfiConverterOptionTypeEsploraSyncConfig: FfiConverterRustBuffer { + typealias SwiftType = EsploraSyncConfig? 
+ + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeEsploraSyncConfig.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeEsploraSyncConfig.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeNodeAnnouncementInfo: FfiConverterRustBuffer { typealias SwiftType = NodeAnnouncementInfo? @@ -5122,6 +5920,27 @@ fileprivate struct FfiConverterOptionTypePaymentDetails: FfiConverterRustBuffer } } +fileprivate struct FfiConverterOptionTypeSendingParameters: FfiConverterRustBuffer { + typealias SwiftType = SendingParameters? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeSendingParameters.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeSendingParameters.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypeClosureReason: FfiConverterRustBuffer { typealias SwiftType = ClosureReason? @@ -5164,6 +5983,27 @@ fileprivate struct FfiConverterOptionTypeEvent: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeMaxTotalRoutingFeeLimit: FfiConverterRustBuffer { + typealias SwiftType = MaxTotalRoutingFeeLimit? 
+ + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeMaxTotalRoutingFeeLimit.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeMaxTotalRoutingFeeLimit.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypePaymentFailureReason: FfiConverterRustBuffer { typealias SwiftType = PaymentFailureReason? @@ -5227,6 +6067,27 @@ fileprivate struct FfiConverterOptionTypeChannelId: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeNodeAlias: FfiConverterRustBuffer { + typealias SwiftType = NodeAlias? + + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeNodeAlias.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeNodeAlias.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterOptionTypePaymentHash: FfiConverterRustBuffer { typealias SwiftType = PaymentHash? @@ -5332,6 +6193,27 @@ fileprivate struct FfiConverterOptionTypePublicKey: FfiConverterRustBuffer { } } +fileprivate struct FfiConverterOptionTypeUntrustedString: FfiConverterRustBuffer { + typealias SwiftType = UntrustedString? 
+ + public static func write(_ value: SwiftType, into buf: inout [UInt8]) { + guard let value = value else { + writeInt(&buf, Int8(0)) + return + } + writeInt(&buf, Int8(1)) + FfiConverterTypeUntrustedString.write(value, into: &buf) + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> SwiftType { + switch try readInt(&buf) as Int8 { + case 0: return nil + case 1: return try FfiConverterTypeUntrustedString.read(from: &buf) + default: throw UniffiInternalError.unexpectedOptionalTag + } + } +} + fileprivate struct FfiConverterSequenceUInt8: FfiConverterRustBuffer { typealias SwiftType = [UInt8] @@ -5552,6 +6434,29 @@ fileprivate struct FfiConverterSequenceTypeSocketAddress: FfiConverterRustBuffer } } +fileprivate struct FfiConverterDictionaryStringString: FfiConverterRustBuffer { + public static func write(_ value: [String: String], into buf: inout [UInt8]) { + let len = Int32(value.count) + writeInt(&buf, len) + for (key, value) in value { + FfiConverterString.write(key, into: &buf) + FfiConverterString.write(value, into: &buf) + } + } + + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> [String: String] { + let len: Int32 = try readInt(&buf) + var dict = [String: String]() + dict.reserveCapacity(Int(len)) + for _ in 0.. RustBuffer { +/** + * Typealias from the type name used in the UDL file to the builtin type. This + * is needed because the UDL type name is used in function/method signatures. 
+ */ +public typealias NodeAlias = String +public struct FfiConverterTypeNodeAlias: FfiConverter { + public static func read(from buf: inout (data: Data, offset: Data.Index)) throws -> NodeAlias { + return try FfiConverterString.read(from: &buf) + } + + public static func write(_ value: NodeAlias, into buf: inout [UInt8]) { + return FfiConverterString.write(value, into: &buf) + } + + public static func lift(_ value: RustBuffer) throws -> NodeAlias { + return try FfiConverterString.lift(value) + } + + public static func lower(_ value: NodeAlias) -> RustBuffer { + return FfiConverterString.lower(value) + } +} + + +public func FfiConverterTypeNodeAlias_lift(_ value: RustBuffer) throws -> NodeAlias { + return try FfiConverterTypeNodeAlias.lift(value) +} + +public func FfiConverterTypeNodeAlias_lower(_ value: NodeAlias) -> RustBuffer { + return FfiConverterTypeNodeAlias.lower(value) +} + + + /** * Typealias from the type name used in the UDL file to the builtin type. This * is needed because the UDL type name is used in function/method signatures. @@ -6200,11 +7139,13 @@ public func FfiConverterTypeUserChannelId_lower(_ value: UserChannelId) -> RustB private let UNIFFI_RUST_FUTURE_POLL_READY: Int8 = 0 private let UNIFFI_RUST_FUTURE_POLL_MAYBE_READY: Int8 = 1 +fileprivate let uniffiContinuationHandleMap = UniffiHandleMap>() + fileprivate func uniffiRustCallAsync( - rustFutureFunc: () -> UnsafeMutableRawPointer, - pollFunc: (UnsafeMutableRawPointer, @escaping UniFfiRustFutureContinuation, UnsafeMutableRawPointer) -> (), - completeFunc: (UnsafeMutableRawPointer, UnsafeMutablePointer) -> F, - freeFunc: (UnsafeMutableRawPointer) -> (), + rustFutureFunc: () -> UInt64, + pollFunc: (UInt64, @escaping UniffiRustFutureContinuationCallback, UInt64) -> (), + completeFunc: (UInt64, UnsafeMutablePointer) -> F, + freeFunc: (UInt64) -> (), liftFunc: (F) throws -> T, errorHandler: ((RustBuffer) throws -> Error)? 
) async throws -> T { @@ -6218,7 +7159,11 @@ fileprivate func uniffiRustCallAsync( var pollResult: Int8; repeat { pollResult = await withUnsafeContinuation { - pollFunc(rustFuture, uniffiFutureContinuationCallback, ContinuationHolder($0).toOpaque()) + pollFunc( + rustFuture, + uniffiFutureContinuationCallback, + uniffiContinuationHandleMap.insert(obj: $0) + ) } } while pollResult != UNIFFI_RUST_FUTURE_POLL_READY @@ -6230,44 +7175,24 @@ fileprivate func uniffiRustCallAsync( // Callback handlers for an async calls. These are invoked by Rust when the future is ready. They // lift the return value or error and resume the suspended function. -fileprivate func uniffiFutureContinuationCallback(ptr: UnsafeMutableRawPointer, pollResult: Int8) { - ContinuationHolder.fromOpaque(ptr).resume(pollResult) -} - -// Wraps UnsafeContinuation in a class so that we can use reference counting when passing it across -// the FFI -fileprivate class ContinuationHolder { - let continuation: UnsafeContinuation - - init(_ continuation: UnsafeContinuation) { - self.continuation = continuation - } - - func resume(_ pollResult: Int8) { - self.continuation.resume(returning: pollResult) - } - - func toOpaque() -> UnsafeMutableRawPointer { - return Unmanaged.passRetained(self).toOpaque() +fileprivate func uniffiFutureContinuationCallback(handle: UInt64, pollResult: Int8) { + if let continuation = try? uniffiContinuationHandleMap.remove(handle: handle) { + continuation.resume(returning: pollResult) + } else { + print("uniffiFutureContinuationCallback invalid handle") } - - static func fromOpaque(_ ptr: UnsafeRawPointer) -> ContinuationHolder { - return Unmanaged.fromOpaque(ptr).takeRetainedValue() - } -} -public func defaultConfig() -> Config { - return try! FfiConverterTypeConfig.lift( - try! rustCall() { - uniffi_ldk_node_fn_func_default_config($0) } +public func defaultConfig() -> Config { + return try! FfiConverterTypeConfig.lift(try! 
rustCall() { + uniffi_ldk_node_fn_func_default_config($0 ) +}) } -public func generateEntropyMnemonic() -> Mnemonic { - return try! FfiConverterTypeMnemonic.lift( - try! rustCall() { - uniffi_ldk_node_fn_func_generate_entropy_mnemonic($0) -} +public func generateEntropyMnemonic() -> Mnemonic { + return try! FfiConverterTypeMnemonic.lift(try! rustCall() { + uniffi_ldk_node_fn_func_generate_entropy_mnemonic($0 ) +}) } private enum InitializationResult { @@ -6279,7 +7204,7 @@ private enum InitializationResult { // the code inside is only computed once. private var initializationResult: InitializationResult { // Get the bindings contract version from our ComponentInterface - let bindings_contract_version = 25 + let bindings_contract_version = 26 // Get the scaffolding contract version by calling the into the dylib let scaffolding_contract_version = ffi_ldk_node_uniffi_contract_version() if bindings_contract_version != scaffolding_contract_version { @@ -6315,7 +7240,7 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11payment_receive_via_jit_channel() != 50555) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 35346) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send() != 39133) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes() != 39625) { @@ -6324,25 +7249,25 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_bolt11payment_send_probes_using_amount() != 25010) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 15471) { + if (uniffi_ldk_node_checksum_method_bolt11payment_send_using_amount() != 19557) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 15379) { + if 
(uniffi_ldk_node_checksum_method_bolt12payment_initiate_refund() != 38039) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 20864) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive() != 15049) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 10863) { + if (uniffi_ldk_node_checksum_method_bolt12payment_receive_variable_amount() != 7279) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_bolt12payment_request_refund_payment() != 61945) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 15282) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send() != 56449) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 21384) { + if (uniffi_ldk_node_checksum_method_bolt12payment_send_using_amount() != 26006) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_builder_build() != 785) { @@ -6351,70 +7276,49 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_builder_build_with_fs_store() != 61304) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_bip39_mnemonic() != 827) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_bytes() != 44799) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_path() != 64056) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_esplora_server() != 7044) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_p2p() != 9279) { - 
return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_rgs() != 64312) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_liquidity_source_lsps2() != 2667) { - return InitializationResult.apiChecksumMismatch - } - if (uniffi_ldk_node_checksum_method_builder_set_listening_addresses() != 14051) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store() != 2871) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_network() != 27539) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store_and_fixed_headers() != 24910) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_builder_set_storage_dir_path() != 59019) { + if (uniffi_ldk_node_checksum_method_builder_build_with_vss_store_and_header_provider() != 9090) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_accept_underpaying_htlcs() != 45655) { + if (uniffi_ldk_node_checksum_method_builder_set_chain_source_bitcoind_rpc() != 2111) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_cltv_expiry_delta() != 19044) { + if (uniffi_ldk_node_checksum_method_builder_set_chain_source_esplora() != 1781) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_force_close_avoidance_max_fee_satoshis() != 69) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_bip39_mnemonic() != 827) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_forwarding_fee_base_msat() != 3400) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_bytes() != 44799) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_forwarding_fee_proportional_millionths() != 
31794) { + if (uniffi_ldk_node_checksum_method_builder_set_entropy_seed_path() != 64056) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_accept_underpaying_htlcs() != 27275) { + if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_p2p() != 9279) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_cltv_expiry_delta() != 40735) { + if (uniffi_ldk_node_checksum_method_builder_set_gossip_source_rgs() != 64312) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_force_close_avoidance_max_fee_satoshis() != 48479) { + if (uniffi_ldk_node_checksum_method_builder_set_liquidity_source_lsps2() != 2667) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_forwarding_fee_base_msat() != 29831) { + if (uniffi_ldk_node_checksum_method_builder_set_listening_addresses() != 14051) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_forwarding_fee_proportional_millionths() != 65060) { + if (uniffi_ldk_node_checksum_method_builder_set_network() != 27539) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_max_dust_htlc_exposure_from_fee_rate_multiplier() != 4707) { + if (uniffi_ldk_node_checksum_method_builder_set_node_alias() != 18342) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_channelconfig_set_max_dust_htlc_exposure_from_fixed_limit() != 16864) { + if (uniffi_ldk_node_checksum_method_builder_set_storage_dir_path() != 59019) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_networkgraph_channel() != 38070) { @@ -6444,16 +7348,13 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_connect() != 34120) { return 
InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_connect_open_channel() != 64763) { - return InitializationResult.apiChecksumMismatch - } if (uniffi_ldk_node_checksum_method_node_disconnect() != 43538) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_event_handled() != 47939) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_force_close_channel() != 44813) { + if (uniffi_ldk_node_checksum_method_node_force_close_channel() != 48831) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_list_balances() != 57528) { @@ -6480,19 +7381,28 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_next_event_async() != 25426) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_node_node_alias() != 29526) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_node_node_id() != 51489) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_onchain_payment() != 6092) { return InitializationResult.apiChecksumMismatch } + if (uniffi_ldk_node_checksum_method_node_open_announced_channel() != 36623) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_node_open_channel() != 40283) { + return InitializationResult.apiChecksumMismatch + } if (uniffi_ldk_node_checksum_method_node_payment() != 60296) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_remove_payment() != 47952) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_sign_message() != 51392) { + if (uniffi_ldk_node_checksum_method_node_sign_message() != 49319) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_spontaneous_payment() != 37403) { @@ 
-6510,7 +7420,10 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_node_sync_wallets() != 32474) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_node_update_channel_config() != 38109) { + if (uniffi_ldk_node_checksum_method_node_unified_qr_payment() != 9837) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_node_update_channel_config() != 37852) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_node_verify_signature() != 20486) { @@ -6525,22 +7438,28 @@ private var initializationResult: InitializationResult { if (uniffi_ldk_node_checksum_method_onchainpayment_send_all_to_address() != 20046) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_onchainpayment_send_to_address() != 34782) { + if (uniffi_ldk_node_checksum_method_onchainpayment_send_to_address() != 55731) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 16613) { + if (uniffi_ldk_node_checksum_method_spontaneouspayment_send() != 48210) { return InitializationResult.apiChecksumMismatch } if (uniffi_ldk_node_checksum_method_spontaneouspayment_send_probes() != 25937) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_builder_from_config() != 64393) { + if (uniffi_ldk_node_checksum_method_unifiedqrpayment_receive() != 913) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_method_unifiedqrpayment_send() != 53900) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_builder_new() != 48442) { + if (uniffi_ldk_node_checksum_method_vssheaderprovider_get_headers() != 7788) { return InitializationResult.apiChecksumMismatch } - if (uniffi_ldk_node_checksum_constructor_channelconfig_new() != 24987) { + if 
(uniffi_ldk_node_checksum_constructor_builder_from_config() != 994) { + return InitializationResult.apiChecksumMismatch + } + if (uniffi_ldk_node_checksum_constructor_builder_new() != 40499) { return InitializationResult.apiChecksumMismatch } @@ -6556,4 +7475,6 @@ private func uniffiEnsureInitialized() { case .apiChecksumMismatch: fatalError("UniFFI API checksum mismatch: try cleaning and rebuilding your project") } -} \ No newline at end of file +} + +// swiftlint:enable all \ No newline at end of file diff --git a/bindings/uniffi-bindgen/src/main.rs b/bindings/uniffi-bindgen/src/main.rs index 2aea96784..a71a3e914 100644 --- a/bindings/uniffi-bindgen/src/main.rs +++ b/bindings/uniffi-bindgen/src/main.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + fn main() { uniffi::uniffi_bindgen_main() } diff --git a/build.rs b/build.rs index 087855111..f011148e7 100644 --- a/build.rs +++ b/build.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + fn main() { #[cfg(feature = "uniffi")] uniffi::generate_scaffolding("bindings/ldk_node.udl").unwrap(); diff --git a/src/balance.rs b/src/balance.rs index f1c95dcbe..c43386d80 100644 --- a/src/balance.rs +++ b/src/balance.rs @@ -1,7 +1,16 @@ -use crate::sweep::value_satoshis_from_descriptor; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. + +use crate::sweep::value_from_descriptor; use lightning::chain::channelmonitor::Balance as LdkBalance; -use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage}; +use lightning::chain::channelmonitor::BalanceSource; +use lightning::ln::types::ChannelId; +use lightning::ln::{PaymentHash, PaymentPreimage}; use lightning::util::sweep::{OutputSpendStatus, TrackedSpendableOutput}; use bitcoin::secp256k1::PublicKey; @@ -73,6 +82,49 @@ pub enum LightningBalance { /// The amount available to claim, in satoshis, excluding the on-chain fees which will be /// required to do so. amount_satoshis: u64, + /// The transaction fee we pay for the closing commitment transaction. This amount is not + /// included in the `amount_satoshis` value. + /// + /// Note that if this channel is inbound (and thus our counterparty pays the commitment + /// transaction fee) this value will be zero. For channels created prior to LDK Node 0.4 + /// the channel is always treated as outbound (and thus this value is never zero). + transaction_fee_satoshis: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound + /// from us and are related to a payment which was sent by us. This is the sum of the + /// millisatoshis part of all HTLCs which are otherwise represented by + /// [`LightningBalance::MaybeTimeoutClaimableHTLC`] with their + /// [`LightningBalance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag set, as well as + /// any dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`. + outbound_payment_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are outbound + /// from us and are related to a forwarded HTLC. 
This is the sum of the millisatoshis part + /// of all HTLCs which are otherwise represented by + /// [`LightningBalance::MaybeTimeoutClaimableHTLC`] with their + /// [`LightningBalance::MaybeTimeoutClaimableHTLC::outbound_payment`] flag *not* set, as + /// well as any dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in `amount_satoshis`. + outbound_forwarded_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound + /// to us and for which we know the preimage. This is the sum of the millisatoshis part of + /// all HTLCs which would be represented by [`LightningBalance::ContentiousClaimable`] on + /// channel close, but whose current value is included in `amount_satoshis`, as well as any + /// dust HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in the counterparty's + /// `amount_satoshis`. + inbound_claiming_htlc_rounded_msat: u64, + /// The amount of millisatoshis which has been burned to fees from HTLCs which are inbound + /// to us and for which we do not know the preimage. This is the sum of the millisatoshis + /// part of all HTLCs which would be represented by + /// [`LightningBalance::MaybePreimageClaimableHTLC`] on channel close, as well as any dust + /// HTLCs which would otherwise be represented the same. + /// + /// This amount (rounded up to a whole satoshi value) will not be included in the + /// counterparty's `amount_satoshis`. + inbound_htlc_rounded_msat: u64, }, /// The channel has been closed, and the given balance is ours but awaiting confirmations until /// we consider it spendable. 
@@ -89,6 +141,8 @@ pub enum LightningBalance { /// /// [`Event::SpendableOutputs`]: lightning::events::Event::SpendableOutputs confirmation_height: u32, + /// Whether this balance is a result of cooperative close, a force-close, or an HTLC. + source: BalanceSource, }, /// The channel has been closed, and the given balance should be ours but awaiting spending /// transaction confirmation. If the spending transaction does not confirm in time, it is @@ -129,6 +183,8 @@ pub enum LightningBalance { claimable_height: u32, /// The payment hash whose preimage our counterparty needs to claim this HTLC. payment_hash: PaymentHash, + /// Indicates whether this HTLC represents a payment which was sent outbound from us. + outbound_payment: bool, }, /// HTLCs which we received from our counterparty which are claimable with a preimage which we /// do not currently have. This will only be claimable if we receive the preimage from the node @@ -167,16 +223,33 @@ impl LightningBalance { channel_id: ChannelId, counterparty_node_id: PublicKey, balance: LdkBalance, ) -> Self { match balance { - LdkBalance::ClaimableOnChannelClose { amount_satoshis } => { - Self::ClaimableOnChannelClose { channel_id, counterparty_node_id, amount_satoshis } + LdkBalance::ClaimableOnChannelClose { + amount_satoshis, + transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, + } => Self::ClaimableOnChannelClose { + channel_id, + counterparty_node_id, + amount_satoshis, + transaction_fee_satoshis, + outbound_payment_htlc_rounded_msat, + outbound_forwarded_htlc_rounded_msat, + inbound_claiming_htlc_rounded_msat, + inbound_htlc_rounded_msat, }, - LdkBalance::ClaimableAwaitingConfirmations { amount_satoshis, confirmation_height } => { - Self::ClaimableAwaitingConfirmations { - channel_id, - counterparty_node_id, - amount_satoshis, - confirmation_height, - } + 
LdkBalance::ClaimableAwaitingConfirmations { + amount_satoshis, + confirmation_height, + source, + } => Self::ClaimableAwaitingConfirmations { + channel_id, + counterparty_node_id, + amount_satoshis, + confirmation_height, + source, }, LdkBalance::ContentiousClaimable { amount_satoshis, @@ -195,12 +268,14 @@ impl LightningBalance { amount_satoshis, claimable_height, payment_hash, + outbound_payment, } => Self::MaybeTimeoutClaimableHTLC { channel_id, counterparty_node_id, amount_satoshis, claimable_height, payment_hash, + outbound_payment, }, LdkBalance::MaybePreimageClaimableHTLC { amount_satoshis, @@ -271,7 +346,7 @@ impl PendingSweepBalance { match output_info.status { OutputSpendStatus::PendingInitialBroadcast { .. } => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); Self::PendingBroadcast { channel_id, amount_satoshis } }, OutputSpendStatus::PendingFirstConfirmation { @@ -280,8 +355,8 @@ impl PendingSweepBalance { .. } => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); - let latest_spending_txid = latest_spending_tx.txid(); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); + let latest_spending_txid = latest_spending_tx.compute_txid(); Self::BroadcastAwaitingConfirmation { channel_id, latest_broadcast_height, @@ -296,8 +371,8 @@ impl PendingSweepBalance { .. 
} => { let channel_id = output_info.channel_id; - let amount_satoshis = value_satoshis_from_descriptor(&output_info.descriptor); - let latest_spending_txid = latest_spending_tx.txid(); + let amount_satoshis = value_from_descriptor(&output_info.descriptor).to_sat(); + let latest_spending_txid = latest_spending_tx.compute_txid(); Self::AwaitingThresholdConfirmations { channel_id, latest_spending_txid, diff --git a/src/builder.rs b/src/builder.rs index ad2c38c7a..4651a832e 100644 --- a/src/builder.rs +++ b/src/builder.rs @@ -1,13 +1,20 @@ -use crate::config::{ - default_user_config, Config, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, - DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS, DEFAULT_ESPLORA_SERVER_URL, WALLET_KEYS_SEED_LEN, -}; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use crate::chain::{ChainSource, DEFAULT_ESPLORA_SERVER_URL}; +use crate::config::{default_user_config, Config, EsploraSyncConfig, WALLET_KEYS_SEED_LEN}; + use crate::connection::ConnectionManager; use crate::event::EventQueue; use crate::fee_estimator::OnchainFeeEstimator; use crate::gossip::GossipSource; -use crate::io; use crate::io::sqlite_store::SqliteStore; +use crate::io::utils::{read_node_metrics, write_node_metrics}; +use crate::io::vss_store::VssStore; use crate::liquidity::LiquiditySource; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::message_handler::NodeCustomMessageHandler; @@ -18,13 +25,17 @@ use crate::types::{ ChainMonitor, ChannelManager, DynStore, GossipSync, Graph, KeyValue, KeysManager, MessageRouter, OnionMessenger, PeerManager, }; +use crate::wallet::persist::KVStoreWalletPersister; use crate::wallet::Wallet; +use crate::{io, NodeMetrics}; use crate::{LogLevel, Node}; use lightning::chain::{chainmonitor, BestBlock, Watch}; +use lightning::io::Cursor; use lightning::ln::channelmanager::{self, ChainParameters, ChannelManagerReadArgs}; use lightning::ln::msgs::{RoutingMessageHandler, SocketAddress}; use lightning::ln::peer_handler::{IgnoringMessageHandler, MessageHandler}; +use lightning::routing::gossip::NodeAlias; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, @@ -40,38 +51,34 @@ use lightning::util::sweep::OutputSweeper; use lightning_persister::fs_store::FilesystemStore; -use lightning_transaction_sync::EsploraSyncClient; - use lightning_liquidity::lsps2::client::LSPS2ClientConfig; use lightning_liquidity::{LiquidityClientConfig, LiquidityManager}; -#[cfg(any(vss, vss_test))] -use crate::io::vss_store::VssStore; -use bdk::bitcoin::secp256k1::Secp256k1; -use bdk::blockchain::esplora::EsploraBlockchain; -use bdk::database::SqliteDatabase; -use bdk::template::Bip84; +use 
bdk_wallet::template::Bip84; +use bdk_wallet::KeychainKind; +use bdk_wallet::Wallet as BdkWallet; use bip39::Mnemonic; use bitcoin::secp256k1::PublicKey; use bitcoin::{BlockHash, Network}; -#[cfg(any(vss, vss_test))] -use bitcoin::bip32::ChildNumber; +use bitcoin::bip32::{ChildNumber, Xpriv}; +use std::collections::HashMap; use std::convert::TryInto; use std::default::Default; use std::fmt; use std::fs; -use std::io::Cursor; use std::path::PathBuf; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex, RwLock}; use std::time::SystemTime; +use vss_client::headers::{FixedHeaders, LnurlAuthToJwtProvider, VssHeaderProvider}; #[derive(Debug, Clone)] enum ChainDataSourceConfig { - Esplora(String), + Esplora { server_url: String, sync_config: Option }, + BitcoindRpc { rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String }, } #[derive(Debug, Clone)] @@ -102,7 +109,7 @@ impl Default for LiquiditySourceConfig { /// An error encountered during building a [`Node`]. /// /// [`Node`]: crate::Node -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub enum BuildError { /// The given seed bytes are invalid, e.g., have invalid length. InvalidSeedBytes, @@ -114,6 +121,8 @@ pub enum BuildError { InvalidChannelMonitor, /// The given listening addresses are invalid, e.g. too many were passed. InvalidListeningAddresses, + /// The provided alias is invalid. + InvalidNodeAlias, /// We failed to read data from the [`KVStore`]. /// /// [`KVStore`]: lightning::util::persist::KVStore @@ -152,6 +161,7 @@ impl fmt::Display for BuildError { Self::KVStoreSetupFailed => write!(f, "Failed to setup KVStore."), Self::WalletSetupFailed => write!(f, "Failed to setup onchain wallet."), Self::LoggerSetupFailed => write!(f, "Failed to setup the logger."), + Self::InvalidNodeAlias => write!(f, "Given node alias is invalid."), } } } @@ -237,8 +247,24 @@ impl NodeBuilder { } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. 
- pub fn set_esplora_server(&mut self, esplora_server_url: String) -> &mut Self { - self.chain_data_source_config = Some(ChainDataSourceConfig::Esplora(esplora_server_url)); + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora( + &mut self, server_url: String, sync_config: Option, + ) -> &mut Self { + self.chain_data_source_config = + Some(ChainDataSourceConfig::Esplora { server_url, sync_config }); + self + } + + /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC + /// endpoint. + pub fn set_chain_source_bitcoind_rpc( + &mut self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + ) -> &mut Self { + self.chain_data_source_config = + Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }); self } @@ -305,6 +331,17 @@ impl NodeBuilder { Ok(self) } + /// Sets the node alias that will be used when broadcasting announcements to the gossip + /// network. + /// + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. + pub fn set_node_alias(&mut self, node_alias: String) -> Result<&mut Self, BuildError> { + let node_alias = sanitize_alias(&node_alias)?; + + self.config.node_alias = Some(node_alias); + Ok(self) + } + /// Sets the level at which [`Node`] will log messages. pub fn set_log_level(&mut self, level: LogLevel) -> &mut Self { self.config.log_level = level; @@ -341,10 +378,29 @@ impl NodeBuilder { self.build_with_store(kv_store) } - /// Builds a [`Node`] instance with a [`VssStore`] backend and according to the options + /// Builds a [`Node`] instance with a [VSS] backend and according to the options /// previously configured. 
- #[cfg(any(vss, vss_test))] - pub fn build_with_vss_store(&self, url: String, store_id: String) -> Result { + /// + /// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization. + /// + /// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`. + /// The returned JWT token in response to the signed LNURL request, will be used for + /// authentication/authorization of all the requests made to VSS. + /// + /// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md + pub fn build_with_vss_store( + &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, + fixed_headers: HashMap, + ) -> Result { + use bitcoin::key::Secp256k1; + let logger = setup_logger(&self.config)?; let seed_bytes = seed_bytes_from_config( @@ -352,24 +408,83 @@ impl NodeBuilder { self.entropy_source_config.as_ref(), Arc::clone(&logger), )?; + let config = Arc::new(self.config.clone()); - let xprv = bitcoin::bip32::ExtendedPrivKey::new_master(config.network.into(), &seed_bytes) - .map_err(|e| { - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes - })?; + let vss_xprv = derive_vss_xprv(config, &seed_bytes, Arc::clone(&logger))?; - let vss_xprv = xprv - .ckd_priv(&Secp256k1::new(), ChildNumber::Hardened { index: 877 }) + let lnurl_auth_xprv = vss_xprv + .derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 138 }]) .map_err(|e| { log_error!(logger, "Failed to derive VSS secret: {}", e); 
BuildError::KVStoreSetupFailed })?; + let lnurl_auth_jwt_provider = + LnurlAuthToJwtProvider::new(lnurl_auth_xprv, lnurl_auth_server_url, fixed_headers) + .map_err(|e| { + log_error!(logger, "Failed to create LnurlAuthToJwtProvider: {}", e); + BuildError::KVStoreSetupFailed + })?; + + let header_provider = Arc::new(lnurl_auth_jwt_provider); + + self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [`FixedHeaders`] as default method for authentication/authorization. + /// + /// Given `fixed_headers` are included as it is in all the requests made to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + pub fn build_with_vss_store_and_fixed_headers( + &self, vss_url: String, store_id: String, fixed_headers: HashMap, + ) -> Result { + let header_provider = Arc::new(FixedHeaders::new(fixed_headers)); + + self.build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Given `header_provider` is used to attach headers to every request made + /// to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + pub fn build_with_vss_store_and_header_provider( + &self, vss_url: String, store_id: String, header_provider: Arc, + ) -> Result { + let logger = setup_logger(&self.config)?; + + let seed_bytes = seed_bytes_from_config( + &self.config, + self.entropy_source_config.as_ref(), + Arc::clone(&logger), + )?; + + let config = Arc::new(self.config.clone()); + + let vss_xprv = derive_vss_xprv(config.clone(), &seed_bytes, Arc::clone(&logger))?; + let vss_seed_bytes: [u8; 32] = vss_xprv.private_key.secret_bytes(); - let vss_store = Arc::new(VssStore::new(url, store_id, vss_seed_bytes)); + let vss_store = + VssStore::new(vss_url, store_id, vss_seed_bytes, header_provider).map_err(|e| { + log_error!(logger, "Failed to setup VssStore: {}", e); + BuildError::KVStoreSetupFailed + })?; build_with_store_internal( config, self.chain_data_source_config.as_ref(), @@ -377,7 +492,7 @@ impl NodeBuilder { self.liquidity_source_config.as_ref(), seed_bytes, logger, - vss_store, + Arc::new(vss_store), ) } @@ -469,8 +584,26 @@ impl ArcedNodeBuilder { } /// Configures the [`Node`] instance to source its chain data from the given Esplora server. - pub fn set_esplora_server(&self, esplora_server_url: String) { - self.inner.write().unwrap().set_esplora_server(esplora_server_url); + /// + /// If no `sync_config` is given, default values are used. See [`EsploraSyncConfig`] for more + /// information. + pub fn set_chain_source_esplora( + &self, server_url: String, sync_config: Option, + ) { + self.inner.write().unwrap().set_chain_source_esplora(server_url, sync_config); + } + + /// Configures the [`Node`] instance to source its chain data from the given Bitcoin Core RPC + /// endpoint. 
+ pub fn set_chain_source_bitcoind_rpc( + &self, rpc_host: String, rpc_port: u16, rpc_user: String, rpc_password: String, + ) { + self.inner.write().unwrap().set_chain_source_bitcoind_rpc( + rpc_host, + rpc_port, + rpc_user, + rpc_password, + ); } /// Configures the [`Node`] instance to source its gossip data from the Lightning peer-to-peer @@ -520,6 +653,14 @@ impl ArcedNodeBuilder { self.inner.write().unwrap().set_listening_addresses(listening_addresses).map(|_| ()) } + /// Sets the node alias that will be used when broadcasting announcements to the gossip + /// network. + /// + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. + pub fn set_node_alias(&self, node_alias: String) -> Result<(), BuildError> { + self.inner.write().unwrap().set_node_alias(node_alias).map(|_| ()) + } + /// Sets the level at which [`Node`] will log messages. pub fn set_log_level(&self, level: LogLevel) { self.inner.write().unwrap().set_log_level(level); @@ -537,6 +678,77 @@ impl ArcedNodeBuilder { self.inner.read().unwrap().build_with_fs_store().map(Arc::new) } + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [LNURL-auth] based authentication scheme as default method for authentication/authorization. + /// + /// The LNURL challenge will be retrieved by making a request to the given `lnurl_auth_server_url`. + /// The returned JWT token in response to the signed LNURL request, will be used for + /// authentication/authorization of all the requests made to VSS. + /// + /// `fixed_headers` are included as it is in all the requests made to VSS and LNURL auth server. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + /// [LNURL-auth]: https://github.com/lnurl/luds/blob/luds/04.md + pub fn build_with_vss_store( + &self, vss_url: String, store_id: String, lnurl_auth_server_url: String, + fixed_headers: HashMap, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store(vss_url, store_id, lnurl_auth_server_url, fixed_headers) + .map(Arc::new) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Uses [`FixedHeaders`] as default method for authentication/authorization. + /// + /// Given `fixed_headers` are included as it is in all the requests made to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. + /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + pub fn build_with_vss_store_and_fixed_headers( + &self, vss_url: String, store_id: String, fixed_headers: HashMap, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store_and_fixed_headers(vss_url, store_id, fixed_headers) + .map(Arc::new) + } + + /// Builds a [`Node`] instance with a [VSS] backend and according to the options + /// previously configured. + /// + /// Given `header_provider` is used to attach headers to every request made + /// to VSS. + /// + /// **Caution**: VSS support is in **alpha** and is considered experimental. + /// Using VSS (or any remote persistence) may cause LDK to panic if persistence failures are + /// unrecoverable, i.e., if they remain unresolved after internal retries are exhausted. 
+ /// + /// [VSS]: https://github.com/lightningdevkit/vss-server/blob/main/README.md + pub fn build_with_vss_store_and_header_provider( + &self, vss_url: String, store_id: String, header_provider: Arc, + ) -> Result, BuildError> { + self.inner + .read() + .unwrap() + .build_with_vss_store_and_header_provider(vss_url, store_id, header_provider) + .map(Arc::new) + } + /// Builds a [`Node`] instance according to the options previously configured. pub fn build_with_store(&self, kv_store: Arc) -> Result, BuildError> { self.inner.read().unwrap().build_with_store(kv_store).map(Arc::new) @@ -550,96 +762,114 @@ fn build_with_store_internal( liquidity_source_config: Option<&LiquiditySourceConfig>, seed_bytes: [u8; 64], logger: Arc, kv_store: Arc, ) -> Result { + // Initialize the status fields. + let is_listening = Arc::new(AtomicBool::new(false)); + let node_metrics = match read_node_metrics(Arc::clone(&kv_store), Arc::clone(&logger)) { + Ok(metrics) => Arc::new(RwLock::new(metrics)), + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + Arc::new(RwLock::new(NodeMetrics::default())) + } else { + return Err(BuildError::ReadFailed); + } + }, + }; + // Initialize the on-chain wallet and chain access - let xprv = bitcoin::bip32::ExtendedPrivKey::new_master(config.network.into(), &seed_bytes) + let xprv = bitcoin::bip32::Xpriv::new_master(config.network, &seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; + + let descriptor = Bip84(xprv, KeychainKind::External); + let change_descriptor = Bip84(xprv, KeychainKind::Internal); + let mut wallet_persister = + KVStoreWalletPersister::new(Arc::clone(&kv_store), Arc::clone(&logger)); + let wallet_opt = BdkWallet::load() + .descriptor(KeychainKind::External, Some(descriptor.clone())) + .descriptor(KeychainKind::Internal, Some(change_descriptor.clone())) + .extract_keys() + .check_network(config.network) + .load_wallet(&mut wallet_persister) .map_err(|e| 
{ - log_error!(logger, "Failed to derive master secret: {}", e); - BuildError::InvalidSeedBytes + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed })?; + let bdk_wallet = match wallet_opt { + Some(wallet) => wallet, + None => BdkWallet::create(descriptor, change_descriptor) + .network(config.network) + .create_wallet(&mut wallet_persister) + .map_err(|e| { + log_error!(logger, "Failed to set up wallet: {}", e); + BuildError::WalletSetupFailed + })?, + }; - let wallet_name = bdk::wallet::wallet_name_from_descriptor( - Bip84(xprv, bdk::KeychainKind::External), - Some(Bip84(xprv, bdk::KeychainKind::Internal)), - config.network.into(), - &Secp256k1::new(), - ) - .map_err(|e| { - log_error!(logger, "Failed to derive wallet name: {}", e); - BuildError::WalletSetupFailed - })?; - - let database_path = format!("{}/bdk_wallet_{}.sqlite", config.storage_dir_path, wallet_name); - let database = SqliteDatabase::new(database_path); - - let bdk_wallet = bdk::Wallet::new( - Bip84(xprv, bdk::KeychainKind::External), - Some(Bip84(xprv, bdk::KeychainKind::Internal)), - config.network.into(), - database, - ) - .map_err(|e| { - log_error!(logger, "Failed to set up wallet: {}", e); - BuildError::WalletSetupFailed - })?; + let tx_broadcaster = Arc::new(TransactionBroadcaster::new(Arc::clone(&logger))); + let fee_estimator = Arc::new(OnchainFeeEstimator::new()); + let wallet = Arc::new(Wallet::new( + bdk_wallet, + wallet_persister, + Arc::clone(&tx_broadcaster), + Arc::clone(&fee_estimator), + Arc::clone(&logger), + )); - let (blockchain, tx_sync, tx_broadcaster, fee_estimator) = match chain_data_source_config { - Some(ChainDataSourceConfig::Esplora(server_url)) => { + let chain_source = match chain_data_source_config { + Some(ChainDataSourceConfig::Esplora { server_url, sync_config }) => { log_info!(logger, "Using custom esplora server: {}", server_url); - let mut client_builder = esplora_client::Builder::new(&server_url.clone()); - client_builder = 
client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); - - let tx_sync = Arc::new(EsploraSyncClient::from_client( - client_builder.clone().build_async().unwrap(), - Arc::clone(&logger), - )); - let blockchain = EsploraBlockchain::from_client( - client_builder.clone().build_async().unwrap(), - BDK_CLIENT_STOP_GAP, - ) - .with_concurrency(BDK_CLIENT_CONCURRENCY); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - client_builder.clone().build_async().unwrap(), + let sync_config = sync_config.unwrap_or(EsploraSyncConfig::default()); + Arc::new(ChainSource::new_esplora( + server_url.clone(), + sync_config, + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), + Arc::clone(&config), Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - client_builder.clone().build_async().unwrap(), + Arc::clone(&node_metrics), + )) + }, + Some(ChainDataSourceConfig::BitcoindRpc { rpc_host, rpc_port, rpc_user, rpc_password }) => { + Arc::new(ChainSource::new_bitcoind_rpc( + rpc_host.clone(), + *rpc_port, + rpc_user.clone(), + rpc_password.clone(), + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), - )); - (blockchain, tx_sync, tx_broadcaster, fee_estimator) + Arc::clone(&node_metrics), + )) }, None => { // Default to Esplora client. 
let server_url = DEFAULT_ESPLORA_SERVER_URL.to_string(); - let tx_sync = Arc::new(EsploraSyncClient::new(server_url, Arc::clone(&logger))); - let blockchain = - EsploraBlockchain::from_client(tx_sync.client().clone(), BDK_CLIENT_STOP_GAP) - .with_concurrency(BDK_CLIENT_CONCURRENCY); - let tx_broadcaster = Arc::new(TransactionBroadcaster::new( - tx_sync.client().clone(), - Arc::clone(&logger), - )); - let fee_estimator = Arc::new(OnchainFeeEstimator::new( - tx_sync.client().clone(), + let sync_config = EsploraSyncConfig::default(); + Arc::new(ChainSource::new_esplora( + server_url.clone(), + sync_config, + Arc::clone(&wallet), + Arc::clone(&fee_estimator), + Arc::clone(&tx_broadcaster), + Arc::clone(&kv_store), Arc::clone(&config), Arc::clone(&logger), - )); - (blockchain, tx_sync, tx_broadcaster, fee_estimator) + Arc::clone(&node_metrics), + )) }, }; let runtime = Arc::new(RwLock::new(None)); - let wallet = Arc::new(Wallet::new( - blockchain, - bdk_wallet, - Arc::clone(&tx_broadcaster), - Arc::clone(&fee_estimator), - Arc::clone(&logger), - )); // Initialize the ChainMonitor let chain_monitor: Arc = Arc::new(chainmonitor::ChainMonitor::new( - Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), Arc::clone(&tx_broadcaster), Arc::clone(&logger), Arc::clone(&fee_estimator), @@ -720,7 +950,7 @@ fn build_with_store_internal( ) { Ok(monitors) => monitors, Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { + if e.kind() == lightning::io::ErrorKind::NotFound { Vec::new() } else { log_error!(logger, "Failed to read channel monitors: {}", e.to_string()); @@ -773,7 +1003,7 @@ fn build_with_store_internal( } else { // We're starting a fresh node. 
let genesis_block_hash = - bitcoin::blockdata::constants::genesis_block(config.network.into()).block_hash(); + bitcoin::blockdata::constants::genesis_block(config.network).block_hash(); let chain_params = ChainParameters { network: config.network.into(), @@ -817,6 +1047,7 @@ fn build_with_store_internal( Arc::new(message_router), Arc::clone(&channel_manager), IgnoringMessageHandler {}, + IgnoringMessageHandler {}, )); let ephemeral_bytes: [u8; 32] = keys_manager.get_secure_random_bytes(); @@ -830,23 +1061,24 @@ fn build_with_store_internal( Arc::new(GossipSource::new_p2p(Arc::clone(&network_graph), Arc::clone(&logger))); // Reset the RGS sync timestamp in case we somehow switch gossip sources - io::utils::write_latest_rgs_sync_timestamp( - 0, - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .map_err(|e| { - log_error!(logger, "Failed writing to store: {}", e); - BuildError::WriteFailed - })?; + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_rgs_snapshot_timestamp = None; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .map_err(|e| { + log_error!(logger, "Failed writing to store: {}", e); + BuildError::WriteFailed + })?; + } p2p_source }, GossipSourceConfig::RapidGossipSync(rgs_server) => { - let latest_sync_timestamp = io::utils::read_latest_rgs_sync_timestamp( - Arc::clone(&kv_store), - Arc::clone(&logger), - ) - .unwrap_or(0); + let latest_sync_timestamp = + node_metrics.read().unwrap().latest_rgs_snapshot_timestamp.unwrap_or(0); Arc::new(GossipSource::new_rgs( rgs_server.clone(), latest_sync_timestamp, @@ -863,7 +1095,7 @@ fn build_with_store_internal( let liquidity_manager = Arc::new(LiquidityManager::new( Arc::clone(&keys_manager), Arc::clone(&channel_manager), - Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), None, None, liquidity_client_config, @@ -892,14 +1124,14 @@ fn build_with_store_internal( chan_handler: Arc::clone(&channel_manager), 
route_handler: Arc::clone(&p2p_gossip_sync) as Arc, - onion_message_handler: onion_messenger, + onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, }, GossipSync::Rapid(_) => MessageHandler { chan_handler: Arc::clone(&channel_manager), route_handler: Arc::new(IgnoringMessageHandler {}) as Arc, - onion_message_handler: onion_messenger, + onion_message_handler: Arc::clone(&onion_messenger), custom_message_handler, }, GossipSync::None => { @@ -931,7 +1163,7 @@ fn build_with_store_internal( let output_sweeper = match io::utils::read_output_sweeper( Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), - Arc::clone(&tx_sync), + Arc::clone(&chain_source), Arc::clone(&keys_manager), Arc::clone(&kv_store), Arc::clone(&logger), @@ -943,7 +1175,7 @@ fn build_with_store_internal( channel_manager.current_best_block(), Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), - Some(Arc::clone(&tx_sync)), + Some(Arc::clone(&chain_source)), Arc::clone(&keys_manager), Arc::clone(&keys_manager), Arc::clone(&kv_store), @@ -1005,28 +1237,20 @@ fn build_with_store_internal( let (stop_sender, _) = tokio::sync::watch::channel(()); let (event_handling_stopped_sender, _) = tokio::sync::watch::channel(()); - let is_listening = Arc::new(AtomicBool::new(false)); - let latest_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_onchain_wallet_sync_timestamp = Arc::new(RwLock::new(None)); - let latest_fee_rate_cache_update_timestamp = Arc::new(RwLock::new(None)); - let latest_rgs_snapshot_timestamp = Arc::new(RwLock::new(None)); - let latest_node_announcement_broadcast_timestamp = Arc::new(RwLock::new(None)); - let latest_channel_monitor_archival_height = Arc::new(RwLock::new(None)); - Ok(Node { runtime, stop_sender, event_handling_stopped_sender, config, wallet, - tx_sync, + chain_source, tx_broadcaster, - fee_estimator, event_queue, channel_manager, chain_monitor, output_sweeper, peer_manager, + onion_messenger, connection_manager, keys_manager, 
network_graph, @@ -1039,12 +1263,7 @@ fn build_with_store_internal( peer_store, payment_store, is_listening, - latest_wallet_sync_timestamp, - latest_onchain_wallet_sync_timestamp, - latest_fee_rate_cache_update_timestamp, - latest_rgs_snapshot_timestamp, - latest_node_announcement_broadcast_timestamp, - latest_channel_monitor_archival_height, + node_metrics, }) } @@ -1082,3 +1301,74 @@ fn seed_bytes_from_config( }, } } + +fn derive_vss_xprv( + config: Arc, seed_bytes: &[u8; 64], logger: Arc, +) -> Result { + use bitcoin::key::Secp256k1; + + let xprv = Xpriv::new_master(config.network, seed_bytes).map_err(|e| { + log_error!(logger, "Failed to derive master secret: {}", e); + BuildError::InvalidSeedBytes + })?; + + xprv.derive_priv(&Secp256k1::new(), &[ChildNumber::Hardened { index: 877 }]).map_err(|e| { + log_error!(logger, "Failed to derive VSS secret: {}", e); + BuildError::KVStoreSetupFailed + }) +} + +/// Sanitize the user-provided node alias to ensure that it is a valid protocol-specified UTF-8 string. +pub(crate) fn sanitize_alias(alias_str: &str) -> Result { + let alias = alias_str.trim(); + + // Alias must be 32-bytes long or less. 
+ if alias.as_bytes().len() > 32 { + return Err(BuildError::InvalidNodeAlias); + } + + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Ok(NodeAlias(bytes)) +} + +#[cfg(test)] +mod tests { + use super::{sanitize_alias, BuildError, NodeAlias}; + + #[test] + fn sanitize_empty_node_alias() { + // Empty node alias + let alias = ""; + let mut buf = [0u8; 32]; + buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + + let expected_node_alias = NodeAlias([0; 32]); + let node_alias = sanitize_alias(alias).unwrap(); + assert_eq!(node_alias, expected_node_alias); + } + + #[test] + fn sanitize_alias_with_sandwiched_null() { + // Alias with emojis + let alias = "I\u{1F496}LDK-Node!"; + let mut buf = [0u8; 32]; + buf[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + let expected_alias = NodeAlias(buf); + + let user_provided_alias = "I\u{1F496}LDK-Node!\0\u{26A1}"; + let node_alias = sanitize_alias(user_provided_alias).unwrap(); + + let node_alias_display = format!("{}", node_alias); + + assert_eq!(alias, &node_alias_display); + assert_ne!(expected_alias, node_alias); + } + + #[test] + fn sanitize_alias_gt_32_bytes() { + let alias = "This is a string longer than thirty-two bytes!"; // 46 bytes + let node = sanitize_alias(alias); + assert_eq!(node.err().unwrap(), BuildError::InvalidNodeAlias); + } +} diff --git a/src/chain/bitcoind_rpc.rs b/src/chain/bitcoind_rpc.rs new file mode 100644 index 000000000..6e7360601 --- /dev/null +++ b/src/chain/bitcoind_rpc.rs @@ -0,0 +1,394 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use crate::types::{ChainMonitor, ChannelManager, Sweeper, Wallet}; + +use lightning::chain::Listen; + +use lightning_block_sync::http::HttpEndpoint; +use lightning_block_sync::http::JsonResponse; +use lightning_block_sync::poll::ValidatedBlockHeader; +use lightning_block_sync::rpc::{RpcClient, RpcError}; +use lightning_block_sync::{ + AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, Cache, +}; + +use serde::Serialize; + +use bitcoin::{BlockHash, FeeRate, Transaction, Txid}; + +use base64::prelude::{Engine, BASE64_STANDARD}; + +use std::collections::{HashMap, VecDeque}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; + +pub struct BitcoindRpcClient { + rpc_client: Arc, + latest_mempool_timestamp: AtomicU64, +} + +impl BitcoindRpcClient { + pub(crate) fn new(host: String, port: u16, rpc_user: String, rpc_password: String) -> Self { + let http_endpoint = HttpEndpoint::for_host(host.clone()).with_port(port); + let rpc_credentials = + BASE64_STANDARD.encode(format!("{}:{}", rpc_user.clone(), rpc_password.clone())); + + let rpc_client = Arc::new( + RpcClient::new(&rpc_credentials, http_endpoint) + .expect("RpcClient::new is actually infallible"), + ); + + let latest_mempool_timestamp = AtomicU64::new(0); + + Self { rpc_client, latest_mempool_timestamp } + } + + pub(crate) async fn broadcast_transaction(&self, tx: &Transaction) -> std::io::Result { + let tx_serialized = bitcoin::consensus::encode::serialize_hex(tx); + let tx_json = serde_json::json!(tx_serialized); + self.rpc_client.call_method::("sendrawtransaction", &vec![tx_json]).await + } + + pub(crate) async fn get_fee_estimate_for_target( + &self, num_blocks: usize, estimation_mode: FeeRateEstimationMode, + ) -> std::io::Result { + let num_blocks_json = serde_json::json!(num_blocks); + let estimation_mode_json = serde_json::json!(estimation_mode); + self.rpc_client + .call_method::( + "estimatesmartfee", + &vec![num_blocks_json, estimation_mode_json], + ) + .await + 
.map(|resp| resp.0) + } + + pub(crate) async fn get_mempool_minimum_fee_rate(&self) -> std::io::Result { + self.rpc_client + .call_method::("getmempoolinfo", &vec![]) + .await + .map(|resp| resp.0) + } + + pub(crate) async fn get_raw_transaction( + &self, txid: &Txid, + ) -> std::io::Result> { + let txid_hex = bitcoin::consensus::encode::serialize_hex(txid); + let txid_json = serde_json::json!(txid_hex); + match self + .rpc_client + .call_method::("getrawtransaction", &vec![txid_json]) + .await + { + Ok(resp) => Ok(Some(resp.0)), + Err(e) => match e.into_inner() { + Some(inner) => { + let rpc_error_res: Result, _> = inner.downcast(); + + match rpc_error_res { + Ok(rpc_error) => { + // Check if it's the 'not found' error code. + if rpc_error.code == -5 { + Ok(None) + } else { + Err(std::io::Error::new(std::io::ErrorKind::Other, rpc_error)) + } + }, + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + } + }, + None => Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to process getrawtransaction response", + )), + }, + } + } + + pub(crate) async fn get_raw_mempool(&self) -> std::io::Result> { + let verbose_flag_json = serde_json::json!(true); + self.rpc_client + .call_method::("getrawmempool", &vec![verbose_flag_json]) + .await + .map(|resp| resp.0) + } + + /// Get mempool transactions, alongside their first-seen unix timestamps. + /// + /// This method is an adapted version of `bdk_bitcoind_rpc::Emitter::mempool`. It emits each + /// transaction only once, unless we cannot assume the transaction's ancestors are already + /// emitted. 
+ pub(crate) async fn get_mempool_transactions_and_timestamp_at_height( + &self, best_processed_height: u32, + ) -> std::io::Result> { + let prev_mempool_time = self.latest_mempool_timestamp.load(Ordering::Relaxed); + let mut latest_time = prev_mempool_time; + + let mempool_entries = self.get_raw_mempool().await?; + let mut txs_to_emit = Vec::new(); + + for entry in mempool_entries { + if entry.time > latest_time { + latest_time = entry.time; + } + + // Avoid emitting transactions that are already emitted if we can guarantee + // blocks containing ancestors are already emitted. The bitcoind rpc interface + // provides us with the block height that the tx is introduced to the mempool. + // If we have already emitted the block of height, we can assume that all + // ancestor txs have been processed by the receiver. + let ancestor_within_height = entry.height <= best_processed_height; + let is_already_emitted = entry.time <= prev_mempool_time; + if is_already_emitted && ancestor_within_height { + continue; + } + + match self.get_raw_transaction(&entry.txid).await { + Ok(Some(tx)) => { + txs_to_emit.push((tx, entry.time)); + }, + Ok(None) => { + continue; + }, + Err(e) => return Err(e), + }; + } + + if !txs_to_emit.is_empty() { + self.latest_mempool_timestamp.store(latest_time, Ordering::Release); + } + Ok(txs_to_emit) + } +} + +impl BlockSource for BitcoindRpcClient { + fn get_header<'a>( + &'a self, header_hash: &'a BlockHash, height_hint: Option, + ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { + Box::pin(async move { self.rpc_client.get_header(header_hash, height_hint).await }) + } + + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> AsyncBlockSourceResult<'a, BlockData> { + Box::pin(async move { self.rpc_client.get_block(header_hash).await }) + } + + fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<(BlockHash, Option)> { + Box::pin(async move { self.rpc_client.get_best_block().await }) + } +} + +pub(crate) struct FeeResponse(pub 
FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + if !self.0["errors"].is_null() { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + self.0["errors"].to_string(), + )); + } + let fee_rate_btc_per_kvbyte = self.0["feerate"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. + let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(FeeResponse(fee_rate)) + } +} + +pub struct MempoolMinFeeResponse(pub FeeRate); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let fee_rate_btc_per_kvbyte = self.0["mempoolminfee"] + .as_f64() + .ok_or(std::io::Error::new(std::io::ErrorKind::Other, "Failed to parse fee rate"))?; + // Bitcoin Core gives us a feerate in BTC/KvB. + // Thus, we multiply by 25_000_000 (10^8 / 4) to get satoshis/kwu. 
+ let fee_rate = { + let fee_rate_sat_per_kwu = (fee_rate_btc_per_kvbyte * 25_000_000.0).round() as u64; + FeeRate::from_sat_per_kwu(fee_rate_sat_per_kwu) + }; + Ok(MempoolMinFeeResponse(fee_rate)) + } +} + +pub struct GetRawTransactionResponse(pub Transaction); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let tx = self + .0 + .as_str() + .ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + )) + .and_then(|s| { + bitcoin::consensus::encode::deserialize_hex(s).map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawtransaction response", + ) + }) + })?; + + Ok(GetRawTransactionResponse(tx)) + } +} + +pub struct GetRawMempoolResponse(Vec); + +impl TryInto for JsonResponse { + type Error = std::io::Error; + fn try_into(self) -> std::io::Result { + let mut mempool_transactions = Vec::new(); + let res = self.0.as_object().ok_or(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + ))?; + + for (k, v) in res { + let txid = match bitcoin::consensus::encode::deserialize_hex(k) { + Ok(txid) => txid, + Err(_) => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + + let time = match v["time"].as_u64() { + Some(time) => time, + None => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + + let height = match v["height"].as_u64().and_then(|h| h.try_into().ok()) { + Some(height) => height, + None => { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Failed to parse getrawmempool response", + )); + }, + }; + let entry = RawMempoolEntry { txid, time, height }; + + mempool_transactions.push(entry); + } + + Ok(GetRawMempoolResponse(mempool_transactions)) + } +} + +#[derive(Debug, Clone)] +pub(crate) struct RawMempoolEntry { + 
/// The transaction id + txid: Txid, + /// Local time transaction entered pool in seconds since 1 Jan 1970 GMT + time: u64, + /// Block height when transaction entered pool + height: u32, +} + +#[derive(Debug, Clone, Serialize)] +#[serde(rename_all = "UPPERCASE")] +pub(crate) enum FeeRateEstimationMode { + Economical, + Conservative, +} + +const MAX_HEADER_CACHE_ENTRIES: usize = 100; + +pub(crate) struct BoundedHeaderCache { + header_map: HashMap, + recently_seen: VecDeque, +} + +impl BoundedHeaderCache { + pub(crate) fn new() -> Self { + let header_map = HashMap::new(); + let recently_seen = VecDeque::new(); + Self { header_map, recently_seen } + } +} + +impl Cache for BoundedHeaderCache { + fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { + self.header_map.get(block_hash) + } + + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.recently_seen.push_back(block_hash); + self.header_map.insert(block_hash, block_header); + + if self.header_map.len() >= MAX_HEADER_CACHE_ENTRIES { + // Keep dropping old entries until we've actually removed a header entry. 
+ while let Some(oldest_entry) = self.recently_seen.pop_front() { + if self.header_map.remove(&oldest_entry).is_some() { + break; + } + } + } + } + + fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { + self.recently_seen.retain(|e| e != block_hash); + self.header_map.remove(block_hash) + } +} + +pub(crate) struct ChainListener { + pub(crate) onchain_wallet: Arc, + pub(crate) channel_manager: Arc, + pub(crate) chain_monitor: Arc, + pub(crate) output_sweeper: Arc, +} + +impl Listen for ChainListener { + fn filtered_block_connected( + &self, header: &bitcoin::block::Header, + txdata: &lightning::chain::transaction::TransactionData, height: u32, + ) { + self.onchain_wallet.filtered_block_connected(header, txdata, height); + self.channel_manager.filtered_block_connected(header, txdata, height); + self.chain_monitor.filtered_block_connected(header, txdata, height); + self.output_sweeper.filtered_block_connected(header, txdata, height); + } + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + self.onchain_wallet.block_connected(block, height); + self.channel_manager.block_connected(block, height); + self.chain_monitor.block_connected(block, height); + self.output_sweeper.block_connected(block, height); + } + + fn block_disconnected(&self, header: &bitcoin::block::Header, height: u32) { + self.onchain_wallet.block_disconnected(header, height); + self.channel_manager.block_disconnected(header, height); + self.chain_monitor.block_disconnected(header, height); + self.output_sweeper.block_disconnected(header, height); + } +} diff --git a/src/chain/mod.rs b/src/chain/mod.rs new file mode 100644 index 000000000..a7906fc0f --- /dev/null +++ b/src/chain/mod.rs @@ -0,0 +1,1136 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. + +mod bitcoind_rpc; + +use crate::chain::bitcoind_rpc::{ + BitcoindRpcClient, BoundedHeaderCache, ChainListener, FeeRateEstimationMode, +}; +use crate::config::{ + Config, EsploraSyncConfig, BDK_CLIENT_CONCURRENCY, BDK_CLIENT_STOP_GAP, + BDK_WALLET_SYNC_TIMEOUT_SECS, FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS, LDK_WALLET_SYNC_TIMEOUT_SECS, + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, TX_BROADCAST_TIMEOUT_SECS, + WALLET_SYNC_INTERVAL_MINIMUM_SECS, +}; +use crate::fee_estimator::{ + apply_post_estimation_adjustments, get_all_conf_targets, get_num_block_defaults_for_target, + ConfirmationTarget, OnchainFeeEstimator, +}; +use crate::io::utils::write_node_metrics; +use crate::logger::{log_bytes, log_error, log_info, log_trace, FilesystemLogger, Logger}; +use crate::types::{Broadcaster, ChainMonitor, ChannelManager, DynStore, Sweeper, Wallet}; +use crate::{Error, NodeMetrics}; + +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; +use lightning::chain::{Confirm, Filter, Listen}; +use lightning::util::ser::Writeable; + +use lightning_transaction_sync::EsploraSyncClient; + +use lightning_block_sync::init::{synchronize_listeners, validate_best_block_header}; +use lightning_block_sync::poll::{ChainPoller, ChainTip, ValidatedBlockHeader}; +use lightning_block_sync::SpvClient; + +use bdk_esplora::EsploraAsyncExt; + +use esplora_client::AsyncClient as EsploraAsyncClient; + +use bitcoin::{FeeRate, Network}; + +use std::collections::HashMap; +use std::sync::{Arc, Mutex, RwLock}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +// The default Esplora server we're using. +pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; + +// The default Esplora client timeout we're using. 
+pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; + +const CHAIN_POLLING_INTERVAL_SECS: u64 = 1; + +pub(crate) enum WalletSyncStatus { + Completed, + InProgress { subscribers: tokio::sync::broadcast::Sender> }, +} + +impl WalletSyncStatus { + fn register_or_subscribe_pending_sync( + &mut self, + ) -> Option>> { + match self { + WalletSyncStatus::Completed => { + // We're first to register for a sync. + let (tx, _) = tokio::sync::broadcast::channel(1); + *self = WalletSyncStatus::InProgress { subscribers: tx }; + None + }, + WalletSyncStatus::InProgress { subscribers } => { + // A sync is in-progress, we subscribe. + let rx = subscribers.subscribe(); + Some(rx) + }, + } + } + + fn propagate_result_to_subscribers(&mut self, res: Result<(), Error>) { + // Send the notification to any other tasks that might be waiting on it by now. + { + match self { + WalletSyncStatus::Completed => { + // No sync in-progress, do nothing. + return; + }, + WalletSyncStatus::InProgress { subscribers } => { + // A sync is in-progress, we notify subscribers. 
+ if subscribers.receiver_count() > 0 { + match subscribers.send(res) { + Ok(_) => (), + Err(e) => { + debug_assert!( + false, + "Failed to send wallet sync result to subscribers: {:?}", + e + ); + }, + } + } + *self = WalletSyncStatus::Completed; + }, + } + } + } +} + +pub(crate) enum ChainSource { + Esplora { + sync_config: EsploraSyncConfig, + esplora_client: EsploraAsyncClient, + onchain_wallet: Arc, + onchain_wallet_sync_status: Mutex, + tx_sync: Arc>>, + lightning_wallet_sync_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, + }, + BitcoindRpc { + bitcoind_rpc_client: Arc, + header_cache: tokio::sync::Mutex, + latest_chain_tip: RwLock>, + onchain_wallet: Arc, + wallet_polling_status: Mutex, + fee_estimator: Arc, + tx_broadcaster: Arc, + kv_store: Arc, + config: Arc, + logger: Arc, + node_metrics: Arc>, + }, +} + +impl ChainSource { + pub(crate) fn new_esplora( + server_url: String, sync_config: EsploraSyncConfig, onchain_wallet: Arc, + fee_estimator: Arc, tx_broadcaster: Arc, + kv_store: Arc, config: Arc, logger: Arc, + node_metrics: Arc>, + ) -> Self { + let mut client_builder = esplora_client::Builder::new(&server_url); + client_builder = client_builder.timeout(DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS); + let esplora_client = client_builder.build_async().unwrap(); + let tx_sync = + Arc::new(EsploraSyncClient::from_client(esplora_client.clone(), Arc::clone(&logger))); + let onchain_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + let lightning_wallet_sync_status = Mutex::new(WalletSyncStatus::Completed); + Self::Esplora { + sync_config, + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + tx_sync, + lightning_wallet_sync_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } + + pub(crate) fn new_bitcoind_rpc( + host: String, port: u16, rpc_user: String, rpc_password: String, + onchain_wallet: Arc, fee_estimator: 
Arc, + tx_broadcaster: Arc, kv_store: Arc, config: Arc, + logger: Arc, node_metrics: Arc>, + ) -> Self { + let bitcoind_rpc_client = + Arc::new(BitcoindRpcClient::new(host, port, rpc_user, rpc_password)); + let header_cache = tokio::sync::Mutex::new(BoundedHeaderCache::new()); + let latest_chain_tip = RwLock::new(None); + let wallet_polling_status = Mutex::new(WalletSyncStatus::Completed); + Self::BitcoindRpc { + bitcoind_rpc_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + fee_estimator, + tx_broadcaster, + kv_store, + config, + logger, + node_metrics, + } + } + + pub(crate) async fn continuously_sync_wallets( + &self, mut stop_sync_receiver: tokio::sync::watch::Receiver<()>, + channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) { + match self { + Self::Esplora { sync_config, logger, .. } => { + // Setup syncing intervals + let onchain_wallet_sync_interval_secs = sync_config + .onchain_wallet_sync_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut onchain_wallet_sync_interval = + tokio::time::interval(Duration::from_secs(onchain_wallet_sync_interval_secs)); + onchain_wallet_sync_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let fee_rate_cache_update_interval_secs = sync_config + .fee_rate_cache_update_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(fee_rate_cache_update_interval_secs)); + // When starting up, we just blocked on updating, so skip the first tick. 
+ fee_rate_update_interval.reset(); + fee_rate_update_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let lightning_wallet_sync_interval_secs = sync_config + .lightning_wallet_sync_interval_secs + .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); + let mut lightning_wallet_sync_interval = + tokio::time::interval(Duration::from_secs(lightning_wallet_sync_interval_secs)); + lightning_wallet_sync_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + // Start the syncing loop. + loop { + tokio::select! { + _ = stop_sync_receiver.changed() => { + log_trace!( + logger, + "Stopping background syncing on-chain wallet.", + ); + return; + } + _ = onchain_wallet_sync_interval.tick() => { + let _ = self.sync_onchain_wallet().await; + } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } + _ = lightning_wallet_sync_interval.tick() => { + let _ = self.sync_lightning_wallet( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&output_sweeper), + ).await; + } + } + } + }, + Self::BitcoindRpc { + bitcoind_rpc_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + kv_store, + config, + logger, + node_metrics, + .. + } => { + // First register for the wallet polling status to make sure `Node::sync_wallets` calls + // wait on the result before proceeding. + { + let mut status_lock = wallet_polling_status.lock().unwrap(); + if status_lock.register_or_subscribe_pending_sync().is_some() { + debug_assert!(false, "Sync already in progress. 
This should never happen."); + } + } + + let channel_manager_best_block_hash = + channel_manager.current_best_block().block_hash; + let sweeper_best_block_hash = output_sweeper.current_best_block().block_hash; + let onchain_wallet_best_block_hash = onchain_wallet.current_best_block().block_hash; + + let mut chain_listeners = vec![ + ( + onchain_wallet_best_block_hash, + &**onchain_wallet as &(dyn Listen + Send + Sync), + ), + ( + channel_manager_best_block_hash, + &*channel_manager as &(dyn Listen + Send + Sync), + ), + (sweeper_best_block_hash, &*output_sweeper as &(dyn Listen + Send + Sync)), + ]; + + // TODO: Eventually we might want to see if we can synchronize `ChannelMonitor`s + // before giving them to `ChainMonitor` it the first place. However, this isn't + // trivial as we load them on initialization (in the `Builder`) and only gain + // network access during `start`. For now, we just make sure we get the worst known + // block hash and sychronize them via `ChainMonitor`. + if let Some(worst_channel_monitor_block_hash) = chain_monitor + .list_monitors() + .iter() + .flat_map(|(txo, _)| chain_monitor.get_monitor(*txo)) + .map(|m| m.current_best_block()) + .min_by_key(|b| b.height) + .map(|b| b.block_hash) + { + chain_listeners.push(( + worst_channel_monitor_block_hash, + &*chain_monitor as &(dyn Listen + Send + Sync), + )); + } + + loop { + let mut locked_header_cache = header_cache.lock().await; + match synchronize_listeners( + bitcoind_rpc_client.as_ref(), + config.network, + &mut *locked_header_cache, + chain_listeners.clone(), + ) + .await + { + Ok(chain_tip) => { + { + *latest_chain_tip.write().unwrap() = Some(chain_tip); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = + 
unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ) + .unwrap_or_else(|e| { + log_error!(logger, "Failed to persist node metrics: {}", e); + }); + } + break; + }, + + Err(e) => { + log_error!(logger, "Failed to synchronize chain listeners: {:?}", e); + tokio::time::sleep(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)) + .await; + }, + } + } + + // Now propagate the initial result to unblock waiting subscribers. + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(Ok(())); + + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + let mut fee_rate_update_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + // When starting up, we just blocked on updating, so skip the first tick. + fee_rate_update_interval.reset(); + fee_rate_update_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + // Start the polling loop. + loop { + tokio::select! { + _ = stop_sync_receiver.changed() => { + log_trace!( + logger, + "Stopping polling for new chain data.", + ); + return; + } + _ = chain_polling_interval.tick() => { + let _ = self.poll_and_update_listeners(Arc::clone(&channel_manager), Arc::clone(&chain_monitor), Arc::clone(&output_sweeper)).await; + } + _ = fee_rate_update_interval.tick() => { + let _ = self.update_fee_rate_estimates().await; + } + } + } + }, + } + } + + // Synchronize the onchain wallet via transaction-based protocols (i.e., Esplora, Electrum, + // etc.) + pub(crate) async fn sync_onchain_wallet(&self) -> Result<(), Error> { + match self { + Self::Esplora { + esplora_client, + onchain_wallet, + onchain_wallet_sync_status, + kv_store, + logger, + node_metrics, + .. 
+ } => { + let receiver_res = { + let mut status_lock = onchain_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let res = { + // If this is our first sync, do a full scan with the configured gap limit. + // Otherwise just do an incremental sync. + let incremental_sync = + node_metrics.read().unwrap().latest_onchain_wallet_sync_timestamp.is_some(); + + macro_rules! get_and_apply_wallet_update { + ($sync_future: expr) => {{ + let now = Instant::now(); + match $sync_future.await { + Ok(res) => match res { + Ok(update) => match onchain_wallet.apply_update(update) { + Ok(()) => { + log_info!( + logger, + "{} of on-chain wallet finished in {}ms.", + if incremental_sync { "Incremental sync" } else { "Sync" }, + now.elapsed().as_millis() + ); + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + write_node_metrics(&*locked_node_metrics, Arc::clone(&kv_store), Arc::clone(&logger))?; + } + Ok(()) + }, + Err(e) => Err(e), + }, + Err(e) => match *e { + esplora_client::Error::Reqwest(he) => { + log_error!( + logger, + "{} of on-chain wallet failed due to HTTP connection error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + he + ); + Err(Error::WalletOperationFailed) + }, + _ => { + log_error!( + logger, + "{} of on-chain wallet failed due to Esplora error: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationFailed) + }, + }, + 
}, + Err(e) => { + log_error!( + logger, + "{} of on-chain wallet timed out: {}", + if incremental_sync { "Incremental sync" } else { "Sync" }, + e + ); + Err(Error::WalletOperationTimeout) + }, + } + }} + } + + if incremental_sync { + let sync_request = onchain_wallet.get_incremental_sync_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.sync(sync_request, BDK_CLIENT_CONCURRENCY), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } else { + let full_scan_request = onchain_wallet.get_full_scan_request(); + let wallet_sync_timeout_fut = tokio::time::timeout( + Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), + esplora_client.full_scan( + full_scan_request, + BDK_CLIENT_STOP_GAP, + BDK_CLIENT_CONCURRENCY, + ), + ); + get_and_apply_wallet_update!(wallet_sync_timeout_fut) + } + }; + + onchain_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + }, + Self::BitcoindRpc { .. } => { + // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via + // `ChainPoller`. So nothing to do here. + unreachable!("Onchain wallet will be synced via chain polling") + }, + } + } + + // Synchronize the Lightning wallet via transaction-based protocols (i.e., Esplora, Electrum, + // etc.) + pub(crate) async fn sync_lightning_wallet( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + match self { + Self::Esplora { + tx_sync, + lightning_wallet_sync_status, + kv_store, + logger, + node_metrics, + .. 
+ } => { + let sync_cman = Arc::clone(&channel_manager); + let sync_cmon = Arc::clone(&chain_monitor); + let sync_sweeper = Arc::clone(&output_sweeper); + let confirmables = vec![ + &*sync_cman as &(dyn Confirm + Sync + Send), + &*sync_cmon as &(dyn Confirm + Sync + Send), + &*sync_sweeper as &(dyn Confirm + Sync + Send), + ]; + + let receiver_res = { + let mut status_lock = lightning_wallet_sync_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); + log_error!(logger, "Failed to receive wallet sync result: {:?}", e); + Error::WalletOperationFailed + })?; + } + let res = { + let timeout_fut = tokio::time::timeout( + Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), + tx_sync.sync(confirmables), + ); + let now = Instant::now(); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_info!( + logger, + "Sync of Lightning wallet finished in {}ms.", + now.elapsed().as_millis() + ); + + let unix_time_secs_opt = SystemTime::now() + .duration_since(UNIX_EPOCH) + .ok() + .map(|d| d.as_secs()); + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = + unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } + + periodically_archive_fully_resolved_monitors( + Arc::clone(&channel_manager), + Arc::clone(&chain_monitor), + Arc::clone(&kv_store), + Arc::clone(&logger), + Arc::clone(&node_metrics), + )?; + Ok(()) + }, + Err(e) => { + log_error!(logger, "Sync of Lightning wallet failed: {}", e); + Err(e.into()) + }, + }, + Err(e) => { + log_error!(logger, "Lightning wallet sync timed out: {}", e); + Err(Error::TxSyncTimeout) + }, + } + }; + + 
lightning_wallet_sync_status.lock().unwrap().propagate_result_to_subscribers(res); + + res + }, + Self::BitcoindRpc { .. } => { + // In BitcoindRpc mode we sync lightning and onchain wallet in one go by via + // `ChainPoller`. So nothing to do here. + unreachable!("Lightning wallet will be synced via chain polling") + }, + } + } + + pub(crate) async fn poll_and_update_listeners( + &self, channel_manager: Arc, chain_monitor: Arc, + output_sweeper: Arc, + ) -> Result<(), Error> { + match self { + Self::Esplora { .. } => { + // In Esplora mode we sync lightning and onchain wallets via + // `sync_onchain_wallet` and `sync_lightning_wallet`. So nothing to do here. + unreachable!("Listeners will be synced via transction-based syncing") + }, + Self::BitcoindRpc { + bitcoind_rpc_client, + header_cache, + latest_chain_tip, + onchain_wallet, + wallet_polling_status, + kv_store, + config, + logger, + node_metrics, + .. + } => { + let receiver_res = { + let mut status_lock = wallet_polling_status.lock().unwrap(); + status_lock.register_or_subscribe_pending_sync() + }; + + if let Some(mut sync_receiver) = receiver_res { + log_info!(logger, "Sync in progress, skipping."); + return sync_receiver.recv().await.map_err(|e| { + debug_assert!(false, "Failed to receive wallet polling result: {:?}", e); + log_error!(logger, "Failed to receive wallet polling result: {:?}", e); + Error::WalletOperationFailed + })?; + } + + let latest_chain_tip_opt = latest_chain_tip.read().unwrap().clone(); + let chain_tip = if let Some(tip) = latest_chain_tip_opt { + tip + } else { + match validate_best_block_header(bitcoind_rpc_client.as_ref()).await { + Ok(tip) => { + *latest_chain_tip.write().unwrap() = Some(tip); + tip + }, + Err(e) => { + log_error!(logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status + .lock() + .unwrap() + .propagate_result_to_subscribers(res); + return res; + }, + } + }; + + let mut locked_header_cache = 
header_cache.lock().await; + let chain_poller = + ChainPoller::new(Arc::clone(&bitcoind_rpc_client), config.network); + let chain_listener = ChainListener { + onchain_wallet: Arc::clone(&onchain_wallet), + channel_manager: Arc::clone(&channel_manager), + chain_monitor, + output_sweeper, + }; + let mut spv_client = SpvClient::new( + chain_tip, + chain_poller, + &mut *locked_header_cache, + &chain_listener, + ); + let mut chain_polling_interval = + tokio::time::interval(Duration::from_secs(CHAIN_POLLING_INTERVAL_SECS)); + chain_polling_interval + .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); + + match spv_client.poll_best_tip().await { + Ok((ChainTip::Better(tip), true)) => { + *latest_chain_tip.write().unwrap() = Some(tip); + }, + Ok(_) => {}, + Err(e) => { + log_error!(logger, "Failed to poll for chain data: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let cur_height = channel_manager.current_best_block().height; + match bitcoind_rpc_client + .get_mempool_transactions_and_timestamp_at_height(cur_height) + .await + { + Ok(unconfirmed_txs) => { + let _ = onchain_wallet.apply_unconfirmed_txs(unconfirmed_txs); + }, + Err(e) => { + log_error!(logger, "Failed to poll for mempool transactions: {:?}", e); + let res = Err(Error::TxSyncFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_lightning_wallet_sync_timestamp = unix_time_secs_opt; + locked_node_metrics.latest_onchain_wallet_sync_timestamp = unix_time_secs_opt; + + let write_res = write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + ); + match write_res { + Ok(()) => (), + Err(e) => { + 
log_error!(logger, "Failed to persist node metrics: {}", e); + let res = Err(Error::PersistenceFailed); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + return res; + }, + } + + let res = Ok(()); + wallet_polling_status.lock().unwrap().propagate_result_to_subscribers(res); + res + }, + } + } + + pub(crate) async fn update_fee_rate_estimates(&self) -> Result<(), Error> { + match self { + Self::Esplora { + esplora_client, + fee_estimator, + config, + kv_store, + logger, + node_metrics, + .. + } => { + let now = Instant::now(); + let estimates = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + esplora_client.get_fee_estimates(), + ) + .await + .map_err(|e| { + log_error!(logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })? + .map_err(|e| { + log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); + Error::FeerateEstimationUpdateFailed + })?; + + if estimates.is_empty() && config.network == Network::Bitcoin { + // Ensure we fail if we didn't receive any estimates. 
+ log_error!( + logger, + "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", + ); + return Err(Error::FeerateEstimationUpdateFailed); + } + + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + for target in confirmation_targets { + let num_blocks = get_num_block_defaults_for_target(target); + + let converted_estimate_sat_vb = + esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err( + |e| { + log_error!( + logger, + "Failed to convert fee rate estimates for {:?}: {}", + target, + e + ); + Error::FeerateEstimationUpdateFailed + }, + )?; + + let fee_rate = + FeeRate::from_sat_per_kwu((converted_estimate_sat_vb * 250.0) as u64); + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + fee_estimator.set_fee_rate_cache(new_fee_rate_cache); + + log_info!( + logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } + + Ok(()) + }, + Self::BitcoindRpc { + bitcoind_rpc_client, + fee_estimator, + config, + kv_store, + logger, + node_metrics, + .. + } => { + macro_rules! 
get_fee_rate_update { + ($estimation_fut: expr) => {{ + let update_res = tokio::time::timeout( + Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), + $estimation_fut, + ) + .await + .map_err(|e| { + log_error!(logger, "Updating fee rate estimates timed out: {}", e); + Error::FeerateEstimationUpdateTimeout + })?; + update_res + }}; + } + let confirmation_targets = get_all_conf_targets(); + + let mut new_fee_rate_cache = HashMap::with_capacity(10); + let now = Instant::now(); + for target in confirmation_targets { + let fee_rate_update_res = match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee, + ) => { + let estimation_fut = bitcoind_rpc_client.get_mempool_minimum_fee_rate(); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MaximumFeeEstimate, + ) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + ConfirmationTarget::Lightning( + LdkConfirmationTarget::UrgentOnChainSweep, + ) => { + let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Conservative; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + _ => { + // Otherwise, we default to economical block-target estimate. 
+ let num_blocks = get_num_block_defaults_for_target(target); + let estimation_mode = FeeRateEstimationMode::Economical; + let estimation_fut = bitcoind_rpc_client + .get_fee_estimate_for_target(num_blocks, estimation_mode); + get_fee_rate_update!(estimation_fut) + }, + }; + + let fee_rate = match (fee_rate_update_res, config.network) { + (Ok(rate), _) => rate, + (Err(e), Network::Bitcoin) => { + // Strictly fail on mainnet. + log_error!(logger, "Failed to retrieve fee rate estimates: {}", e); + return Err(Error::FeerateEstimationUpdateFailed); + }, + (Err(e), n) if n == Network::Regtest || n == Network::Signet => { + // On regtest/signet we just fall back to the usual 1 sat/vb == 250 + // sat/kwu default. + log_error!( + logger, + "Failed to retrieve fee rate estimates: {}. Falling back to default of 1 sat/vb.", + e, + ); + FeeRate::from_sat_per_kwu(250) + }, + (Err(e), _) => { + // On testnet `estimatesmartfee` can be unreliable so we just skip in + // case of a failure, which will have us falling back to defaults. + log_error!( + logger, + "Failed to retrieve fee rate estimates: {}. Falling back to defaults.", + e, + ); + return Ok(()); + }, + }; + + // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that + // require some post-estimation adjustments to the fee rates, which we do here. + let adjusted_fee_rate = apply_post_estimation_adjustments(target, fee_rate); + + new_fee_rate_cache.insert(target, adjusted_fee_rate); + + log_trace!( + logger, + "Fee rate estimation updated for {:?}: {} sats/kwu", + target, + adjusted_fee_rate.to_sat_per_kwu(), + ); + } + + if fee_estimator.set_fee_rate_cache(new_fee_rate_cache) { + // We only log if the values changed, as it might be very spammy otherwise. 
+ log_info!( + logger, + "Fee rate cache update finished in {}ms.", + now.elapsed().as_millis() + ); + } + + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = node_metrics.write().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp = unix_time_secs_opt; + write_node_metrics( + &*locked_node_metrics, + Arc::clone(&kv_store), + Arc::clone(&logger), + )?; + } + + Ok(()) + }, + } + } + + pub(crate) async fn process_broadcast_queue(&self) { + match self { + Self::Esplora { esplora_client, tx_broadcaster, logger, .. } => { + let mut receiver = tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + esplora_client.broadcast(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(()) => { + log_trace!( + logger, + "Successfully broadcast transaction {}", + txid + ); + }, + Err(e) => match e { + esplora_client::Error::Reqwest(err) => { + if err.status() == reqwest::StatusCode::from_u16(400).ok() { + // Ignore 400, as this just means bitcoind already knows the + // transaction. + // FIXME: We can further differentiate here based on the error + // message which will be available with rust-esplora-client 0.7 and + // later. 
+ } else { + log_error!( + logger, + "Failed to broadcast due to HTTP connection error: {}", + err + ); + } + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + _ => { + log_error!( + logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + }, + Err(e) => { + log_error!( + logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } + }, + Self::BitcoindRpc { bitcoind_rpc_client, tx_broadcaster, logger, .. } => { + // While it's a bit unclear when we'd be able to lean on Bitcoin Core >v28 + // features, we should eventually switch to use `submitpackage` via the + // `rust-bitcoind-json-rpc` crate rather than just broadcasting individual + // transactions. + let mut receiver = tx_broadcaster.get_broadcast_queue().await; + while let Some(next_package) = receiver.recv().await { + for tx in &next_package { + let txid = tx.compute_txid(); + let timeout_fut = tokio::time::timeout( + Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), + bitcoind_rpc_client.broadcast_transaction(tx), + ); + match timeout_fut.await { + Ok(res) => match res { + Ok(id) => { + debug_assert_eq!(id, txid); + log_trace!( + logger, + "Successfully broadcast transaction {}", + txid + ); + }, + Err(e) => { + log_error!( + logger, + "Failed to broadcast transaction {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + }, + Err(e) => { + log_error!( + logger, + "Failed to broadcast transaction due to timeout {}: {}", + txid, + e + ); + log_trace!( + logger, + "Failed broadcast transaction bytes: {}", + log_bytes!(tx.encode()) + ); + }, + } + } + } + }, + } + } +} + +impl Filter for ChainSource { + fn register_tx(&self, 
txid: &bitcoin::Txid, script_pubkey: &bitcoin::Script) { + match self { + Self::Esplora { tx_sync, .. } => tx_sync.register_tx(txid, script_pubkey), + Self::BitcoindRpc { .. } => (), + } + } + fn register_output(&self, output: lightning::chain::WatchedOutput) { + match self { + Self::Esplora { tx_sync, .. } => tx_sync.register_output(output), + Self::BitcoindRpc { .. } => (), + } + } +} + +fn periodically_archive_fully_resolved_monitors( + channel_manager: Arc, chain_monitor: Arc, + kv_store: Arc, logger: Arc, node_metrics: Arc>, +) -> Result<(), Error> { + let mut locked_node_metrics = node_metrics.write().unwrap(); + let cur_height = channel_manager.current_best_block().height; + let should_archive = locked_node_metrics + .latest_channel_monitor_archival_height + .as_ref() + .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); + + if should_archive { + chain_monitor.archive_fully_resolved_channel_monitors(); + locked_node_metrics.latest_channel_monitor_archival_height = Some(cur_height); + write_node_metrics(&*locked_node_metrics, kv_store, logger)?; + } + Ok(()) +} diff --git a/src/config.rs b/src/config.rs index fcf6ee7d9..473261059 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,17 +1,30 @@ -use std::time::Duration; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Objects for configuring the node. 
+ +use crate::payment::SendingParameters; use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; +use lightning::util::config::ChannelConfig as LdkChannelConfig; +use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; use lightning::util::config::UserConfig; use lightning::util::logger::Level as LogLevel; use bitcoin::secp256k1::PublicKey; use bitcoin::Network; +use std::time::Duration; + // Config defaults const DEFAULT_STORAGE_DIR_PATH: &str = "/tmp/ldk_node/"; const DEFAULT_NETWORK: Network = Network::Bitcoin; -const DEFAULT_CLTV_EXPIRY_DELTA: u32 = 144; -const DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS: u64 = 300; //80; +const DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS: u64 = 80; const DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS: u64 = 30; const DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS: u64 = 60 * 10; const DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER: u64 = 3; @@ -26,13 +39,7 @@ pub(crate) const ENABLE_BACKGROUND_SYNC: bool = false; pub(crate) const BDK_CLIENT_STOP_GAP: usize = 20; // The number of concurrent requests made against the API provider. -pub(crate) const BDK_CLIENT_CONCURRENCY: u8 = 4; - -// The default Esplora server we're using. -pub(crate) const DEFAULT_ESPLORA_SERVER_URL: &str = "https://blockstream.info/api"; - -// The default Esplora client timeout we're using. -pub(crate) const DEFAULT_ESPLORA_CLIENT_TIMEOUT_SECS: u64 = 10; +pub(crate) const BDK_CLIENT_CONCURRENCY: usize = 4; // The timeout after which we abandon retrying failed payments. 
pub(crate) const LDK_PAYMENT_RETRY_TIMEOUT: Duration = Duration::from_secs(50); // (10); @@ -81,6 +88,7 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `log_dir_path` | None | /// | `network` | Bitcoin | /// | `listening_addresses` | None | +/// | `node_alias` | None | /// | `default_cltv_expiry_delta` | 144 | /// | `onchain_wallet_sync_interval_secs` | 80 | /// | `wallet_sync_interval_secs` | 30 | @@ -89,8 +97,10 @@ pub(crate) const WALLET_KEYS_SEED_LEN: usize = 64; /// | `probing_liquidity_limit_multiplier` | 3 | /// | `log_level` | Debug | /// | `anchor_channels_config` | Some(..) | +/// | `sending_parameters` | None | /// -/// See [`AnchorChannelsConfig`] for more information on its respective default values. +/// See [`AnchorChannelsConfig`] and [`SendingParameters`] for more information regarding their +/// respective default values. /// /// [`Node`]: crate::Node pub struct Config { @@ -103,21 +113,17 @@ pub struct Config { /// The used Bitcoin network. pub network: Network, /// The addresses on which the node will listen for incoming connections. + /// + /// **Note**: We will only allow opening and accepting public channels if the `node_alias` and the + /// `listening_addresses` are set. pub listening_addresses: Option>, - /// The default CLTV expiry delta to be used for payments. - pub default_cltv_expiry_delta: u32, - /// The time in-between background sync attempts of the onchain wallet, in seconds. + /// The node alias that will be used when broadcasting announcements to the gossip network. /// - /// **Note:** A minimum of 10 seconds is always enforced. - pub onchain_wallet_sync_interval_secs: u64, - /// The time in-between background sync attempts of the LDK wallet, in seconds. + /// The provided alias must be a valid UTF-8 string and no longer than 32 bytes in total. /// - /// **Note:** A minimum of 10 seconds is always enforced. 
- pub wallet_sync_interval_secs: u64, - /// The time in-between background update attempts to our fee rate cache, in seconds. - /// - /// **Note:** A minimum of 10 seconds is always enforced. - pub fee_rate_cache_update_interval_secs: u64, + /// **Note**: We will only allow opening and accepting public channels if the `node_alias` and the + /// `listening_addresses` are set. + pub node_alias: Option, /// A list of peers that we allow to establish zero confirmation channels to us. /// /// **Note:** Allowing payments via zero-confirmation channels is potentially insecure if the @@ -150,6 +156,14 @@ pub struct Config { /// closure. We *will* however still try to get the Anchor spending transactions confirmed /// on-chain with the funds available. pub anchor_channels_config: Option, + /// Configuration options for payment routing and pathfinding. + /// + /// Setting the `SendingParameters` provides flexibility to customize how payments are routed, + /// including setting limits on routing fees, CLTV expiry, and channel utilization. + /// + /// **Note:** If unset, default parameters will be used, and you will be able to override the + /// parameters on a per-payment basis in the corresponding method calls. 
+ pub sending_parameters: Option, } impl Default for Config { @@ -159,14 +173,12 @@ impl Default for Config { log_dir_path: None, network: DEFAULT_NETWORK, listening_addresses: None, - default_cltv_expiry_delta: DEFAULT_CLTV_EXPIRY_DELTA, - onchain_wallet_sync_interval_secs: DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS, - wallet_sync_interval_secs: DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS, - fee_rate_cache_update_interval_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS, trusted_peers_0conf: Vec::new(), probing_liquidity_limit_multiplier: DEFAULT_PROBING_LIQUIDITY_LIMIT_MULTIPLIER, log_level: DEFAULT_LOG_LEVEL, anchor_channels_config: Some(AnchorChannelsConfig::default()), + sending_parameters: None, + node_alias: None, } } } @@ -252,17 +264,217 @@ pub fn default_config() -> Config { Config::default() } +pub(crate) fn may_announce_channel(config: &Config) -> bool { + config.node_alias.is_some() + && config.listening_addresses.as_ref().map_or(false, |addrs| !addrs.is_empty()) +} + pub(crate) fn default_user_config(config: &Config) -> UserConfig { // Initialize the default config values. // - // Note that methods such as Node::connect_open_channel might override some of the values set - // here, e.g. the ChannelHandshakeConfig, meaning these default values will mostly be relevant - // for inbound channels. + // Note that methods such as Node::open_channel and Node::open_announced_channel might override + // some of the values set here, e.g. the ChannelHandshakeConfig, meaning these default values + // will mostly be relevant for inbound channels. 
let mut user_config = UserConfig::default(); user_config.channel_handshake_limits.force_announced_channel_preference = false; user_config.manually_accept_inbound_channels = true; user_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = config.anchor_channels_config.is_some(); + if !may_announce_channel(config) { + user_config.accept_forwards_to_priv_channels = false; + user_config.channel_handshake_config.announce_for_forwarding = false; + user_config.channel_handshake_limits.force_announced_channel_preference = true; + } + user_config } + +/// Options related to syncing the Lightning and on-chain wallets via an Esplora backend. +/// +/// ### Defaults +/// +/// | Parameter | Value | +/// |----------------------------------------|--------------------| +/// | `onchain_wallet_sync_interval_secs` | 80 | +/// | `lightning_wallet_sync_interval_secs` | 30 | +/// | `fee_rate_cache_update_interval_secs` | 600 | +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub struct EsploraSyncConfig { + /// The time in-between background sync attempts of the onchain wallet, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. + pub onchain_wallet_sync_interval_secs: u64, + /// The time in-between background sync attempts of the LDK wallet, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. + pub lightning_wallet_sync_interval_secs: u64, + /// The time in-between background update attempts to our fee rate cache, in seconds. + /// + /// **Note:** A minimum of 10 seconds is always enforced. 
+ pub fee_rate_cache_update_interval_secs: u64, +} + +impl Default for EsploraSyncConfig { + fn default() -> Self { + Self { + onchain_wallet_sync_interval_secs: DEFAULT_BDK_WALLET_SYNC_INTERVAL_SECS, + lightning_wallet_sync_interval_secs: DEFAULT_LDK_WALLET_SYNC_INTERVAL_SECS, + fee_rate_cache_update_interval_secs: DEFAULT_FEE_RATE_CACHE_UPDATE_INTERVAL_SECS, + } + } +} + +/// Options which apply on a per-channel basis and may change at runtime or based on negotiation +/// with our counterparty. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct ChannelConfig { + /// Amount (in millionths of a satoshi) charged per satoshi for payments forwarded outbound + /// over the channel. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_proportional_millionths: u32, + /// Amount (in milli-satoshi) charged for payments forwarded outbound over the channel, in + /// excess of [`ChannelConfig::forwarding_fee_proportional_millionths`]. + /// This may be allowed to change at runtime in a later update, however doing so must result in + /// update messages sent to notify all nodes of our updated relay fee. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub forwarding_fee_base_msat: u32, + /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over + /// the channel this config applies to. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub cltv_expiry_delta: u16, + /// Limit our total exposure to potential loss to on-chain fees on close, including in-flight + /// HTLCs which are burned to fees as they are too small to claim on-chain and fees on + /// commitment transaction(s) broadcasted by our counterparty in excess of our own fee estimate. 
+ /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub max_dust_htlc_exposure: MaxDustHTLCExposure, + /// The additional fee we're willing to pay to avoid waiting for the counterparty's + /// `to_self_delay` to reclaim funds. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub force_close_avoidance_max_fee_satoshis: u64, + /// If set, allows this channel's counterparty to skim an additional fee off this node's inbound + /// HTLCs. Useful for liquidity providers to offload on-chain channel costs to end users. + /// + /// Please refer to [`LdkChannelConfig`] for further details. + pub accept_underpaying_htlcs: bool, +} + +impl From for ChannelConfig { + fn from(value: LdkChannelConfig) -> Self { + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } + } +} + +impl From for LdkChannelConfig { + fn from(value: ChannelConfig) -> Self { + Self { + forwarding_fee_proportional_millionths: value.forwarding_fee_proportional_millionths, + forwarding_fee_base_msat: value.forwarding_fee_base_msat, + cltv_expiry_delta: value.cltv_expiry_delta, + max_dust_htlc_exposure: value.max_dust_htlc_exposure.into(), + force_close_avoidance_max_fee_satoshis: value.force_close_avoidance_max_fee_satoshis, + accept_underpaying_htlcs: value.accept_underpaying_htlcs, + } + } +} + +impl Default for ChannelConfig { + fn default() -> Self { + LdkChannelConfig::default().into() + } +} + +/// Options for how to set the max dust exposure allowed on a channel. +/// +/// See [`LdkChannelConfig::max_dust_htlc_exposure`] for details. 
+#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MaxDustHTLCExposure { + /// This sets a fixed limit on the total dust exposure in millisatoshis. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FixedLimit { + /// The fixed limit, in millisatoshis. + limit_msat: u64, + }, + /// This sets a multiplier on the feerate to determine the maximum allowed dust exposure. + /// + /// Please refer to [`LdkMaxDustHTLCExposure`] for further details. + FeeRateMultiplier { + /// The applied fee rate multiplier. + multiplier: u64, + }, +} + +impl From for MaxDustHTLCExposure { + fn from(value: LdkMaxDustHTLCExposure) -> Self { + match value { + LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat) => Self::FixedLimit { limit_msat }, + LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier) => { + Self::FeeRateMultiplier { multiplier } + }, + } + } +} + +impl From for LdkMaxDustHTLCExposure { + fn from(value: MaxDustHTLCExposure) -> Self { + match value { + MaxDustHTLCExposure::FixedLimit { limit_msat } => Self::FixedLimitMsat(limit_msat), + MaxDustHTLCExposure::FeeRateMultiplier { multiplier } => { + Self::FeeRateMultiplier(multiplier) + }, + } + } +} + +#[cfg(test)] +mod tests { + use std::str::FromStr; + + use super::may_announce_channel; + use super::Config; + use super::NodeAlias; + use super::SocketAddress; + + #[test] + fn node_announce_channel() { + // Default configuration with node alias and listening addresses unset + let mut node_config = Config::default(); + assert!(!may_announce_channel(&node_config)); + + // Set node alias with listening addresses unset + let alias_frm_str = |alias: &str| { + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + NodeAlias(bytes) + }; + node_config.node_alias = Some(alias_frm_str("LDK_Node")); + assert!(!may_announce_channel(&node_config)); + + // Set node alias with an empty list of listening addresses + node_config.listening_addresses = Some(vec![]); + 
assert!(!may_announce_channel(&node_config)); + + // Set node alias with a non-empty list of listening addresses + let socket_address = + SocketAddress::from_str("localhost:8000").expect("Socket address conversion failed."); + if let Some(ref mut addresses) = node_config.listening_addresses { + addresses.push(socket_address); + } + assert!(may_announce_channel(&node_config)); + } +} diff --git a/src/connection.rs b/src/connection.rs index 9d956d6be..5f665f77e 100644 --- a/src/connection.rs +++ b/src/connection.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::logger::{log_error, log_info, Logger}; use crate::types::PeerManager; use crate::Error; diff --git a/src/error.rs b/src/error.rs index 15aa5a960..cb2833cf1 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,3 +1,15 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +use bdk_chain::bitcoin::psbt::ExtractTxError as BdkExtractTxError; +use bdk_chain::local_chain::CannotConnectError as BdkChainConnectionError; +use bdk_wallet::error::CreateTxError as BdkCreateTxError; +use bdk_wallet::signer::SignerError as BdkSignerError; + use std::fmt; #[derive(Copy, Clone, Debug, PartialEq, Eq)] @@ -41,8 +53,6 @@ pub enum Error { WalletOperationTimeout, /// A signing operation for transaction failed. OnchainTxSigningFailed, - /// A signing operation for message failed. - MessageSigningFailed, /// A transaction sync operation failed. TxSyncFailed, /// A transaction sync operation timed out. 
@@ -53,6 +63,8 @@ pub enum Error { GossipUpdateTimeout, /// A liquidity request operation failed. LiquidityRequestFailed, + /// Parsing a URI parameter has failed. + UriParameterParsingFailed, /// The given address is invalid. InvalidAddress, /// The given network address is invalid. @@ -87,6 +99,12 @@ pub enum Error { InvalidNetwork, /// The custom TLVs are invalid. InvalidCustomTlv, + /// The given URI is invalid. + InvalidUri, + /// The given quantity is invalid. + InvalidQuantity, + /// The given node alias is invalid. + InvalidNodeAlias, /// A payment with the given hash has already been initiated. DuplicatePayment, /// The provided offer was denonminated in an unsupported currency. @@ -127,12 +145,12 @@ impl fmt::Display for Error { Self::WalletOperationFailed => write!(f, "Failed to conduct wallet operation."), Self::WalletOperationTimeout => write!(f, "A wallet operation timed out."), Self::OnchainTxSigningFailed => write!(f, "Failed to sign given transaction."), - Self::MessageSigningFailed => write!(f, "Failed to sign given message."), Self::TxSyncFailed => write!(f, "Failed to sync transactions."), Self::TxSyncTimeout => write!(f, "Syncing transactions timed out."), Self::GossipUpdateFailed => write!(f, "Failed to update gossip data."), Self::GossipUpdateTimeout => write!(f, "Updating gossip data timed out."), Self::LiquidityRequestFailed => write!(f, "Failed to request inbound liquidity."), + Self::UriParameterParsingFailed => write!(f, "Failed to parse a URI parameter."), Self::InvalidAddress => write!(f, "The given address is invalid."), Self::InvalidSocketAddress => write!(f, "The given network address is invalid."), Self::InvalidPublicKey => write!(f, "The given public key is invalid."), @@ -150,6 +168,9 @@ impl fmt::Display for Error { Self::InvalidChannelId => write!(f, "The given channel ID is invalid."), Self::InvalidNetwork => write!(f, "The given network is invalid."), Self::InvalidCustomTlv => write!(f, "The given custom TLVs are invalid."), 
+			Self::InvalidUri => write!(f, "The given URI is invalid."),
+			Self::InvalidQuantity => write!(f, "The given quantity is invalid."),
+			Self::InvalidNodeAlias => write!(f, "The given node alias is invalid."),
 			Self::DuplicatePayment => {
 				write!(f, "A payment with the given hash has already been initiated.")
 			},
@@ -171,12 +192,27 @@ impl fmt::Display for Error {
 
 impl std::error::Error for Error {}
 
-impl From<bdk::Error> for Error {
-	fn from(e: bdk::Error) -> Self {
-		match e {
-			bdk::Error::Signer(_) => Self::OnchainTxSigningFailed,
-			_ => Self::WalletOperationFailed,
-		}
+impl From<BdkSignerError> for Error {
+	fn from(_: BdkSignerError) -> Self {
+		Self::OnchainTxSigningFailed
+	}
+}
+
+impl From<BdkCreateTxError> for Error {
+	fn from(_: BdkCreateTxError) -> Self {
+		Self::OnchainTxCreationFailed
+	}
+}
+
+impl From<BdkExtractTxError> for Error {
+	fn from(_: BdkExtractTxError) -> Self {
+		Self::OnchainTxCreationFailed
+	}
+}
+
+impl From<BdkChainConnectionError> for Error {
+	fn from(_: BdkChainConnectionError) -> Self {
+		Self::WalletOperationFailed
 	}
 }
 
diff --git a/src/event.rs b/src/event.rs
index 0bf437051..a975a87f6 100644
--- a/src/event.rs
+++ b/src/event.rs
@@ -1,3 +1,10 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
 use crate::types::{DynStore, Sweeper, Wallet};
 
 use crate::{
@@ -6,6 +13,7 @@ use crate::{
 };
 
 use crate::connection::ConnectionManager;
+use crate::fee_estimator::ConfirmationTarget;
 use crate::payment::store::{
 	PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus,
@@ -18,13 +26,13 @@ use crate::io::{
 };
 use crate::logger::{log_debug, log_error, log_info, Logger};
 
-use lightning::chain::chaininterface::ConfirmationTarget;
 use lightning::events::bump_transaction::BumpTransactionEvent;
-use lightning::events::{ClosureReason, PaymentPurpose};
+use lightning::events::{ClosureReason, PaymentPurpose, ReplayEvent};
 use lightning::events::{Event as LdkEvent, PaymentFailureReason};
 use lightning::impl_writeable_tlv_based_enum;
 use lightning::ln::channelmanager::PaymentId;
-use lightning::ln::{ChannelId, PaymentHash};
+use lightning::ln::types::ChannelId;
+use lightning::ln::PaymentHash;
 use lightning::routing::gossip::NodeId;
 use lightning::util::errors::APIError;
 use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
@@ -33,7 +41,7 @@ use lightning_liquidity::lsps2::utils::compute_opening_fee;
 
 use bitcoin::blockdata::locktime::absolute::LockTime;
 use bitcoin::secp256k1::PublicKey;
-use bitcoin::OutPoint;
+use bitcoin::{Amount, OutPoint};
 
 use rand::{thread_rng, Rng};
 
@@ -68,7 +76,12 @@ pub enum Event {
 		/// Will only be `None` for events serialized with LDK Node v0.2.1 or prior.
 		payment_id: Option<PaymentId>,
 		/// The hash of the payment.
-		payment_hash: PaymentHash,
+		///
+		/// This will be `None` if the payment failed before receiving an invoice when paying a
+		/// BOLT12 [`Offer`].
+		///
+		/// [`Offer`]: lightning::offers::offer::Offer
+		payment_hash: Option<PaymentHash>,
 		/// The reason why the payment failed.
 		///
 		/// This will be `None` for events serialized by LDK Node v0.2.1 and prior.
@@ -153,8 +166,8 @@ impl_writeable_tlv_based_enum!(Event, (3, payment_id, option), }, (1, PaymentFailed) => { - (0, payment_hash, required), - (1, reason, option), + (0, payment_hash, option), + (1, reason, upgradable_option), (3, payment_id, option), }, (2, PaymentReceived) => { @@ -185,7 +198,7 @@ impl_writeable_tlv_based_enum!(Event, (2, payment_id, required), (4, claimable_amount_msat, required), (6, claim_deadline, option), - }; + } ); pub struct EventQueue @@ -355,7 +368,7 @@ where network_graph: Arc, payment_store: Arc>, peer_store: Arc>, - runtime: Arc>>, + runtime: Arc>>>, logger: L, config: Arc, } @@ -370,7 +383,7 @@ where channel_manager: Arc, connection_manager: Arc>, output_sweeper: Arc, network_graph: Arc, payment_store: Arc>, peer_store: Arc>, - runtime: Arc>>, logger: L, config: Arc, + runtime: Arc>>>, logger: L, config: Arc, ) -> Self { Self { event_queue, @@ -388,7 +401,7 @@ where } } - pub async fn handle_event(&self, event: LdkEvent) { + pub async fn handle_event(&self, event: LdkEvent) -> Result<(), ReplayEvent> { match event { LdkEvent::FundingGenerationReady { temporary_channel_id, @@ -399,24 +412,25 @@ where } => { // Construct the raw transaction with the output that is paid the amount of the // channel. - let confirmation_target = ConfirmationTarget::NonAnchorChannelFee; + let confirmation_target = ConfirmationTarget::ChannelFunding; // We set nLockTime to the current height to discourage fee sniping. let cur_height = self.channel_manager.current_best_block().height; let locktime = LockTime::from_height(cur_height).unwrap_or(LockTime::ZERO); // Sign the final funding transaction and broadcast it. + let channel_amount = Amount::from_sat(channel_value_satoshis); match self.wallet.create_funding_transaction( output_script, - channel_value_satoshis, + channel_amount, confirmation_target, locktime, ) { Ok(final_tx) => { // Give the funding transaction back to LDK for opening the channel. 
match self.channel_manager.funding_transaction_generated( - &temporary_channel_id, - &counterparty_node_id, + temporary_channel_id, + counterparty_node_id, final_tx, ) { Ok(()) => {}, @@ -446,6 +460,7 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Failed to create funding transaction".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to force close channel after funding generation failed: {:?}", e); @@ -456,6 +471,9 @@ where }, } }, + LdkEvent::FundingTxBroadcastSafe { .. } => { + debug_assert!(false, "We currently only support safe funding, so this event should never be emitted."); + }, LdkEvent::PaymentClaimable { payment_hash, purpose, @@ -481,11 +499,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return; + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; } if info.status == PaymentStatus::Succeeded @@ -503,11 +523,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return; + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; } let max_total_opening_fee_msat = match info.kind { @@ -542,11 +564,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access 
payment store: {}", e); - panic!("Failed to access payment store"); - }); - return; + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; } // If this is known by the store but ChannelManager doesn't know the preimage, @@ -560,22 +584,23 @@ where "We would have registered the preimage if we knew" ); - self.event_queue - .add_event(Event::PaymentClaimable { - payment_id, - payment_hash, - claimable_amount_msat: amount_msat, - claim_deadline, - }) - .unwrap_or_else(|e| { + let event = Event::PaymentClaimable { + payment_id, + payment_hash, + claimable_amount_msat: amount_msat, + claim_deadline, + }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!( self.logger, "Failed to push to event queue: {}", e ); - panic!("Failed to push to event queue"); - }); - return; + return Err(ReplayEvent()); + }, + }; } }, _ => {}, @@ -598,12 +623,16 @@ where payment_context, .. 
} => { + let payer_note = payment_context.invoice_request.payer_note_truncated; let offer_id = payment_context.offer_id; + let quantity = payment_context.invoice_request.quantity; let kind = PaymentKind::Bolt12Offer { hash: Some(payment_hash), preimage: payment_preimage, secret: Some(payment_secret), offer_id, + payer_note, + quantity, }; let payment = PaymentDetails::new( @@ -722,10 +751,13 @@ where status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); + match self.payment_store.update(&update) { + Ok(_) => return Ok(()), + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; } }, LdkEvent::PaymentClaimed { @@ -735,6 +767,7 @@ where receiver_node_id: _, htlcs: _, sender_intended_total_msat: _, + onion_fields: _, } => { let payment_id = PaymentId(payment_hash.0); log_info!( @@ -802,20 +835,22 @@ where payment_id, e ); - panic!("Failed to access payment store"); + return Err(ReplayEvent()); }, } - self.event_queue - .add_event(Event::PaymentReceived { - payment_id: Some(payment_id), - payment_hash, - amount_msat, - }) - .unwrap_or_else(|e| { + let event = Event::PaymentReceived { + payment_id: Some(payment_id), + payment_hash, + amount_msat, + }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentSent { payment_id, @@ -828,7 +863,7 @@ where id } else { debug_assert!(false, "payment_id should always be set."); - return; + return Ok(()); }; let update = PaymentDetailsUpdate { @@ -839,10 +874,13 @@ where ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { 
- log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); + match self.payment_store.update(&update) { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; self.payment_store.get(&payment_id).map(|payment| { log_info!( @@ -859,45 +897,50 @@ where hex_utils::to_string(&payment_preimage.0) ); }); + let event = Event::PaymentSuccessful { + payment_id: Some(payment_id), + payment_hash, + fee_paid_msat, + }; - self.event_queue - .add_event(Event::PaymentSuccessful { - payment_id: Some(payment_id), - payment_hash, - fee_paid_msat, - }) - .unwrap_or_else(|e| { + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentFailed { payment_id, payment_hash, reason, .. 
} => { log_info!( self.logger, - "Failed to send payment to payment hash {:?} due to {:?}.", - hex_utils::to_string(&payment_hash.0), + "Failed to send payment with ID {} due to {:?}.", + payment_id, reason ); let update = PaymentDetailsUpdate { - hash: Some(Some(payment_hash)), + hash: Some(payment_hash), status: Some(PaymentStatus::Failed), ..PaymentDetailsUpdate::new(payment_id) }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - self.event_queue - .add_event(Event::PaymentFailed { - payment_id: Some(payment_id), - payment_hash, - reason, - }) - .unwrap_or_else(|e| { + match self.payment_store.update(&update) { + Ok(_) => {}, + Err(e) => { + log_error!(self.logger, "Failed to access payment store: {}", e); + return Err(ReplayEvent()); + }, + }; + + let event = + Event::PaymentFailed { payment_id: Some(payment_id), payment_hash, reason }; + match self.event_queue.add_event(event) { + Ok(_) => return Ok(()), + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::PaymentPathSuccessful { .. 
} => {}, @@ -922,12 +965,13 @@ where } }, LdkEvent::SpendableOutputs { outputs, channel_id } => { - self.output_sweeper - .track_spendable_outputs(outputs, channel_id, true, None) - .unwrap_or_else(|_| { + match self.output_sweeper.track_spendable_outputs(outputs, channel_id, true, None) { + Ok(_) => return Ok(()), + Err(_) => { log_error!(self.logger, "Failed to track spendable outputs"); - panic!("Failed to track spendable outputs"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::OpenChannelRequest { temporary_channel_id, @@ -935,9 +979,15 @@ where funding_satoshis, channel_type, push_msat: _, + is_announced: _, + params: _, } => { let anchor_channel = channel_type.requires_anchors_zero_fee_htlc_tx(); + // TODO: We should use `is_announced` flag above and reject announced channels if + // we're not a forwading node, once we add a 'forwarding mode' based on listening + // address / node alias being set. + if anchor_channel { if let Some(anchor_channels_config) = self.config.anchor_channels_config.as_ref() @@ -970,11 +1020,12 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Channel request rejected".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to reject channel: {:?}", e) }); - return; + return Ok(()); } } else { log_error!( @@ -986,11 +1037,12 @@ where .force_close_without_broadcasting_txn( &temporary_channel_id, &counterparty_node_id, + "Channel request rejected".to_string(), ) .unwrap_or_else(|e| { log_error!(self.logger, "Failed to reject channel: {:?}", e) }); - return; + return Ok(()); } } @@ -1098,7 +1150,7 @@ where node.announcement_info .as_ref() .map_or("unnamed node".to_string(), |ann| { - format!("node {}", ann.alias) + format!("node {}", ann.alias()) }) }) }; @@ -1151,18 +1203,22 @@ where channel_id, counterparty_node_id, ); - self.event_queue - .add_event(Event::ChannelPending { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - 
former_temporary_channel_id: former_temporary_channel_id.unwrap(), - counterparty_node_id, - funding_txo, - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelPending { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + former_temporary_channel_id: former_temporary_channel_id.unwrap(), + counterparty_node_id, + funding_txo, + }; + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; + let network_graph = self.network_graph.read_only(); let channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); @@ -1204,16 +1260,19 @@ where channel_id, counterparty_node_id, ); - self.event_queue - .add_event(Event::ChannelReady { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - counterparty_node_id: Some(counterparty_node_id), - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelReady { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id: Some(counterparty_node_id), + }; + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::ChannelClosed { channel_id, @@ -1229,35 +1288,26 @@ where counterparty_node_id, reason ); - self.event_queue - .add_event(Event::ChannelClosed { - channel_id, - user_channel_id: UserChannelId(user_channel_id), - counterparty_node_id, - reason: Some(reason), - }) - .unwrap_or_else(|e| { + + let event = Event::ChannelClosed { + channel_id, + user_channel_id: UserChannelId(user_channel_id), + counterparty_node_id, + reason: Some(reason), + }; + + match self.event_queue.add_event(event) { + Ok(_) => {}, + Err(e) => { log_error!(self.logger, "Failed to push to event queue: {}", e); - panic!("Failed to push 
to event queue"); - }); + return Err(ReplayEvent()); + }, + }; }, LdkEvent::DiscardFunding { .. } => {}, LdkEvent::HTLCIntercepted { .. } => {}, - LdkEvent::InvoiceRequestFailed { payment_id } => { - log_error!( - self.logger, - "Failed to request invoice for outbound BOLT12 payment {}", - payment_id - ); - let update = PaymentDetailsUpdate { - status: Some(PaymentStatus::Failed), - ..PaymentDetailsUpdate::new(payment_id) - }; - self.payment_store.update(&update).unwrap_or_else(|e| { - log_error!(self.logger, "Failed to access payment store: {}", e); - panic!("Failed to access payment store"); - }); - return; + LdkEvent::InvoiceReceived { .. } => { + debug_assert!(false, "We currently don't handle BOLT12 invoices manually, so this event should never be emitted."); }, LdkEvent::ConnectionNeeded { node_id, addresses } => { let runtime_lock = self.runtime.read().unwrap(); @@ -1309,13 +1359,20 @@ where "Ignoring BumpTransactionEvent for channel {} due to trusted counterparty {}", channel_id, counterparty_node_id ); - return; + return Ok(()); } } self.bump_tx_event_handler.handle_event(&bte); }, + LdkEvent::OnionMessageIntercepted { .. } => { + debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + }, + LdkEvent::OnionMessagePeerConnected { .. } => { + debug_assert!(false, "We currently don't support onion message interception, so this event should never be emitted."); + }, } + Ok(()) } } diff --git a/src/fee_estimator.rs b/src/fee_estimator.rs index 329cc6e42..8db6a6050 100644 --- a/src/fee_estimator.rs +++ b/src/fee_estimator.rs @@ -1,147 +1,145 @@ -use crate::config::FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS; -use crate::logger::{log_error, log_trace, Logger}; -use crate::{Config, Error}; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. 
You may not use this file except in +// accordance with one or both of these licenses. -use lightning::chain::chaininterface::{ - ConfirmationTarget, FeeEstimator, FEERATE_FLOOR_SATS_PER_KW, -}; +use lightning::chain::chaininterface::ConfirmationTarget as LdkConfirmationTarget; +use lightning::chain::chaininterface::FeeEstimator as LdkFeeEstimator; +use lightning::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW; -use bdk::FeeRate; -use esplora_client::AsyncClient as EsploraClient; - -use bitcoin::blockdata::weight::Weight; -use bitcoin::Network; +use bitcoin::FeeRate; use std::collections::HashMap; -use std::ops::Deref; -use std::sync::{Arc, RwLock}; -use std::time::Duration; - -pub(crate) struct OnchainFeeEstimator -where - L::Target: Logger, -{ +use std::sync::RwLock; + +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] +pub(crate) enum ConfirmationTarget { + /// The default target for onchain payments. + OnchainPayment, + /// The target used for funding transactions. + ChannelFunding, + /// Targets used by LDK. 
+ Lightning(LdkConfirmationTarget), +} + +pub(crate) trait FeeEstimator { + fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate; +} + +impl From for ConfirmationTarget { + fn from(value: LdkConfirmationTarget) -> Self { + Self::Lightning(value) + } +} + +pub(crate) struct OnchainFeeEstimator { fee_rate_cache: RwLock>, - esplora_client: EsploraClient, - config: Arc, - logger: L, } -impl OnchainFeeEstimator -where - L::Target: Logger, -{ - pub(crate) fn new(esplora_client: EsploraClient, config: Arc, logger: L) -> Self { +impl OnchainFeeEstimator { + pub(crate) fn new() -> Self { let fee_rate_cache = RwLock::new(HashMap::new()); - Self { fee_rate_cache, esplora_client, config, logger } + Self { fee_rate_cache } } - pub(crate) async fn update_fee_estimates(&self) -> Result<(), Error> { - let estimates = tokio::time::timeout( - Duration::from_secs(FEE_RATE_CACHE_UPDATE_TIMEOUT_SECS), - self.esplora_client.get_fee_estimates(), - ) - .await - .map_err(|e| { - log_error!(self.logger, "Updating fee rate estimates timed out: {}", e); - Error::FeerateEstimationUpdateTimeout - })? - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve fee rate estimates: {}", e); - Error::FeerateEstimationUpdateFailed - })?; - - if estimates.is_empty() && self.config.network == Network::Bitcoin { - // Ensure we fail if we didn't receive any estimates. 
- log_error!( - self.logger, - "Failed to retrieve fee rate estimates: empty fee estimates are dissallowed on Mainnet.", - ); - return Err(Error::FeerateEstimationUpdateFailed); - } - - let confirmation_targets = vec![ - ConfirmationTarget::OnChainSweep, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, - ConfirmationTarget::AnchorChannelFee, - ConfirmationTarget::NonAnchorChannelFee, - ConfirmationTarget::ChannelCloseMinimum, - ConfirmationTarget::OutputSpendingFee, - ]; - for target in confirmation_targets { - let num_blocks = match target { - ConfirmationTarget::OnChainSweep => 6, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144, - ConfirmationTarget::AnchorChannelFee => 1008, - ConfirmationTarget::NonAnchorChannelFee => 12, - ConfirmationTarget::ChannelCloseMinimum => 144, - ConfirmationTarget::OutputSpendingFee => 12, - }; - - let converted_estimates = - esplora_client::convert_fee_rate(num_blocks, estimates.clone()).map_err(|e| { - log_error!( - self.logger, - "Failed to convert fee rate estimates for {:?}: {}", - target, - e - ); - Error::FeerateEstimationUpdateFailed - })?; - - let fee_rate = FeeRate::from_sat_per_vb(converted_estimates); - - // LDK 0.0.118 introduced changes to the `ConfirmationTarget` semantics that - // require some post-estimation adjustments to the fee rates, which we do here. 
- let adjusted_fee_rate = match target { - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => { - let slightly_less_than_background = - fee_rate.fee_wu(Weight::from_wu(1000)) - 250; - FeeRate::from_sat_per_kwu(slightly_less_than_background as f32) - }, - _ => fee_rate, - }; - - let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); - locked_fee_rate_cache.insert(target, adjusted_fee_rate); - log_trace!( - self.logger, - "Fee rate estimation updated for {:?}: {} sats/kwu", - target, - adjusted_fee_rate.fee_wu(Weight::from_wu(1000)) - ); + // Updates the fee rate cache and returns if the new values changed. + pub(crate) fn set_fee_rate_cache( + &self, fee_rate_cache_update: HashMap, + ) -> bool { + let mut locked_fee_rate_cache = self.fee_rate_cache.write().unwrap(); + if fee_rate_cache_update != *locked_fee_rate_cache { + *locked_fee_rate_cache = fee_rate_cache_update; + true + } else { + false } - Ok(()) } +} - pub(crate) fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { +impl FeeEstimator for OnchainFeeEstimator { + fn estimate_fee_rate(&self, confirmation_target: ConfirmationTarget) -> FeeRate { let locked_fee_rate_cache = self.fee_rate_cache.read().unwrap(); - let fallback_sats_kwu = match confirmation_target { - ConfirmationTarget::OnChainSweep => 5000, - ConfirmationTarget::MinAllowedAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, - ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, - ConfirmationTarget::AnchorChannelFee => 500, - ConfirmationTarget::NonAnchorChannelFee => 1000, - ConfirmationTarget::ChannelCloseMinimum => 500, - ConfirmationTarget::OutputSpendingFee => 1000, - }; + let fallback_sats_kwu = get_fallback_rate_for_target(confirmation_target); // We'll fall back on this, if we really don't have any other information. 
-		let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as f32);
+		let fallback_rate = FeeRate::from_sat_per_kwu(fallback_sats_kwu as u64);
+
+		let estimate = *locked_fee_rate_cache.get(&confirmation_target).unwrap_or(&fallback_rate);
 
-		*locked_fee_rate_cache.get(&confirmation_target).unwrap_or(&fallback_rate)
+		// Currently we assume every transaction needs to at least be relayable, which is why we
+		// enforce a lower bound of `FEERATE_FLOOR_SATS_PER_KW`.
+		FeeRate::from_sat_per_kwu(estimate.to_sat_per_kwu().max(FEERATE_FLOOR_SATS_PER_KW as u64))
 	}
 }
 
-impl<L: Deref> FeeEstimator for OnchainFeeEstimator<L>
-where
-	L::Target: Logger,
-{
-	fn get_est_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 {
-		(self.estimate_fee_rate(confirmation_target).fee_wu(Weight::from_wu(1000)) as u32)
-			.max(FEERATE_FLOOR_SATS_PER_KW)
+impl LdkFeeEstimator for OnchainFeeEstimator {
+	fn get_est_sat_per_1000_weight(&self, confirmation_target: LdkConfirmationTarget) -> u32 {
+		self.estimate_fee_rate(confirmation_target.into()).to_sat_per_kwu() as u32
+	}
+}
+
+pub(crate) fn get_num_block_defaults_for_target(target: ConfirmationTarget) -> usize {
+	match target {
+		ConfirmationTarget::OnchainPayment => 6,
+		ConfirmationTarget::ChannelFunding => 12,
+		ConfirmationTarget::Lightning(ldk_target) => match ldk_target {
+			LdkConfirmationTarget::MaximumFeeEstimate => 1,
+			LdkConfirmationTarget::UrgentOnChainSweep => 6,
+			LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => 1008,
+			LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => 144,
+			LdkConfirmationTarget::AnchorChannelFee => 1008,
+			LdkConfirmationTarget::NonAnchorChannelFee => 12,
+			LdkConfirmationTarget::ChannelCloseMinimum => 144,
+			LdkConfirmationTarget::OutputSpendingFee => 12,
+		},
+	}
+}
+
+pub(crate) fn get_fallback_rate_for_target(target: ConfirmationTarget) -> u32 {
+	match target {
+		ConfirmationTarget::OnchainPayment => 5000,
+		ConfirmationTarget::ChannelFunding => 1000,
+		
ConfirmationTarget::Lightning(ldk_target) => match ldk_target { + LdkConfirmationTarget::MaximumFeeEstimate => 8000, + LdkConfirmationTarget::UrgentOnChainSweep => 5000, + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee => FEERATE_FLOOR_SATS_PER_KW, + LdkConfirmationTarget::AnchorChannelFee => 500, + LdkConfirmationTarget::NonAnchorChannelFee => 1000, + LdkConfirmationTarget::ChannelCloseMinimum => 500, + LdkConfirmationTarget::OutputSpendingFee => 1000, + }, + } +} + +pub(crate) fn get_all_conf_targets() -> [ConfirmationTarget; 10] { + [ + ConfirmationTarget::OnchainPayment, + ConfirmationTarget::ChannelFunding, + LdkConfirmationTarget::MaximumFeeEstimate.into(), + LdkConfirmationTarget::UrgentOnChainSweep.into(), + LdkConfirmationTarget::MinAllowedAnchorChannelRemoteFee.into(), + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee.into(), + LdkConfirmationTarget::AnchorChannelFee.into(), + LdkConfirmationTarget::NonAnchorChannelFee.into(), + LdkConfirmationTarget::ChannelCloseMinimum.into(), + LdkConfirmationTarget::OutputSpendingFee.into(), + ] +} + +pub(crate) fn apply_post_estimation_adjustments( + target: ConfirmationTarget, estimated_rate: FeeRate, +) -> FeeRate { + match target { + ConfirmationTarget::Lightning( + LdkConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee, + ) => { + let slightly_less_than_background = estimated_rate.to_sat_per_kwu() - 250; + FeeRate::from_sat_per_kwu(slightly_less_than_background) + }, + _ => estimated_rate, } } diff --git a/src/gossip.rs b/src/gossip.rs index 1241b0cdc..450b5b5ee 100644 --- a/src/gossip.rs +++ b/src/gossip.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+
 use crate::config::RGS_SYNC_TIMEOUT_SECS;
 use crate::logger::{log_trace, FilesystemLogger, Logger};
 use crate::types::{GossipSync, Graph, P2PGossipSync, RapidGossipSync};
diff --git a/src/graph.rs b/src/graph.rs
index 79a21853d..3e4e58c88 100644
--- a/src/graph.rs
+++ b/src/graph.rs
@@ -1,3 +1,10 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
 //! Objects for querying the network graph.
 
 use crate::types::Graph;
@@ -158,8 +165,8 @@ pub struct NodeAnnouncementInfo {
 impl From<lightning::routing::gossip::NodeAnnouncementInfo> for NodeAnnouncementInfo {
 	fn from(value: lightning::routing::gossip::NodeAnnouncementInfo) -> Self {
 		Self {
-			last_update: value.last_update,
-			alias: value.alias.to_string(),
+			last_update: value.last_update(),
+			alias: value.alias().to_string(),
 			addresses: value.addresses().iter().cloned().collect(),
 		}
 	}
diff --git a/src/hex_utils.rs b/src/hex_utils.rs
index 1b50c5647..d56c6fd99 100644
--- a/src/hex_utils.rs
+++ b/src/hex_utils.rs
@@ -1,3 +1,10 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
 use std::fmt::Write;
 
 #[cfg(feature = "uniffi")]
diff --git a/src/io/mod.rs b/src/io/mod.rs
index d545f6b93..3192dbb86 100644
--- a/src/io/mod.rs
+++ b/src/io/mod.rs
@@ -1,10 +1,16 @@
+// This file is Copyright its original authors, visible in version control history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
+// http://opensource.org/licenses/MIT>, at your option. You may not use this file except in
+// accordance with one or both of these licenses.
+
 //! Objects and traits for data persistence.
pub mod sqlite_store; #[cfg(test)] pub(crate) mod test_utils; pub(crate) mod utils; -#[cfg(any(vss, vss_test))] pub(crate) mod vss_store; /// The event queue will be persisted under this key. @@ -26,12 +32,49 @@ pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_PRIMARY_NAMESPACE: "spendable_outputs"; pub(crate) const DEPRECATED_SPENDABLE_OUTPUT_INFO_PERSISTENCE_SECONDARY_NAMESPACE: &str = ""; -/// RapidGossipSync's `latest_sync_timestamp` will be persisted under this key. -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_RGS_SYNC_TIMESTAMP_KEY: &str = "latest_rgs_sync_timestamp"; +/// The node metrics will be persisted under this key. +pub(crate) const NODE_METRICS_PRIMARY_NAMESPACE: &str = ""; +pub(crate) const NODE_METRICS_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const NODE_METRICS_KEY: &str = "node_metrics"; + +/// The BDK wallet's [`ChangeSet::descriptor`] will be persisted under this key. +/// +/// [`ChangeSet::descriptor`]: bdk_wallet::ChangeSet::descriptor +pub(crate) const BDK_WALLET_DESCRIPTOR_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_DESCRIPTOR_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_DESCRIPTOR_KEY: &str = "descriptor"; + +/// The BDK wallet's [`ChangeSet::change_descriptor`] will be persisted under this key. +/// +/// [`ChangeSet::change_descriptor`]: bdk_wallet::ChangeSet::change_descriptor +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_CHANGE_DESCRIPTOR_KEY: &str = "change_descriptor"; + +/// The BDK wallet's [`ChangeSet::network`] will be persisted under this key. 
+/// +/// [`ChangeSet::network`]: bdk_wallet::ChangeSet::network +pub(crate) const BDK_WALLET_NETWORK_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_NETWORK_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_NETWORK_KEY: &str = "network"; + +/// The BDK wallet's [`ChangeSet::local_chain`] will be persisted under this key. +/// +/// [`ChangeSet::local_chain`]: bdk_wallet::ChangeSet::local_chain +pub(crate) const BDK_WALLET_LOCAL_CHAIN_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_LOCAL_CHAIN_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_LOCAL_CHAIN_KEY: &str = "local_chain"; + +/// The BDK wallet's [`ChangeSet::tx_graph`] will be persisted under this key. +/// +/// [`ChangeSet::tx_graph`]: bdk_wallet::ChangeSet::tx_graph +pub(crate) const BDK_WALLET_TX_GRAPH_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_TX_GRAPH_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_TX_GRAPH_KEY: &str = "tx_graph"; -/// The last time we broadcast a node announcement will be persisted under this key. -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE: &str = ""; -pub(crate) const LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY: &str = "latest_node_ann_bcast_timestamp"; +/// The BDK wallet's [`ChangeSet::indexer`] will be persisted under this key. 
+/// +/// [`ChangeSet::indexer`]: bdk_wallet::ChangeSet::indexer +pub(crate) const BDK_WALLET_INDEXER_PRIMARY_NAMESPACE: &str = "bdk_wallet"; +pub(crate) const BDK_WALLET_INDEXER_SECONDARY_NAMESPACE: &str = ""; +pub(crate) const BDK_WALLET_INDEXER_KEY: &str = "indexer"; diff --git a/src/io/sqlite_store/migrations.rs b/src/io/sqlite_store/migrations.rs index 6d108185a..0486b8a4f 100644 --- a/src/io/sqlite_store/migrations.rs +++ b/src/io/sqlite_store/migrations.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use rusqlite::Connection; use lightning::io; diff --git a/src/io/sqlite_store/mod.rs b/src/io/sqlite_store/mod.rs index 607105509..b72db5a2b 100644 --- a/src/io/sqlite_store/mod.rs +++ b/src/io/sqlite_store/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects related to [`SqliteStore`] live here. 
use crate::io::utils::check_namespace_key_validity; @@ -125,7 +132,7 @@ impl SqliteStore { impl KVStore for SqliteStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> std::io::Result> { + ) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "read")?; let locked_conn = self.connection.lock().unwrap(); @@ -135,7 +142,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; let res = stmt @@ -155,7 +162,7 @@ impl KVStore for SqliteStore { PrintableString(secondary_namespace), PrintableString(key) ); - std::io::Error::new(std::io::ErrorKind::NotFound, msg) + io::Error::new(io::ErrorKind::NotFound, msg) }, e => { let msg = format!( @@ -165,7 +172,7 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) }, })?; Ok(res) @@ -173,7 +180,7 @@ impl KVStore for SqliteStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> std::io::Result<()> { + ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "write")?; let locked_conn = self.connection.lock().unwrap(); @@ -185,7 +192,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; stmt.execute(named_params! 
{ @@ -203,13 +210,13 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) }) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, _lazy: bool, - ) -> std::io::Result<()> { + ) -> io::Result<()> { check_namespace_key_validity(primary_namespace, secondary_namespace, Some(key), "remove")?; let locked_conn = self.connection.lock().unwrap(); @@ -218,7 +225,7 @@ impl KVStore for SqliteStore { let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; stmt.execute(named_params! { @@ -234,14 +241,12 @@ impl KVStore for SqliteStore { PrintableString(key), e ); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; Ok(()) } - fn list( - &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + fn list(&self, primary_namespace: &str, secondary_namespace: &str) -> io::Result> { check_namespace_key_validity(primary_namespace, secondary_namespace, None, "list")?; let locked_conn = self.connection.lock().unwrap(); @@ -252,7 +257,7 @@ impl KVStore for SqliteStore { ); let mut stmt = locked_conn.prepare_cached(&sql).map_err(|e| { let msg = format!("Failed to prepare statement: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; let mut keys = Vec::new(); @@ -267,13 +272,13 @@ impl KVStore for SqliteStore { ) .map_err(|e| { let msg = format!("Failed to retrieve queried rows: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + io::Error::new(io::ErrorKind::Other, msg) })?; for k in rows_iter { keys.push(k.map_err(|e| { let msg = format!("Failed to retrieve queried rows: {}", e); - std::io::Error::new(std::io::ErrorKind::Other, msg) + 
io::Error::new(io::ErrorKind::Other, msg) })?); } diff --git a/src/io/test_utils.rs b/src/io/test_utils.rs index cf3da452d..98b33fa5f 100644 --- a/src/io/test_utils.rs +++ b/src/io/test_utils.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use lightning::ln::functional_test_utils::{ connect_block, create_announced_chan_between_nodes, create_chanmon_cfgs, create_dummy_block, create_network, create_node_cfgs, create_node_chanmgrs, send_payment, @@ -144,12 +151,13 @@ pub(crate) fn do_test_store(store_0: &K, store_1: &K) { .force_close_broadcasting_latest_txn( &nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id(), + "whoops".to_string(), ) .unwrap(); check_closed_event!( nodes[0], 1, - ClosureReason::HolderForceClosed, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000 ); diff --git a/src/io/utils.rs b/src/io/utils.rs index 77cc56f55..218fec473 100644 --- a/src/io/utils.rs +++ b/src/io/utils.rs @@ -1,12 +1,27 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ use super::*; use crate::config::WALLET_KEYS_SEED_LEN; +use crate::chain::ChainSource; +use crate::fee_estimator::OnchainFeeEstimator; +use crate::io::{ + NODE_METRICS_KEY, NODE_METRICS_PRIMARY_NAMESPACE, NODE_METRICS_SECONDARY_NAMESPACE, +}; use crate::logger::{log_error, FilesystemLogger}; use crate::peer_store::PeerStore; use crate::sweep::DeprecatedSpendableOutputInfo; -use crate::types::{Broadcaster, ChainSource, DynStore, FeeEstimator, KeysManager, Sweeper}; -use crate::{Error, EventQueue, PaymentDetails}; +use crate::types::{Broadcaster, DynStore, KeysManager, Sweeper}; +use crate::wallet::ser::{ChangeSetDeserWrapper, ChangeSetSerWrapper}; +use crate::{Error, EventQueue, NodeMetrics, PaymentDetails}; +use lightning::io::Cursor; +use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringDecayParameters}; use lightning::util::logger::Logger; @@ -19,13 +34,21 @@ use lightning::util::persist::{ }; use lightning::util::ser::{Readable, ReadableArgs, Writeable}; use lightning::util::string::PrintableString; +use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; + +use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; +use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; +use bdk_chain::miniscript::{Descriptor, DescriptorPublicKey}; +use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; +use bdk_chain::ConfirmationBlockTime; +use bdk_wallet::ChangeSet as BdkWalletChangeSet; use bip39::Mnemonic; -use lightning::util::sweep::{OutputSpendStatus, OutputSweeper}; +use bitcoin::Network; use rand::{thread_rng, RngCore}; use std::fs; -use std::io::{Cursor, Write}; +use std::io::Write; use std::ops::Deref; use std::path::Path; use std::sync::Arc; @@ -201,7 +224,7 @@ where /// Read `OutputSweeper` state from the store. 
pub(crate) fn read_output_sweeper( - broadcaster: Arc, fee_estimator: Arc, + broadcaster: Arc, fee_estimator: Arc, chain_data_source: Arc, keys_manager: Arc, kv_store: Arc, logger: Arc, ) -> Result { @@ -322,98 +345,44 @@ where Ok(()) } -pub(crate) fn read_latest_rgs_sync_timestamp( - kv_store: Arc, logger: L, -) -> Result -where - L::Target: Logger, -{ - let mut reader = Cursor::new(kv_store.read( - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, - )?); - u32::read(&mut reader).map_err(|e| { - log_error!(logger, "Failed to deserialize latest RGS sync timestamp: {}", e); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize latest RGS sync timestamp", - ) - }) -} - -pub(crate) fn write_latest_rgs_sync_timestamp( - updated_timestamp: u32, kv_store: Arc, logger: L, -) -> Result<(), Error> -where - L::Target: Logger, -{ - let data = updated_timestamp.encode(); - kv_store - .write( - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, - &data, - ) - .map_err(|e| { - log_error!( - logger, - "Writing data to key {}/{}/{} failed due to: {}", - LATEST_RGS_SYNC_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_RGS_SYNC_TIMESTAMP_KEY, - e - ); - Error::PersistenceFailed - }) -} - -pub(crate) fn read_latest_node_ann_bcast_timestamp( +pub(crate) fn read_node_metrics( kv_store: Arc, logger: L, -) -> Result +) -> Result where L::Target: Logger, { let mut reader = Cursor::new(kv_store.read( - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, )?); - u64::read(&mut reader).map_err(|e| { - log_error!( - logger, - "Failed to deserialize latest node announcement broadcast timestamp: {}", 
- e - ); - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Failed to deserialize latest node announcement broadcast timestamp", - ) + NodeMetrics::read(&mut reader).map_err(|e| { + log_error!(logger, "Failed to deserialize NodeMetrics: {}", e); + std::io::Error::new(std::io::ErrorKind::InvalidData, "Failed to deserialize NodeMetrics") }) } -pub(crate) fn write_latest_node_ann_bcast_timestamp( - updated_timestamp: u64, kv_store: Arc, logger: L, +pub(crate) fn write_node_metrics( + node_metrics: &NodeMetrics, kv_store: Arc, logger: L, ) -> Result<(), Error> where L::Target: Logger, { - let data = updated_timestamp.encode(); + let data = node_metrics.encode(); kv_store .write( - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, &data, ) .map_err(|e| { log_error!( logger, "Writing data to key {}/{}/{} failed due to: {}", - LATEST_NODE_ANN_BCAST_TIMESTAMP_PRIMARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_SECONDARY_NAMESPACE, - LATEST_NODE_ANN_BCAST_TIMESTAMP_KEY, + NODE_METRICS_PRIMARY_NAMESPACE, + NODE_METRICS_SECONDARY_NAMESPACE, + NODE_METRICS_KEY, e ); Error::PersistenceFailed @@ -511,6 +480,164 @@ pub(crate) fn check_namespace_key_validity( Ok(()) } +macro_rules! 
impl_read_write_change_set_type { + ( $read_name: ident, $write_name: ident, $change_set_type:ty, $primary_namespace: expr, $secondary_namespace: expr, $key: expr ) => { + pub(crate) fn $read_name( + kv_store: Arc, logger: L, + ) -> Result, std::io::Error> + where + L::Target: Logger, + { + let bytes = match kv_store.read($primary_namespace, $secondary_namespace, $key) { + Ok(bytes) => bytes, + Err(e) => { + if e.kind() == lightning::io::ErrorKind::NotFound { + return Ok(None); + } else { + log_error!( + logger, + "Reading data from key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + return Err(e.into()); + } + }, + }; + + let mut reader = Cursor::new(bytes); + let res: Result, DecodeError> = + Readable::read(&mut reader); + match res { + Ok(res) => Ok(Some(res.0)), + Err(e) => { + log_error!(logger, "Failed to deserialize BDK wallet field: {}", e); + Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Failed to deserialize BDK wallet field", + )) + }, + } + } + + pub(crate) fn $write_name( + value: &$change_set_type, kv_store: Arc, logger: L, + ) -> Result<(), std::io::Error> + where + L::Target: Logger, + { + let data = ChangeSetSerWrapper(value).encode(); + kv_store.write($primary_namespace, $secondary_namespace, $key, &data).map_err(|e| { + log_error!( + logger, + "Writing data to key {}/{}/{} failed due to: {}", + $primary_namespace, + $secondary_namespace, + $key, + e + ); + e.into() + }) + } + }; +} + +impl_read_write_change_set_type!( + read_bdk_wallet_descriptor, + write_bdk_wallet_descriptor, + Descriptor, + BDK_WALLET_DESCRIPTOR_PRIMARY_NAMESPACE, + BDK_WALLET_DESCRIPTOR_SECONDARY_NAMESPACE, + BDK_WALLET_DESCRIPTOR_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_change_descriptor, + write_bdk_wallet_change_descriptor, + Descriptor, + BDK_WALLET_CHANGE_DESCRIPTOR_PRIMARY_NAMESPACE, + BDK_WALLET_CHANGE_DESCRIPTOR_SECONDARY_NAMESPACE, + BDK_WALLET_CHANGE_DESCRIPTOR_KEY +); + 
+impl_read_write_change_set_type!( + read_bdk_wallet_network, + write_bdk_wallet_network, + Network, + BDK_WALLET_NETWORK_PRIMARY_NAMESPACE, + BDK_WALLET_NETWORK_SECONDARY_NAMESPACE, + BDK_WALLET_NETWORK_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_local_chain, + write_bdk_wallet_local_chain, + BdkLocalChainChangeSet, + BDK_WALLET_LOCAL_CHAIN_PRIMARY_NAMESPACE, + BDK_WALLET_LOCAL_CHAIN_SECONDARY_NAMESPACE, + BDK_WALLET_LOCAL_CHAIN_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_tx_graph, + write_bdk_wallet_tx_graph, + BdkTxGraphChangeSet, + BDK_WALLET_TX_GRAPH_PRIMARY_NAMESPACE, + BDK_WALLET_TX_GRAPH_SECONDARY_NAMESPACE, + BDK_WALLET_TX_GRAPH_KEY +); + +impl_read_write_change_set_type!( + read_bdk_wallet_indexer, + write_bdk_wallet_indexer, + BdkIndexerChangeSet, + BDK_WALLET_INDEXER_PRIMARY_NAMESPACE, + BDK_WALLET_INDEXER_SECONDARY_NAMESPACE, + BDK_WALLET_INDEXER_KEY +); + +// Reads the full BdkWalletChangeSet or returns default fields +pub(crate) fn read_bdk_wallet_change_set( + kv_store: Arc, logger: Arc, +) -> Result, std::io::Error> { + let mut change_set = BdkWalletChangeSet::default(); + + // We require a descriptor and return `None` to signal creation of a new wallet otherwise. + if let Some(descriptor) = + read_bdk_wallet_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? + { + change_set.descriptor = Some(descriptor); + } else { + return Ok(None); + } + + // We require a change_descriptor and return `None` to signal creation of a new wallet otherwise. + if let Some(change_descriptor) = + read_bdk_wallet_change_descriptor(Arc::clone(&kv_store), Arc::clone(&logger))? + { + change_set.change_descriptor = Some(change_descriptor); + } else { + return Ok(None); + } + + // We require a network and return `None` to signal creation of a new wallet otherwise. + if let Some(network) = read_bdk_wallet_network(Arc::clone(&kv_store), Arc::clone(&logger))? 
{ + change_set.network = Some(network); + } else { + return Ok(None); + } + + read_bdk_wallet_local_chain(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|local_chain| change_set.local_chain = local_chain); + read_bdk_wallet_tx_graph(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|tx_graph| change_set.tx_graph = tx_graph); + read_bdk_wallet_indexer(Arc::clone(&kv_store), Arc::clone(&logger))? + .map(|indexer| change_set.indexer = indexer); + Ok(Some(change_set)) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/io/vss_store.rs b/src/io/vss_store.rs index 426af1fbb..296eaabe3 100644 --- a/src/io/vss_store.rs +++ b/src/io/vss_store.rs @@ -1,21 +1,29 @@ -use io::Error; -use std::io; -use std::io::ErrorKind; -#[cfg(test)] -use std::panic::RefUnwindSafe; -use std::time::Duration; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
use crate::io::utils::check_namespace_key_validity; +use bitcoin::hashes::{sha256, Hash, HashEngine, Hmac, HmacEngine}; +use lightning::io::{self, Error, ErrorKind}; use lightning::util::persist::KVStore; use prost::Message; use rand::RngCore; +#[cfg(test)] +use std::panic::RefUnwindSafe; +use std::sync::Arc; +use std::time::Duration; use tokio::runtime::Runtime; use vss_client::client::VssClient; use vss_client::error::VssError; +use vss_client::headers::VssHeaderProvider; use vss_client::types::{ DeleteObjectRequest, GetObjectRequest, KeyValue, ListKeyVersionsRequest, PutObjectRequest, Storable, }; +use vss_client::util::key_obfuscator::KeyObfuscator; use vss_client::util::retry::{ ExponentialBackoffRetryPolicy, FilteredRetryPolicy, JitteredRetryPolicy, MaxAttemptsRetryPolicy, MaxTotalDelayRetryPolicy, RetryPolicy, @@ -35,16 +43,23 @@ pub struct VssStore { store_id: String, runtime: Runtime, storable_builder: StorableBuilder, + key_obfuscator: KeyObfuscator, } impl VssStore { - pub(crate) fn new(base_url: String, store_id: String, data_encryption_key: [u8; 32]) -> Self { - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); + pub(crate) fn new( + base_url: String, store_id: String, vss_seed: [u8; 32], + header_provider: Arc, + ) -> io::Result { + let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build()?; + let (data_encryption_key, obfuscation_master_key) = + derive_data_encryption_and_obfuscation_keys(&vss_seed); + let key_obfuscator = KeyObfuscator::new(obfuscation_master_key); let storable_builder = StorableBuilder::new(data_encryption_key, RandEntropySource); - let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(100)) - .with_max_attempts(3) - .with_max_total_delay(Duration::from_secs(2)) - .with_max_jitter(Duration::from_millis(50)) + let retry_policy = ExponentialBackoffRetryPolicy::new(Duration::from_millis(10)) + .with_max_attempts(10) + 
.with_max_total_delay(Duration::from_secs(15)) + .with_max_jitter(Duration::from_millis(10)) .skip_retry_on_error(Box::new(|e: &VssError| { matches!( e, @@ -54,17 +69,18 @@ impl VssStore { ) }) as _); - let client = VssClient::new(&base_url, retry_policy); - Self { client, store_id, runtime, storable_builder } + let client = VssClient::new_with_headers(base_url, retry_policy, header_provider); + Ok(Self { client, store_id, runtime, storable_builder, key_obfuscator }) } fn build_key( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, ) -> io::Result { + let obfuscated_key = self.key_obfuscator.obfuscate(key); if primary_namespace.is_empty() { - Ok(key.to_string()) + Ok(obfuscated_key) } else { - Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, key)) + Ok(format!("{}#{}#{}", primary_namespace, secondary_namespace, obfuscated_key)) } } @@ -72,7 +88,10 @@ impl VssStore { let mut parts = unified_key.splitn(3, '#'); let (_primary_namespace, _secondary_namespace) = (parts.next(), parts.next()); match parts.next() { - Some(actual_key) => Ok(actual_key.to_string()), + Some(obfuscated_key) => { + let actual_key = self.key_obfuscator.deobfuscate(obfuscated_key)?; + Ok(actual_key) + }, None => Err(Error::new(ErrorKind::InvalidData, "Invalid key format")), } } @@ -132,7 +151,14 @@ impl KVStore for VssStore { })?; // unwrap safety: resp.value must be always present for a non-erroneous VSS response, otherwise // it is an API-violation which is converted to [`VssError::InternalServerError`] in [`VssClient`] - let storable = Storable::decode(&resp.value.unwrap().value[..])?; + let storable = Storable::decode(&resp.value.unwrap().value[..]).map_err(|e| { + let msg = format!( + "Failed to decode data read from key {}/{}/{}: {}", + primary_namespace, secondary_namespace, key, e + ); + Error::new(ErrorKind::Other, msg) + })?; + Ok(self.storable_builder.deconstruct(storable)?.0) } @@ -207,6 +233,19 @@ impl KVStore for VssStore { } } +fn 
derive_data_encryption_and_obfuscation_keys(vss_seed: &[u8; 32]) -> ([u8; 32], [u8; 32]) { + let hkdf = |initial_key_material: &[u8], salt: &[u8]| -> [u8; 32] { + let mut engine = HmacEngine::::new(salt); + engine.input(initial_key_material); + Hmac::from_engine(engine).to_byte_array() + }; + + let prk = hkdf(vss_seed, b"pseudo_random_key"); + let k1 = hkdf(&prk, b"data_encryption_key"); + let k2 = hkdf(&prk, &[&k1[..], b"obfuscation_key"].concat()); + (k1, k2) +} + /// A source for generating entropy/randomness using [`rand`]. pub(crate) struct RandEntropySource; @@ -226,15 +265,19 @@ mod tests { use crate::io::test_utils::do_read_write_remove_list_persist; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng, RngCore}; + use std::collections::HashMap; + use vss_client::headers::FixedHeaders; #[test] fn read_write_remove_list_persist() { let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); let mut rng = thread_rng(); let rand_store_id: String = (0..7).map(|_| rng.sample(Alphanumeric) as char).collect(); - let mut data_encryption_key = [0u8; 32]; - rng.fill_bytes(&mut data_encryption_key); - let vss_store = VssStore::new(vss_base_url, rand_store_id, data_encryption_key); + let mut vss_seed = [0u8; 32]; + rng.fill_bytes(&mut vss_seed); + let header_provider = Arc::new(FixedHeaders::new(HashMap::new())); + let vss_store = + VssStore::new(vss_base_url, rand_store_id, vss_seed, header_provider).unwrap(); do_read_write_remove_list_persist(&vss_store); } diff --git a/src/lib.rs b/src/lib.rs index 9db31e572..cdc9f8c8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,11 +1,9 @@ -// This file is Copyright its original authors, visible in version contror -// history. +// This file is Copyright its original authors, visible in version control history. // -// This file is licensed under the Apache License, Version 2.0 or the MIT license -// , at your option. -// You may not use this file except in accordance with one or both of these -// licenses. 
+// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. #![crate_name = "ldk_node"] @@ -22,8 +20,7 @@ //! //! The primary abstraction of the library is the [`Node`], which can be retrieved by setting up //! and configuring a [`Builder`] to your liking and calling [`build`]. `Node` can then be -//! controlled via commands such as [`start`], [`stop`], [`connect_open_channel`], -//! [`send`], etc.: +//! controlled via commands such as [`start`], [`stop`], [`open_channel`], [`send`], etc.: //! //! ```no_run //! use ldk_node::Builder; @@ -36,7 +33,7 @@ //! fn main() { //! let mut builder = Builder::new(); //! builder.set_network(Network::Testnet); -//! builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); +//! builder.set_chain_source_esplora("https://blockstream.info/testnet/api".to_string(), None); //! builder.set_gossip_source_rgs("https://rapidsync.lightningdevkit.org/testnet/snapshot".to_string()); //! //! let node = builder.build().unwrap(); @@ -49,14 +46,14 @@ //! //! let node_id = PublicKey::from_str("NODE_ID").unwrap(); //! let node_addr = SocketAddress::from_str("IP_ADDR:PORT").unwrap(); -//! node.connect_open_channel(node_id, node_addr, 10000, None, None, false).unwrap(); +//! node.open_channel(node_id, node_addr, 10000, None, None).unwrap(); //! //! let event = node.wait_next_event(); //! println!("EVENT: {:?}", event); //! node.event_handled(); //! //! let invoice = Bolt11Invoice::from_str("INVOICE_STR").unwrap(); -//! node.bolt11_payment().send(&invoice).unwrap(); +//! node.bolt11_payment().send(&invoice, None).unwrap(); //! //! node.stop().unwrap(); //! } @@ -65,7 +62,7 @@ //! [`build`]: Builder::build //! [`start`]: Node::start //! [`stop`]: Node::stop -//! [`connect_open_channel`]: Node::connect_open_channel +//! [`open_channel`]: Node::open_channel //! [`send`]: Bolt11Payment::send //! 
#![cfg_attr(not(feature = "uniffi"), deny(missing_docs))] @@ -77,7 +74,8 @@ mod balance; mod builder; -mod config; +mod chain; +pub mod config; mod connection; mod error; mod event; @@ -102,14 +100,13 @@ pub use bip39; pub use bitcoin; pub use lightning; pub use lightning_invoice; +pub use vss_client; pub use balance::{BalanceDetails, LightningBalance, PendingSweepBalance}; -pub use config::{default_config, AnchorChannelsConfig, Config}; pub use error::Error as NodeError; use error::Error; pub use event::Event; -pub use types::ChannelConfig; pub use io::utils::generate_entropy_mnemonic; @@ -122,39 +119,43 @@ pub use builder::BuildError; #[cfg(not(feature = "uniffi"))] pub use builder::NodeBuilder as Builder; +use chain::ChainSource; use config::{ - default_user_config, ENABLE_BACKGROUND_SYNC, LDK_WALLET_SYNC_TIMEOUT_SECS, - NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, - RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL, RGS_SYNC_INTERVAL, - WALLET_SYNC_INTERVAL_MINIMUM_SECS, + default_user_config, may_announce_channel, ChannelConfig, Config, ENABLE_BACKGROUND_SYNC, + NODE_ANN_BCAST_INTERVAL, PEER_RECONNECTION_INTERVAL, RGS_SYNC_INTERVAL, }; use connection::ConnectionManager; use event::{EventHandler, EventQueue}; use gossip::GossipSource; use graph::NetworkGraph; +use io::utils::write_node_metrics; use liquidity::LiquiditySource; use payment::store::PaymentStore; -use payment::{Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment}; +use payment::{ + Bolt11Payment, Bolt12Payment, OnchainPayment, PaymentDetails, SpontaneousPayment, + UnifiedQrPayment, +}; use peer_store::{PeerInfo, PeerStore}; use types::{ - Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, FeeEstimator, - Graph, KeysManager, PeerManager, Router, Scorer, Sweeper, Wallet, + Broadcaster, BumpTransactionEventHandler, ChainMonitor, ChannelManager, DynStore, Graph, + KeysManager, OnionMessenger, PeerManager, Router, Scorer, Sweeper, Wallet, }; 
pub use types::{ChannelDetails, ChannelType, KeyValue, PeerDetails, TlvEntry, UserChannelId}; use logger::{log_error, log_info, log_trace, FilesystemLogger, Logger}; -use lightning::chain::{BestBlock, Confirm}; +use lightning::chain::BestBlock; use lightning::events::bump_transaction::Wallet as LdkWallet; -use lightning::ln::channelmanager::{ChannelShutdownState, PaymentId}; +use lightning::impl_writeable_tlv_based; +use lightning::ln::channel_state::ChannelShutdownState; +use lightning::ln::channelmanager::PaymentId; use lightning::ln::msgs::SocketAddress; +use lightning::routing::gossip::NodeAlias; pub use lightning::util::logger::Level as LogLevel; use lightning_background_processor::process_events_async; -use lightning_transaction_sync::EsploraSyncClient; - use bitcoin::secp256k1::PublicKey; use rand::Rng; @@ -172,19 +173,19 @@ uniffi::include_scaffolding!("ldk_node"); /// /// Needs to be initialized and instantiated through [`Builder::build`]. pub struct Node { - runtime: Arc>>, + runtime: Arc>>>, stop_sender: tokio::sync::watch::Sender<()>, event_handling_stopped_sender: tokio::sync::watch::Sender<()>, config: Arc, wallet: Arc, - tx_sync: Arc>>, + chain_source: Arc, tx_broadcaster: Arc, - fee_estimator: Arc, event_queue: Arc>>, channel_manager: Arc, chain_monitor: Arc, output_sweeper: Arc, peer_manager: Arc, + onion_messenger: Arc, connection_manager: Arc>>, keys_manager: Arc, network_graph: Arc, @@ -197,12 +198,7 @@ pub struct Node { peer_store: Arc>>, payment_store: Arc>>, is_listening: Arc, - latest_wallet_sync_timestamp: Arc>>, - latest_onchain_wallet_sync_timestamp: Arc>>, - latest_fee_rate_cache_update_timestamp: Arc>>, - latest_rgs_snapshot_timestamp: Arc>>, - latest_node_announcement_broadcast_timestamp: Arc>>, - latest_channel_monitor_archival_height: Arc>>, + node_metrics: Arc>, } impl Node { @@ -212,6 +208,20 @@ impl Node { /// After this returns, the [`Node`] instance can be controlled via the provided API methods in /// a thread-safe manner. 
pub fn start(&self) -> Result<(), Error> { + let runtime = + Arc::new(tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap()); + self.start_with_runtime(runtime) + } + + /// Starts the necessary background tasks (such as handling events coming from user input, + /// LDK/BDK, and the peer-to-peer network) on the the given `runtime`. + /// + /// This allows to have LDK Node reuse an outer pre-existing runtime, e.g., to avoid stacking Tokio + /// runtime contexts. + /// + /// After this returns, the [`Node`] instance can be controlled via the provided API methods in + /// a thread-safe manner. + pub fn start_with_runtime(&self, runtime: Arc) -> Result<(), Error> { // Acquire a run lock and hold it until we're setup. let mut runtime_lock = self.runtime.write().unwrap(); if runtime_lock.is_some() { @@ -226,208 +236,30 @@ impl Node { self.config.network ); - let runtime = tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap(); - // Block to ensure we update our fee rate cache once on startup - let fee_estimator = Arc::clone(&self.fee_estimator); - let sync_logger = Arc::clone(&self.logger); - let sync_fee_rate_update_timestamp = - Arc::clone(&self.latest_fee_rate_cache_update_timestamp); + let chain_source = Arc::clone(&self.chain_source); let runtime_ref = &runtime; tokio::task::block_in_place(move || { - runtime_ref.block_on(async move { - let now = Instant::now(); - match fee_estimator.update_fee_estimates().await { - Ok(()) => { - log_info!( - sync_logger, - "Initial fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_fee_rate_update_timestamp.write().unwrap() = unix_time_secs_opt; - Ok(()) - }, - Err(e) => { - log_error!(sync_logger, "Initial fee rate cache update failed: {}", e,); - Err(e) - }, - } - }) + runtime_ref.block_on(async move { chain_source.update_fee_rate_estimates().await }) })?; + // 
Alby: disable default background sync if ENABLE_BACKGROUND_SYNC { - // Setup wallet sync - let wallet = Arc::clone(&self.wallet); - let sync_logger = Arc::clone(&self.logger); - let sync_onchain_wallet_timestamp = - Arc::clone(&self.latest_onchain_wallet_sync_timestamp); - let mut stop_sync = self.stop_sender.subscribe(); - let onchain_wallet_sync_interval_secs = self - .config - .onchain_wallet_sync_interval_secs - .max(config::WALLET_SYNC_INTERVAL_MINIMUM_SECS); - std::thread::spawn(move || { - tokio::runtime::Builder::new_current_thread() - .enable_all() - .build() - .unwrap() - .block_on(async move { - let mut onchain_wallet_sync_interval = tokio::time::interval( - Duration::from_secs(onchain_wallet_sync_interval_secs), - ); - onchain_wallet_sync_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! { - _ = stop_sync.changed() => { - log_trace!( - sync_logger, - "Stopping background syncing on-chain wallet.", - ); - return; - } - _ = onchain_wallet_sync_interval.tick() => { - let now = Instant::now(); - match wallet.sync().await { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - } - Err(err) => { - log_error!( - sync_logger, - "Background sync of on-chain wallet failed: {}", - err - ) - } - } - } - } - } - }); - }); - - let mut stop_fee_updates = self.stop_sender.subscribe(); - let fee_update_logger = Arc::clone(&self.logger); - let fee_update_timestamp = Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - let fee_estimator = Arc::clone(&self.fee_estimator); - let fee_rate_cache_update_interval_secs = self - .config - .fee_rate_cache_update_interval_secs - .max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); - runtime.spawn(async move { - let mut 
fee_rate_update_interval = - tokio::time::interval(Duration::from_secs(fee_rate_cache_update_interval_secs)); - // We just blocked on updating, so skip the first tick. - fee_rate_update_interval.reset(); - fee_rate_update_interval - .set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! { - _ = stop_fee_updates.changed() => { - log_trace!( - fee_update_logger, - "Stopping background updates of fee rate cache.", - ); - return; - } - _ = fee_rate_update_interval.tick() => { - let now = Instant::now(); - match fee_estimator.update_fee_estimates().await { - Ok(()) => { - log_trace!( - fee_update_logger, - "Background update of fee rate cache finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *fee_update_timestamp.write().unwrap() = unix_time_secs_opt; - } - Err(err) => { - log_error!( - fee_update_logger, - "Background update of fee rate cache failed: {}", - err - ) - } - } - } - } - } - }); - - let tx_sync = Arc::clone(&self.tx_sync); + // Spawn background task continuously syncing onchain, lightning, and fee rate cache. 
+ let stop_sync_receiver = self.stop_sender.subscribe(); + let chain_source = Arc::clone(&self.chain_source); let sync_cman = Arc::clone(&self.channel_manager); - let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); - let archive_cmon = Arc::clone(&self.chain_monitor); let sync_sweeper = Arc::clone(&self.output_sweeper); - let sync_logger = Arc::clone(&self.logger); - let sync_wallet_timestamp = Arc::clone(&self.latest_wallet_sync_timestamp); - let sync_monitor_archival_height = - Arc::clone(&self.latest_channel_monitor_archival_height); - let mut stop_sync = self.stop_sender.subscribe(); - let wallet_sync_interval_secs = - self.config.wallet_sync_interval_secs.max(WALLET_SYNC_INTERVAL_MINIMUM_SECS); runtime.spawn(async move { - let mut wallet_sync_interval = - tokio::time::interval(Duration::from_secs(wallet_sync_interval_secs)); - wallet_sync_interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip); - loop { - tokio::select! 
{ - _ = stop_sync.changed() => { - log_trace!( - sync_logger, - "Stopping background syncing Lightning wallet.", - ); - return; - } - _ = wallet_sync_interval.tick() => { - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let now = Instant::now(); - let timeout_fut = tokio::time::timeout(Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), tx_sync.sync(confirmables)); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!( - sync_logger, - "Background sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - - periodically_archive_fully_resolved_monitors( - Arc::clone(&archive_cman), - Arc::clone(&archive_cmon), - Arc::clone(&sync_monitor_archival_height) - ); - } - Err(e) => { - log_error!(sync_logger, "Background sync of Lightning wallet failed: {}", e) - } - } - Err(e) => { - log_error!(sync_logger, "Background sync of Lightning wallet timed out: {}", e) - } - } - } - } - } + chain_source + .continuously_sync_wallets( + stop_sync_receiver, + sync_cman, + sync_cmon, + sync_sweeper, + ) + .await; }); } @@ -435,7 +267,7 @@ impl Node { let gossip_source = Arc::clone(&self.gossip_source); let gossip_sync_store = Arc::clone(&self.kv_store); let gossip_sync_logger = Arc::clone(&self.logger); - let gossip_rgs_sync_timestamp = Arc::clone(&self.latest_rgs_snapshot_timestamp); + let gossip_node_metrics = Arc::clone(&self.node_metrics); let mut stop_gossip_sync = self.stop_sender.subscribe(); runtime.spawn(async move { let mut interval = tokio::time::interval(RGS_SYNC_INTERVAL); @@ -458,22 +290,22 @@ impl Node { "Background sync of RGS gossip data finished in {}ms.", now.elapsed().as_millis() ); - io::utils::write_latest_rgs_sync_timestamp( - 
updated_timestamp, - Arc::clone(&gossip_sync_store), - Arc::clone(&gossip_sync_logger), - ) - .unwrap_or_else(|e| { - log_error!(gossip_sync_logger, "Persistence failed: {}", e); - panic!("Persistence failed"); - }); - *gossip_rgs_sync_timestamp.write().unwrap() = Some(updated_timestamp as u64); + { + let mut locked_node_metrics = gossip_node_metrics.write().unwrap(); + locked_node_metrics.latest_rgs_snapshot_timestamp = Some(updated_timestamp); + write_node_metrics(&*locked_node_metrics, Arc::clone(&gossip_sync_store), Arc::clone(&gossip_sync_logger)) + .unwrap_or_else(|e| { + log_error!(gossip_sync_logger, "Persistence failed: {}", e); + }); + } + } + Err(e) => { + log_error!( + gossip_sync_logger, + "Background sync of RGS gossip data failed: {}", + e + ) } - Err(e) => log_error!( - gossip_sync_logger, - "Background sync of RGS gossip data failed: {}", - e - ), } } } @@ -595,31 +427,33 @@ impl Node { let bcast_config = Arc::clone(&self.config); let bcast_store = Arc::clone(&self.kv_store); let bcast_logger = Arc::clone(&self.logger); - let bcast_ann_timestamp = Arc::clone(&self.latest_node_announcement_broadcast_timestamp); + let bcast_node_metrics = Arc::clone(&self.node_metrics); let mut stop_bcast = self.stop_sender.subscribe(); - runtime.spawn(async move { - // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. - #[cfg(not(test))] - let mut interval = tokio::time::interval(Duration::from_secs(30)); - #[cfg(test)] - let mut interval = tokio::time::interval(Duration::from_secs(5)); - loop { - tokio::select! { + let node_alias = self.config.node_alias.clone(); + if may_announce_channel(&self.config) { + runtime.spawn(async move { + // We check every 30 secs whether our last broadcast is NODE_ANN_BCAST_INTERVAL away. + #[cfg(not(test))] + let mut interval = tokio::time::interval(Duration::from_secs(30)); + #[cfg(test)] + let mut interval = tokio::time::interval(Duration::from_secs(5)); + loop { + tokio::select! 
{ _ = stop_bcast.changed() => { log_trace!( bcast_logger, "Stopping broadcasting node announcements.", - ); + ); return; } _ = interval.tick() => { - let skip_broadcast = match io::utils::read_latest_node_ann_bcast_timestamp(Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) { - Ok(latest_bcast_time_secs) => { + let skip_broadcast = match bcast_node_metrics.read().unwrap().latest_node_announcement_broadcast_timestamp { + Some(latest_bcast_time_secs) => { // Skip if the time hasn't elapsed yet. let next_bcast_unix_time = SystemTime::UNIX_EPOCH + Duration::from_secs(latest_bcast_time_secs) + NODE_ANN_BCAST_INTERVAL; next_bcast_unix_time.elapsed().is_err() } - Err(_) => { + None => { // Don't skip if we haven't broadcasted before. false } @@ -629,7 +463,7 @@ impl Node { continue; } - if !bcast_cm.list_channels().iter().any(|chan| chan.is_public && chan.is_channel_ready) { + if !bcast_cm.list_channels().iter().any(|chan| chan.is_announced && chan.is_channel_ready) { // Skip if we don't have any public channels that are ready. continue; } @@ -639,33 +473,38 @@ impl Node { continue; } - let addresses = bcast_config.listening_addresses.clone().unwrap_or(Vec::new()); - - if addresses.is_empty() { - // Skip if we are not listening on any addresses. 
+ let addresses = if let Some(addresses) = bcast_config.listening_addresses.clone() { + addresses + } else { + debug_assert!(false, "We checked whether the node may announce, so listening addresses should always be set"); continue; - } - - bcast_pm.broadcast_node_announcement([0; 3], [0; 32], addresses); + }; - let unix_time_secs_opt = - SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); - *bcast_ann_timestamp.write().unwrap() = unix_time_secs_opt; + if let Some(node_alias) = node_alias.as_ref() { + bcast_pm.broadcast_node_announcement([0; 3], node_alias.0, addresses); - if let Some(unix_time_secs) = unix_time_secs_opt { - io::utils::write_latest_node_ann_bcast_timestamp(unix_time_secs, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) - .unwrap_or_else(|e| { - log_error!(bcast_logger, "Persistence failed: {}", e); - panic!("Persistence failed"); - }); + let unix_time_secs_opt = + SystemTime::now().duration_since(UNIX_EPOCH).ok().map(|d| d.as_secs()); + { + let mut locked_node_metrics = bcast_node_metrics.write().unwrap(); + locked_node_metrics.latest_node_announcement_broadcast_timestamp = unix_time_secs_opt; + write_node_metrics(&*locked_node_metrics, Arc::clone(&bcast_store), Arc::clone(&bcast_logger)) + .unwrap_or_else(|e| { + log_error!(bcast_logger, "Persistence failed: {}", e); + }); + } + } else { + debug_assert!(false, "We checked whether the node may announce, so node alias should always be set"); + continue } } + } } - } - }); + }); + } let mut stop_tx_bcast = self.stop_sender.subscribe(); - let tx_bcaster = Arc::clone(&self.tx_broadcaster); + let chain_source = Arc::clone(&self.chain_source); let tx_bcast_logger = Arc::clone(&self.logger); runtime.spawn(async move { // Every second we try to clear our broadcasting queue. 
@@ -681,7 +520,7 @@ impl Node { return; } _ = interval.tick() => { - tx_bcaster.process_queue().await; + chain_source.process_broadcast_queue().await; } } } @@ -716,6 +555,7 @@ impl Node { let background_chan_man = Arc::clone(&self.channel_manager); let background_gossip_sync = self.gossip_source.as_gossip_sync(); let background_peer_man = Arc::clone(&self.peer_manager); + let background_onion_messenger = Arc::clone(&self.onion_messenger); let background_logger = Arc::clone(&self.logger); let background_error_logger = Arc::clone(&self.logger); let background_scorer = Arc::clone(&self.scorer); @@ -748,6 +588,7 @@ impl Node { |e| background_event_handler.handle_event(e), background_chain_mon, background_chan_man, + Some(background_onion_messenger), background_gossip_sync, background_peer_man, background_logger, @@ -833,12 +674,14 @@ impl Node { // FIXME: For now, we wait up to 100 secs (BDK_WALLET_SYNC_TIMEOUT_SECS + 10) to allow // event handling to exit gracefully even if it was blocked on the BDK wallet syncing. We // should drop this considerably post upgrading to BDK 1.0. - let timeout_res = runtime.block_on(async { - tokio::time::timeout( - Duration::from_secs(100), - event_handling_stopped_receiver.changed(), - ) - .await + let timeout_res = tokio::task::block_in_place(move || { + runtime.block_on(async { + tokio::time::timeout( + Duration::from_secs(100), + event_handling_stopped_receiver.changed(), + ) + .await + }) }); match timeout_res { @@ -871,9 +714,6 @@ impl Node { ); } - // Shutdown our runtime. By now ~no or only very few tasks should be left. 
- runtime.shutdown_timeout(Duration::from_secs(10)); - log_info!(self.logger, "Shutdown complete."); Ok(()) } @@ -883,24 +723,30 @@ impl Node { let is_running = self.runtime.read().unwrap().is_some(); let is_listening = self.is_listening.load(Ordering::Acquire); let current_best_block = self.channel_manager.current_best_block().into(); - let latest_wallet_sync_timestamp = *self.latest_wallet_sync_timestamp.read().unwrap(); + let locked_node_metrics = self.node_metrics.read().unwrap(); + let latest_lightning_wallet_sync_timestamp = + locked_node_metrics.latest_lightning_wallet_sync_timestamp; let latest_onchain_wallet_sync_timestamp = - *self.latest_onchain_wallet_sync_timestamp.read().unwrap(); + locked_node_metrics.latest_onchain_wallet_sync_timestamp; let latest_fee_rate_cache_update_timestamp = - *self.latest_fee_rate_cache_update_timestamp.read().unwrap(); - let latest_rgs_snapshot_timestamp = *self.latest_rgs_snapshot_timestamp.read().unwrap(); + locked_node_metrics.latest_fee_rate_cache_update_timestamp; + let latest_rgs_snapshot_timestamp = + locked_node_metrics.latest_rgs_snapshot_timestamp.map(|val| val as u64); let latest_node_announcement_broadcast_timestamp = - *self.latest_node_announcement_broadcast_timestamp.read().unwrap(); + locked_node_metrics.latest_node_announcement_broadcast_timestamp; + let latest_channel_monitor_archival_height = + locked_node_metrics.latest_channel_monitor_archival_height; NodeStatus { is_running, is_listening, current_best_block, - latest_wallet_sync_timestamp, + latest_lightning_wallet_sync_timestamp, latest_onchain_wallet_sync_timestamp, latest_fee_rate_cache_update_timestamp, latest_rgs_snapshot_timestamp, latest_node_announcement_broadcast_timestamp, + latest_channel_monitor_archival_height, } } @@ -960,6 +806,11 @@ impl Node { self.config.listening_addresses.clone() } + /// Returns our node alias. 
+ pub fn node_alias(&self) -> Option { + self.config.node_alias + } + /// Returns a payment handler allowing to create and pay [BOLT 11] invoices. /// /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md @@ -1000,13 +851,13 @@ impl Node { /// /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md #[cfg(not(feature = "uniffi"))] - pub fn bolt12_payment(&self) -> Arc { - Arc::new(Bolt12Payment::new( + pub fn bolt12_payment(&self) -> Bolt12Payment { + Bolt12Payment::new( Arc::clone(&self.runtime), Arc::clone(&self.channel_manager), Arc::clone(&self.payment_store), Arc::clone(&self.logger), - )) + ) } /// Returns a payment handler allowing to create and pay [BOLT 12] offers and refunds. @@ -1072,6 +923,40 @@ impl Node { )) } + /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], + /// and [BOLT 12] payment options. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + #[cfg(not(feature = "uniffi"))] + pub fn unified_qr_payment(&self) -> UnifiedQrPayment { + UnifiedQrPayment::new( + self.onchain_payment().into(), + self.bolt11_payment().into(), + self.bolt12_payment().into(), + Arc::clone(&self.config), + Arc::clone(&self.logger), + ) + } + + /// Returns a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], + /// and [BOLT 12] payment options. 
+ /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + #[cfg(feature = "uniffi")] + pub fn unified_qr_payment(&self) -> Arc { + Arc::new(UnifiedQrPayment::new( + self.onchain_payment(), + self.bolt11_payment(), + self.bolt12_payment(), + Arc::clone(&self.config), + Arc::clone(&self.logger), + )) + } + /// Retrieve a list of known channels. pub fn list_channels(&self) -> Vec { self.channel_manager.list_channels().into_iter().map(|c| c.into()).collect() @@ -1135,23 +1020,10 @@ impl Node { Ok(()) } - /// Connect to a node and open a new channel. Disconnects and re-connects are handled automatically - /// - /// Disconnects and reconnects are handled automatically. - /// - /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the - /// channel counterparty on channel open. This can be useful to start out with the balance not - /// entirely shifted to one side, therefore allowing to receive payments from the getgo. - /// - /// If Anchor channels are enabled, this will ensure the configured - /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before - /// opening the channel. - /// - /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
- pub fn connect_open_channel( + fn open_channel_inner( &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, - push_to_counterparty_msat: Option, channel_config: Option>, - announce_channel: bool, + push_to_counterparty_msat: Option, channel_config: Option, + announce_for_forwarding: bool, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -1213,12 +1085,12 @@ impl Node { } let mut user_config = default_user_config(&self.config); - user_config.channel_handshake_config.announced_channel = announce_channel; - user_config.channel_config = (*(channel_config.unwrap_or_default())).clone().into(); + user_config.channel_handshake_config.announce_for_forwarding = announce_for_forwarding; + user_config.channel_config = (channel_config.unwrap_or_default()).clone().into(); // We set the max inflight to 100% for private channels. // FIXME: LDK will default to this behavior soon, too, at which point we should drop this // manual override. - if !announce_channel { + if !announce_for_forwarding { user_config .channel_handshake_config .max_inbound_htlc_value_in_flight_percent_of_channel = 100; @@ -1255,6 +1127,77 @@ impl Node { } } + /// Connect to a node and open a new unannounced channel. + /// + /// To open an announced channel, see [`Node::open_announced_channel`]. + /// + /// Disconnects and reconnects are handled automatically. + /// + /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the + /// channel counterparty on channel open. This can be useful to start out with the balance not + /// entirely shifted to one side, therefore allowing to receive payments from the getgo. + /// + /// If Anchor channels are enabled, this will ensure the configured + /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before + /// opening the channel. + /// + /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
+ ///
+ /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats
+ pub fn open_channel(
+ &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64,
+ push_to_counterparty_msat: Option, channel_config: Option,
+ ) -> Result {
+ self.open_channel_inner(
+ node_id,
+ address,
+ channel_amount_sats,
+ push_to_counterparty_msat,
+ channel_config,
+ false,
+ )
+ }
+
+ /// Connect to a node and open a new announced channel.
+ ///
+ /// This will return an error if the node has not been sufficiently configured to operate as a
+ /// forwarding node that can properly announce its existence to the public network graph, i.e.,
+ /// if [`Config::listening_addresses`] and [`Config::node_alias`] are unset.
+ ///
+ /// To open an unannounced channel, see [`Node::open_channel`].
+ ///
+ /// Disconnects and reconnects are handled automatically.
+ ///
+ /// If `push_to_counterparty_msat` is set, the given value will be pushed (read: sent) to the
+ /// channel counterparty on channel open. This can be useful to start out with the balance not
+ /// entirely shifted to one side, therefore allowing to receive payments from the getgo.
+ ///
+ /// If Anchor channels are enabled, this will ensure the configured
+ /// [`AnchorChannelsConfig::per_channel_reserve_sats`] is available and will be retained before
+ /// opening the channel.
+ ///
+ /// Returns a [`UserChannelId`] allowing to locally keep track of the channel. 
+ /// + /// [`AnchorChannelsConfig::per_channel_reserve_sats`]: crate::config::AnchorChannelsConfig::per_channel_reserve_sats + pub fn open_announced_channel( + &self, node_id: PublicKey, address: SocketAddress, channel_amount_sats: u64, + push_to_counterparty_msat: Option, channel_config: Option, + ) -> Result { + if may_announce_channel(&self.config) { + self.open_channel_inner( + node_id, + address, + channel_amount_sats, + push_to_counterparty_msat, + channel_config, + true, + ) + } else { + log_error!(self.logger, "Failed to open announced channel as the node hasn't been sufficiently configured to act as a forwarding node. Please make sure to configure listening addreesses and node alias"); + return Err(Error::ChannelCreationFailed); + } + } + /// Alby: update fee estimates separately rather than doing a full sync pub fn update_fee_estimates(&self) -> Result<(), Error> { let rt_lock = self.runtime.read().unwrap(); @@ -1262,36 +1205,19 @@ impl Node { return Err(Error::NotRunning); } - let fee_estimator = Arc::clone(&self.fee_estimator); - let sync_logger = Arc::clone(&self.logger); - let sync_fee_rate_update_timestamp = - Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - + let chain_source = Arc::clone(&self.chain_source); tokio::task::block_in_place(move || { tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( async move { - let now = Instant::now(); - // We don't add an additional timeout here, as - // `FeeEstimator::update_fee_estimates` already returns after a timeout. - match fee_estimator.update_fee_estimates().await { - Ok(()) => { - log_info!( - sync_logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_fee_rate_update_timestamp.write().unwrap() = unix_time_secs_opt; - Ok(()) + match chain_source.as_ref() { + ChainSource::Esplora { .. 
} => { + chain_source.update_fee_rate_estimates().await?; }, - Err(e) => { - log_error!(sync_logger, "Fee rate cache update failed: {}", e,); - return Err(e); + ChainSource::BitcoindRpc { .. } => { + chain_source.update_fee_rate_estimates().await?; }, } + Ok(()) }, ) }) @@ -1300,10 +1226,14 @@ impl Node { /// Manually sync the LDK and BDK wallets with the current chain state and update the fee rate /// cache. /// - /// **Note:** The wallets are regularly synced in the background, which is configurable via - /// [`Config::onchain_wallet_sync_interval_secs`] and [`Config::wallet_sync_interval_secs`]. - /// Therefore, using this blocking sync method is almost always redundant and should be avoided - /// where possible. + /// **Note:** The wallets are regularly synced in the background, which is configurable via the + /// respective config object, e.g., via + /// [`EsploraSyncConfig::onchain_wallet_sync_interval_secs`] and + /// [`EsploraSyncConfig::lightning_wallet_sync_interval_secs`]. Therefore, using this blocking + /// sync method is almost always redundant and should be avoided where possible. + /// + /// [`EsploraSyncConfig::onchain_wallet_sync_interval_secs`]: crate::config::EsploraSyncConfig::onchain_wallet_sync_interval_secs + /// [`EsploraSyncConfig::lightning_wallet_sync_interval_secs`]: crate::config::EsploraSyncConfig::lightning_wallet_sync_interval_secs /// **Note:** this is currently used by Alby (combined with disabled background syncs) to have /// dynamic sync intervals. 
pub fn sync_wallets(&self) -> Result<(), Error> { @@ -1312,113 +1242,29 @@ impl Node { return Err(Error::NotRunning); } - let wallet = Arc::clone(&self.wallet); - let tx_sync = Arc::clone(&self.tx_sync); + let chain_source = Arc::clone(&self.chain_source); let sync_cman = Arc::clone(&self.channel_manager); - let archive_cman = Arc::clone(&self.channel_manager); let sync_cmon = Arc::clone(&self.chain_monitor); - let archive_cmon = Arc::clone(&self.chain_monitor); - let fee_estimator = Arc::clone(&self.fee_estimator); let sync_sweeper = Arc::clone(&self.output_sweeper); - let sync_logger = Arc::clone(&self.logger); - let confirmables = vec![ - &*sync_cman as &(dyn Confirm + Sync + Send), - &*sync_cmon as &(dyn Confirm + Sync + Send), - &*sync_sweeper as &(dyn Confirm + Sync + Send), - ]; - let sync_wallet_timestamp = Arc::clone(&self.latest_wallet_sync_timestamp); - let sync_fee_rate_update_timestamp = - Arc::clone(&self.latest_fee_rate_cache_update_timestamp); - let sync_onchain_wallet_timestamp = Arc::clone(&self.latest_onchain_wallet_sync_timestamp); - let sync_monitor_archival_height = Arc::clone(&self.latest_channel_monitor_archival_height); - tokio::task::block_in_place(move || { tokio::runtime::Builder::new_multi_thread().enable_all().build().unwrap().block_on( async move { - let now = Instant::now(); - // We don't add an additional timeout here, as `Wallet::sync` already returns - // after a timeout. 
- log_info!(sync_logger, "Starting wallet sync"); - match wallet.sync().await { - Ok(()) => { - log_info!( - sync_logger, - "Sync of on-chain wallet finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_onchain_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - }, - Err(e) => { - log_error!(sync_logger, "Sync of on-chain wallet failed: {}", e); - return Err(e); - }, - }; - - log_info!(sync_logger, "Starting fee estimates sync"); - let now = Instant::now(); - // We don't add an additional timeout here, as - // `FeeEstimator::update_fee_estimates` already returns after a timeout. - match fee_estimator.update_fee_estimates().await { - Ok(()) => { - log_info!( - sync_logger, - "Fee rate cache update finished in {}ms.", - now.elapsed().as_millis() - ); - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_fee_rate_update_timestamp.write().unwrap() = unix_time_secs_opt; + match chain_source.as_ref() { + ChainSource::Esplora { .. 
} => { + chain_source.update_fee_rate_estimates().await?; + chain_source + .sync_lightning_wallet(sync_cman, sync_cmon, sync_sweeper) + .await?; + chain_source.sync_onchain_wallet().await?; }, - Err(e) => { - log_error!(sync_logger, "Fee rate cache update failed: {}", e,); - return Err(e); - }, - } - - log_info!(sync_logger, "Starting LDK wallet sync"); - let now = Instant::now(); - let tx_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(LDK_WALLET_SYNC_TIMEOUT_SECS), - tx_sync.sync(confirmables), - ); - match tx_sync_timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_info!( - sync_logger, - "Sync of Lightning wallet finished in {}ms.", - now.elapsed().as_millis() - ); - - let unix_time_secs_opt = SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .map(|d| d.as_secs()); - *sync_wallet_timestamp.write().unwrap() = unix_time_secs_opt; - - periodically_archive_fully_resolved_monitors( - archive_cman, - archive_cmon, - sync_monitor_archival_height, - ); - Ok(()) - }, - Err(e) => { - log_error!(sync_logger, "Sync of Lightning wallet failed: {}", e); - Err(e.into()) - }, - }, - Err(e) => { - log_error!(sync_logger, "Sync of Lightning wallet timed out: {}", e); - Err(Error::TxSyncTimeout) + ChainSource::BitcoindRpc { .. } => { + chain_source.update_fee_rate_estimates().await?; + chain_source + .poll_and_update_listeners(sync_cman, sync_cmon, sync_sweeper) + .await?; }, } + Ok(()) }, ) }) @@ -1431,7 +1277,7 @@ impl Node { pub fn close_channel( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, ) -> Result<(), Error> { - self.close_channel_internal(user_channel_id, counterparty_node_id, false) + self.close_channel_internal(user_channel_id, counterparty_node_id, false, None) } /// Force-close a previously opened channel. 
@@ -1445,15 +1291,23 @@ impl Node { /// Broadcasting the closing transactions will be omitted for Anchor channels if we trust the /// counterparty to broadcast for us (see [`AnchorChannelsConfig::trusted_peers_no_reserve`] /// for more information). + /// + /// [`AnchorChannelsConfig::trusted_peers_no_reserve`]: crate::config::AnchorChannelsConfig::trusted_peers_no_reserve pub fn force_close_channel( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, + reason: Option, ) -> Result<(), Error> { - self.close_channel_internal(user_channel_id, counterparty_node_id, true) + self.close_channel_internal(user_channel_id, counterparty_node_id, true, reason) } fn close_channel_internal( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, force: bool, + force_close_reason: Option, ) -> Result<(), Error> { + debug_assert!( + force_close_reason.is_none() || force, + "Reason can only be set for force closures" + ); let open_channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); if let Some(channel_details) = @@ -1467,6 +1321,7 @@ impl Node { .force_close_without_broadcasting_txn( &channel_details.channel_id, &counterparty_node_id, + force_close_reason.unwrap_or_default(), ) .map_err(|e| { log_error!( @@ -1481,6 +1336,7 @@ impl Node { .force_close_broadcasting_latest_txn( &channel_details.channel_id, &counterparty_node_id, + force_close_reason.unwrap_or_default(), ) .map_err(|e| { log_error!(self.logger, "Failed to force-close channel: {:?}", e); @@ -1508,7 +1364,7 @@ impl Node { /// Update the config for a previously opened channel. 
pub fn update_channel_config( &self, user_channel_id: &UserChannelId, counterparty_node_id: PublicKey, - channel_config: Arc, + channel_config: ChannelConfig, ) -> Result<(), Error> { let open_channels = self.channel_manager.list_channels_with_counterparty(&counterparty_node_id); @@ -1519,7 +1375,7 @@ impl Node { .update_channel_config( &counterparty_node_id, &[channel_details.channel_id], - &(*channel_config).clone().into(), + &(channel_config).clone().into(), ) .map_err(|_| Error::ChannelConfigUpdateFailed) } else { @@ -1541,7 +1397,9 @@ impl Node { /// Alby: Used to recover funds after restoring static channel backup pub fn force_close_all_channels_without_broadcasting_txn(&self) { - self.channel_manager.force_close_all_channels_without_broadcasting_txn(); + self.channel_manager.force_close_all_channels_without_broadcasting_txn( + "lost or corrupted channel state".to_string(), + ); } /// Alby: Return encoded channel monitors for a recovery of last resort @@ -1620,7 +1478,8 @@ impl Node { /// /// For example, you could retrieve all stored outbound payments as follows: /// ``` - /// # use ldk_node::{Builder, Config}; + /// # use ldk_node::Builder; + /// # use ldk_node::config::Config; /// # use ldk_node::payment::PaymentDirection; /// # use ldk_node::bitcoin::Network; /// # let mut config = Config::default(); @@ -1701,7 +1560,7 @@ impl Node { /// can be sure that the signature was generated by the caller. /// Signatures are EC recoverable, meaning that given the message and the /// signature the `PublicKey` of the signer can be extracted. - pub fn sign_message(&self, msg: &[u8]) -> Result { + pub fn sign_message(&self, msg: &[u8]) -> String { self.keys_manager.sign_message(msg) } @@ -1731,30 +1590,67 @@ pub struct NodeStatus { /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced /// our Lightning wallet to the chain tip. /// - /// Will be `None` if the wallet hasn't been synced since the [`Node`] was initialized. 
- pub latest_wallet_sync_timestamp: Option, + /// Will be `None` if the wallet hasn't been synced yet. + pub latest_lightning_wallet_sync_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully synced /// our on-chain wallet to the chain tip. /// - /// Will be `None` if the wallet hasn't been synced since the [`Node`] was initialized. + /// Will be `None` if the wallet hasn't been synced yet. pub latest_onchain_wallet_sync_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last successfully update /// our fee rate cache. /// - /// Will be `None` if the cache hasn't been updated since the [`Node`] was initialized. + /// Will be `None` if the cache hasn't been updated yet. pub latest_fee_rate_cache_update_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when the last rapid gossip sync /// (RGS) snapshot we successfully applied was generated. /// - /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated since the [`Node`] was initialized. + /// Will be `None` if RGS isn't configured or the snapshot hasn't been updated yet. pub latest_rgs_snapshot_timestamp: Option, /// The timestamp, in seconds since start of the UNIX epoch, when we last broadcasted a node /// announcement. /// - /// Will be `None` if we have no public channels or we haven't broadcasted since the [`Node`] was initialized. + /// Will be `None` if we have no public channels or we haven't broadcasted yet. pub latest_node_announcement_broadcast_timestamp: Option, + /// The block height when we last archived closed channel monitor data. + /// + /// Will be `None` if we haven't archived any monitors of closed channels yet. + pub latest_channel_monitor_archival_height: Option, +} + +/// Status fields that are persisted across restarts. 
+#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct NodeMetrics { + latest_lightning_wallet_sync_timestamp: Option, + latest_onchain_wallet_sync_timestamp: Option, + latest_fee_rate_cache_update_timestamp: Option, + latest_rgs_snapshot_timestamp: Option, + latest_node_announcement_broadcast_timestamp: Option, + latest_channel_monitor_archival_height: Option, +} + +impl Default for NodeMetrics { + fn default() -> Self { + Self { + latest_lightning_wallet_sync_timestamp: None, + latest_onchain_wallet_sync_timestamp: None, + latest_fee_rate_cache_update_timestamp: None, + latest_rgs_snapshot_timestamp: None, + latest_node_announcement_broadcast_timestamp: None, + latest_channel_monitor_archival_height: None, + } + } } +impl_writeable_tlv_based!(NodeMetrics, { + (0, latest_lightning_wallet_sync_timestamp, option), + (2, latest_onchain_wallet_sync_timestamp, option), + (4, latest_fee_rate_cache_update_timestamp, option), + (6, latest_rgs_snapshot_timestamp, option), + (8, latest_node_announcement_broadcast_timestamp, option), + (10, latest_channel_monitor_archival_height, option), +}); + pub(crate) fn total_anchor_channels_reserve_sats( channel_manager: &ChannelManager, config: &Config, ) -> u64 { @@ -1774,19 +1670,3 @@ pub(crate) fn total_anchor_channels_reserve_sats( * anchor_channels_config.per_channel_reserve_sats }) } - -fn periodically_archive_fully_resolved_monitors( - channel_manager: Arc, chain_monitor: Arc, - latest_channel_monitor_archival_height: Arc>>, -) { - let mut latest_archival_height_lock = latest_channel_monitor_archival_height.write().unwrap(); - let cur_height = channel_manager.current_best_block().height; - let should_archive = latest_archival_height_lock - .as_ref() - .map_or(true, |h| cur_height >= h + RESOLVED_CHANNEL_MONITOR_ARCHIVAL_INTERVAL); - - if should_archive { - chain_monitor.archive_fully_resolved_channel_monitors(); - *latest_archival_height_lock = Some(cur_height); - } -} diff --git a/src/liquidity.rs b/src/liquidity.rs index 
00e9f5717..1dfb5453a 100644 --- a/src/liquidity.rs +++ b/src/liquidity.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::logger::{log_debug, log_error, log_info, Logger}; use crate::types::{ChannelManager, KeysManager, LiquidityManager, PeerManager}; use crate::{Config, Error}; diff --git a/src/logger.rs b/src/logger.rs index 8e5cd84f5..9ebe6f8ca 100644 --- a/src/logger.rs +++ b/src/logger.rs @@ -1,12 +1,19 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + pub(crate) use lightning::util::logger::Logger; pub(crate) use lightning::{log_bytes, log_debug, log_error, log_info, log_trace}; use lightning::util::logger::{Level, Record}; -use lightning::util::ser::Writer; use chrono::Utc; use std::fs; +use std::io::Write; #[cfg(not(target_os = "windows"))] use std::os::unix::fs::symlink; use std::path::{Path, PathBuf}; diff --git a/src/message_handler.rs b/src/message_handler.rs index 89d67d846..38999512e 100644 --- a/src/message_handler.rs +++ b/src/message_handler.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ use crate::liquidity::LiquiditySource; use lightning::ln::features::{InitFeatures, NodeFeatures}; @@ -92,4 +99,24 @@ where }, } } + + fn peer_connected( + &self, their_node_id: &PublicKey, msg: &lightning::ln::msgs::Init, inbound: bool, + ) -> Result<(), ()> { + match self { + Self::Ignoring => Ok(()), + Self::Liquidity { liquidity_source, .. } => { + liquidity_source.liquidity_manager().peer_connected(their_node_id, msg, inbound) + }, + } + } + + fn peer_disconnected(&self, their_node_id: &PublicKey) { + match self { + Self::Ignoring => {}, + Self::Liquidity { liquidity_source, .. } => { + liquidity_source.liquidity_manager().peer_disconnected(their_node_id) + }, + } + } } diff --git a/src/payment/bolt11.rs b/src/payment/bolt11.rs index e14bb9823..79afcd37e 100644 --- a/src/payment/bolt11.rs +++ b/src/payment/bolt11.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to create and pay [BOLT 11] invoices. //! //! 
[BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md @@ -11,14 +18,20 @@ use crate::payment::store::{ LSPFeeLimits, PaymentDetails, PaymentDetailsUpdate, PaymentDirection, PaymentKind, PaymentStatus, PaymentStore, }; +use crate::payment::SendingParameters; use crate::peer_store::{PeerInfo, PeerStore}; use crate::types::{ChannelManager, KeysManager}; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; +use lightning::ln::invoice_utils::{ + create_invoice_from_channelmanager_and_duration_since_epoch, + create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash, +}; use lightning::ln::{PaymentHash, PaymentPreimage}; use lightning::routing::router::{PaymentParameters, RouteParameters}; -use lightning_invoice::{payment, Bolt11Invoice, Currency}; +use lightning::ln::bolt11_payment; +use lightning_invoice::{Bolt11Invoice, Currency}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; @@ -33,7 +46,7 @@ use std::time::SystemTime; /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md /// [`Node::bolt11_payment`]: crate::Node::bolt11_payment pub struct Bolt11Payment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, connection_manager: Arc>>, keys_manager: Arc, @@ -46,7 +59,7 @@ pub struct Bolt11Payment { impl Bolt11Payment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, connection_manager: Arc>>, keys_manager: Arc, @@ -69,13 +82,18 @@ impl Bolt11Payment { } /// Send a payment given an invoice. - pub fn send(&self, invoice: &Bolt11Invoice) -> Result { + /// + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
+ pub fn send( + &self, invoice: &Bolt11Invoice, sending_parameters: Option, + ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - let (payment_hash, recipient_onion, route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let (payment_hash, recipient_onion, mut route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send payment due to the given invoice being \"zero-amount\". Please use send_using_amount instead."); Error::InvalidInvoice })?; @@ -90,6 +108,21 @@ impl Bolt11Payment { } } + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; + let payment_secret = Some(*invoice.payment_secret()); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); @@ -150,14 +183,18 @@ impl Bolt11Payment { } } - /// Send a payment given an invoice and an amount in millisatoshi. + /// Send a payment given an invoice and an amount in millisatoshis. /// /// This will fail if the amount given is less than the value required by the given invoice. /// /// This can be used to pay a so-called "zero-amount" invoice, i.e., an invoice that leaves the /// amount paid to be determined by the user. 
+ /// + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. pub fn send_using_amount( &self, invoice: &Bolt11Invoice, amount_msat: u64, + sending_parameters: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -198,9 +235,24 @@ impl Bolt11Payment { .with_bolt11_features(features.clone()) .map_err(|_| Error::InvalidInvoice)?; } - let route_params = + let mut route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat); + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; + let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let recipient_fields = RecipientOnionFields::secret_only(*payment_secret); @@ -430,7 +482,7 @@ impl Bolt11Payment { let invoice = { let invoice_res = if let Some(payment_hash) = manual_claim_payment_hash { - lightning_invoice::utils::create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash( + create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_hash( &self.channel_manager, keys_manager, Arc::clone(&self.logger), @@ -443,7 +495,7 @@ impl Bolt11Payment { None, ) } else { - lightning_invoice::utils::create_invoice_from_channelmanager_and_duration_since_epoch( + create_invoice_from_channelmanager_and_duration_since_epoch( &self.channel_manager, 
keys_manager, Arc::clone(&self.logger), @@ -658,7 +710,7 @@ impl Bolt11Payment { return Err(Error::NotRunning); } - let (_payment_hash, _recipient_onion, route_params) = payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + let (_payment_hash, _recipient_onion, route_params) = bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice being \"zero-amount\". Please use send_probes_using_amount instead."); Error::InvalidInvoice })?; @@ -700,12 +752,12 @@ impl Bolt11Payment { return Err(Error::InvalidAmount); } - payment::payment_parameters_from_invoice(&invoice).map_err(|_| { + bolt11_payment::payment_parameters_from_invoice(&invoice).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being \"zero-amount\"."); Error::InvalidInvoice })? } else { - payment::payment_parameters_from_zero_amount_invoice(&invoice, amount_msat).map_err(|_| { + bolt11_payment::payment_parameters_from_zero_amount_invoice(&invoice, amount_msat).map_err(|_| { log_error!(self.logger, "Failed to send probes due to the given invoice unexpectedly being not \"zero-amount\"."); Error::InvalidInvoice })? diff --git a/src/payment/bolt12.rs b/src/payment/bolt12.rs index 5fd1208cc..90024b7d3 100644 --- a/src/payment/bolt12.rs +++ b/src/payment/bolt12.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to create and pay [BOLT 12] offers and refunds. //! //! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md @@ -12,12 +19,14 @@ use crate::types::ChannelManager; use lightning::ln::channelmanager::{PaymentId, Retry}; use lightning::offers::invoice::Bolt12Invoice; -use lightning::offers::offer::{Amount, Offer}; +use lightning::offers::offer::{Amount, Offer, Quantity}; use lightning::offers::parse::Bolt12SemanticError; use lightning::offers::refund::Refund; +use lightning::util::string::UntrustedString; use rand::RngCore; +use std::num::NonZeroU64; use std::sync::{Arc, RwLock}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -28,7 +37,7 @@ use std::time::{Duration, SystemTime, UNIX_EPOCH}; /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md /// [`Node::bolt12_payment`]: crate::Node::bolt12_payment pub struct Bolt12Payment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, payment_store: Arc>>, logger: Arc, @@ -36,7 +45,7 @@ pub struct Bolt12Payment { impl Bolt12Payment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, payment_store: Arc>>, logger: Arc, ) -> Self { @@ -47,13 +56,15 @@ impl Bolt12Payment { /// /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. - pub fn send(&self, offer: &Offer, payer_note: Option) -> Result { + /// + /// If `quantity` is `Some` it represents the number of items requested. 
+ pub fn send( + &self, offer: &Offer, quantity: Option, payer_note: Option, + ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - - let quantity = None; let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -76,7 +87,7 @@ impl Bolt12Payment { &offer, quantity, None, - payer_note, + payer_note.clone(), payment_id, retry_strategy, max_total_routing_fee_msat, @@ -95,11 +106,13 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, kind, - Some(*offer_amount_msat), + Some(offer_amount_msat), PaymentDirection::Outbound, PaymentStatus::Pending, ); @@ -117,11 +130,13 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, kind, - Some(*offer_amount_msat), + Some(offer_amount_msat), PaymentDirection::Outbound, PaymentStatus::Failed, ); @@ -143,14 +158,13 @@ impl Bolt12Payment { /// If `payer_note` is `Some` it will be seen by the recipient and reflected back in the invoice /// response. 
pub fn send_using_amount( - &self, offer: &Offer, payer_note: Option, amount_msat: u64, + &self, offer: &Offer, amount_msat: u64, quantity: Option, payer_note: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { return Err(Error::NotRunning); } - let quantity = None; let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); @@ -158,7 +172,7 @@ impl Bolt12Payment { let max_total_routing_fee_msat = None; let offer_amount_msat = match offer.amount() { - Some(Amount::Bitcoin { amount_msats }) => *amount_msats, + Some(Amount::Bitcoin { amount_msats }) => amount_msats, Some(_) => { log_error!(self.logger, "Failed to send payment as the provided offer was denominated in an unsupported currency."); return Err(Error::UnsupportedCurrency); @@ -177,7 +191,7 @@ impl Bolt12Payment { &offer, quantity, Some(amount_msat), - payer_note, + payer_note.clone(), payment_id, retry_strategy, max_total_routing_fee_msat, @@ -196,6 +210,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -218,6 +234,8 @@ impl Bolt12Payment { preimage: None, secret: None, offer_id: offer.id(), + payer_note: payer_note.map(UntrustedString), + quantity, }; let payment = PaymentDetails::new( payment_id, @@ -236,30 +254,57 @@ impl Bolt12Payment { /// Returns a payable offer that can be used to request and receive a payment of the amount /// given. 
- pub fn receive(&self, amount_msat: u64, description: &str) -> Result { - let offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; - let offer = offer_builder - .amount_msats(amount_msat) - .description(description.to_string()) - .build() - .map_err(|e| { - log_error!(self.logger, "Failed to create offer: {:?}", e); + pub fn receive( + &self, amount_msat: u64, description: &str, expiry_secs: Option, quantity: Option, + ) -> Result { + let absolute_expiry = expiry_secs.map(|secs| { + (SystemTime::now() + Duration::from_secs(secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap() + }); + + let offer_builder = + self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); Error::OfferCreationFailed })?; - Ok(offer) + let mut offer = + offer_builder.amount_msats(amount_msat).description(description.to_string()); + + if let Some(qty) = quantity { + if qty == 0 { + log_error!(self.logger, "Failed to create offer: quantity can't be zero."); + return Err(Error::InvalidQuantity); + } else { + offer = offer.supported_quantity(Quantity::Bounded(NonZeroU64::new(qty).unwrap())) + }; + }; + + let finalized_offer = offer.build().map_err(|e| { + log_error!(self.logger, "Failed to create offer: {:?}", e); + Error::OfferCreationFailed + })?; + + Ok(finalized_offer) } /// Returns a payable offer that can be used to request and receive a payment for which the /// amount is to be determined by the user, also known as a "zero-amount" offer. 
- pub fn receive_variable_amount(&self, description: &str) -> Result { - let offer_builder = self.channel_manager.create_offer_builder().map_err(|e| { - log_error!(self.logger, "Failed to create offer builder: {:?}", e); - Error::OfferCreationFailed - })?; + pub fn receive_variable_amount( + &self, description: &str, expiry_secs: Option, + ) -> Result { + let absolute_expiry = expiry_secs.map(|secs| { + (SystemTime::now() + Duration::from_secs(secs as u64)) + .duration_since(UNIX_EPOCH) + .unwrap() + }); + + let offer_builder = + self.channel_manager.create_offer_builder(absolute_expiry).map_err(|e| { + log_error!(self.logger, "Failed to create offer builder: {:?}", e); + Error::OfferCreationFailed + })?; let offer = offer_builder.description(description.to_string()).build().map_err(|e| { log_error!(self.logger, "Failed to create offer: {:?}", e); Error::OfferCreationFailed @@ -281,8 +326,13 @@ impl Bolt12Payment { let payment_hash = invoice.payment_hash(); let payment_id = PaymentId(payment_hash.0); - let kind = - PaymentKind::Bolt12Refund { hash: Some(payment_hash), preimage: None, secret: None }; + let kind = PaymentKind::Bolt12Refund { + hash: Some(payment_hash), + preimage: None, + secret: None, + payer_note: refund.payer_note().map(|note| UntrustedString(note.0.to_string())), + quantity: refund.quantity(), + }; let payment = PaymentDetails::new( payment_id, @@ -298,22 +348,25 @@ impl Bolt12Payment { } /// Returns a [`Refund`] object that can be used to offer a refund payment of the amount given. 
- pub fn initiate_refund(&self, amount_msat: u64, expiry_secs: u32) -> Result { + pub fn initiate_refund( + &self, amount_msat: u64, expiry_secs: u32, quantity: Option, + payer_note: Option, + ) -> Result { let mut random_bytes = [0u8; 32]; rand::thread_rng().fill_bytes(&mut random_bytes); let payment_id = PaymentId(random_bytes); - let expiration = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) + let absolute_expiry = (SystemTime::now() + Duration::from_secs(expiry_secs as u64)) .duration_since(UNIX_EPOCH) .unwrap(); let retry_strategy = Retry::Timeout(LDK_PAYMENT_RETRY_TIMEOUT); let max_total_routing_fee_msat = None; - let refund = self + let mut refund_builder = self .channel_manager .create_refund_builder( amount_msat, - expiration, + absolute_expiry, payment_id, retry_strategy, max_total_routing_fee_msat, @@ -321,17 +374,30 @@ impl Bolt12Payment { .map_err(|e| { log_error!(self.logger, "Failed to create refund builder: {:?}", e); Error::RefundCreationFailed - })? - .build() - .map_err(|e| { - log_error!(self.logger, "Failed to create refund: {:?}", e); - Error::RefundCreationFailed })?; - log_info!(self.logger, "Offering refund of {}msat", amount_msat); + if let Some(qty) = quantity { + refund_builder = refund_builder.quantity(qty); + } + + if let Some(note) = payer_note.clone() { + refund_builder = refund_builder.payer_note(note); + } + + let refund = refund_builder.build().map_err(|e| { + log_error!(self.logger, "Failed to create refund: {:?}", e); + Error::RefundCreationFailed + })?; - let kind = PaymentKind::Bolt12Refund { hash: None, preimage: None, secret: None }; + log_info!(self.logger, "Offering refund of {}msat", amount_msat); + let kind = PaymentKind::Bolt12Refund { + hash: None, + preimage: None, + secret: None, + payer_note: payer_note.map(|note| UntrustedString(note)), + quantity, + }; let payment = PaymentDetails::new( payment_id, kind, diff --git a/src/payment/mod.rs b/src/payment/mod.rs index 1862bf2df..5c99cfcf8 100644 --- 
a/src/payment/mod.rs +++ b/src/payment/mod.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Objects for different types of payments. mod bolt11; @@ -5,9 +12,95 @@ mod bolt12; mod onchain; mod spontaneous; pub(crate) mod store; +mod unified_qr; pub use bolt11::Bolt11Payment; pub use bolt12::Bolt12Payment; pub use onchain::OnchainPayment; pub use spontaneous::SpontaneousPayment; pub use store::{LSPFeeLimits, PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus}; +pub use unified_qr::{QrPaymentResult, UnifiedQrPayment}; + +/// Represents information used to send a payment. +#[derive(Clone, Debug, PartialEq)] +pub struct SendingParameters { + /// The maximum total fees, in millisatoshi, that may accrue during route finding. + /// + /// This limit also applies to the total fees that may arise while retrying failed payment + /// paths. + /// + /// Note that values below a few sats may result in some paths being spuriously ignored. + #[cfg(not(feature = "uniffi"))] + pub max_total_routing_fee_msat: Option>, + /// The maximum total fees, in millisatoshi, that may accrue during route finding. + /// + /// This limit also applies to the total fees that may arise while retrying failed payment + /// paths. + /// + /// Note that values below a few sats may result in some paths being spuriously ignored. + #[cfg(feature = "uniffi")] + pub max_total_routing_fee_msat: Option, + /// The maximum total CLTV delta we accept for the route. + /// + /// Defaults to [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]. + /// + /// [`DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA`]: lightning::routing::router::DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA + pub max_total_cltv_expiry_delta: Option, + /// The maximum number of paths that may be used by (MPP) payments. 
+ /// + /// Defaults to [`DEFAULT_MAX_PATH_COUNT`]. + /// + /// [`DEFAULT_MAX_PATH_COUNT`]: lightning::routing::router::DEFAULT_MAX_PATH_COUNT + pub max_path_count: Option, + /// Selects the maximum share of a channel's total capacity which will be sent over a channel, + /// as a power of 1/2. + /// + /// A higher value prefers to send the payment using more MPP parts whereas + /// a lower value prefers to send larger MPP parts, potentially saturating channels and + /// increasing failure probability for those paths. + /// + /// Note that this restriction will be relaxed during pathfinding after paths which meet this + /// restriction have been found. While paths which meet this criteria will be searched for, it + /// is ultimately up to the scorer to select them over other paths. + /// + /// Examples: + /// + /// | Value | Max Proportion of Channel Capacity Used | + /// |-------|-----------------------------------------| + /// | 0 | Up to 100% of the channel’s capacity | + /// | 1 | Up to 50% of the channel’s capacity | + /// | 2 | Up to 25% of the channel’s capacity | + /// | 3 | Up to 12.5% of the channel’s capacity | + /// + /// Default value: 2 + pub max_channel_saturation_power_of_half: Option, +} + +/// Represents the possible states of [`SendingParameters::max_total_routing_fee_msat`]. +// +// Required only in bindings as UniFFI can't expose `Option>`. 
+#[cfg(feature = "uniffi")] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum MaxTotalRoutingFeeLimit { + None, + Some { amount_msat: u64 }, +} + +#[cfg(feature = "uniffi")] +impl From for Option { + fn from(value: MaxTotalRoutingFeeLimit) -> Self { + match value { + MaxTotalRoutingFeeLimit::Some { amount_msat } => Some(amount_msat), + MaxTotalRoutingFeeLimit::None => None, + } + } +} + +#[cfg(feature = "uniffi")] +impl From> for MaxTotalRoutingFeeLimit { + fn from(value: Option) -> Self { + value.map_or(MaxTotalRoutingFeeLimit::None, |amount_msat| MaxTotalRoutingFeeLimit::Some { + amount_msat, + }) + } +} diff --git a/src/payment/onchain.rs b/src/payment/onchain.rs index 8a879ae8c..b43765a97 100644 --- a/src/payment/onchain.rs +++ b/src/payment/onchain.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to send and receive on-chain payments. 
use crate::config::Config; @@ -5,7 +12,7 @@ use crate::error::Error; use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::types::{ChannelManager, Wallet}; -use bitcoin::{Address, Txid}; +use bitcoin::{Address, Amount, Txid}; use std::sync::{Arc, RwLock}; @@ -15,7 +22,7 @@ use std::sync::{Arc, RwLock}; /// /// [`Node::onchain_payment`]: crate::Node::onchain_payment pub struct OnchainPayment { - runtime: Arc>>, + runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, @@ -24,7 +31,7 @@ pub struct OnchainPayment { impl OnchainPayment { pub(crate) fn new( - runtime: Arc>>, wallet: Arc, + runtime: Arc>>>, wallet: Arc, channel_manager: Arc, config: Arc, logger: Arc, ) -> Self { Self { runtime, wallet, channel_manager, config, logger } @@ -63,7 +70,9 @@ impl OnchainPayment { ); return Err(Error::InsufficientFunds); } - self.wallet.send_to_address(address, Some(amount_sats)) + + let amount = Amount::from_sat(amount_sats); + self.wallet.send_to_address(address, Some(amount)) } /// Send an on-chain payment to the given address, draining all the available funds. diff --git a/src/payment/spontaneous.rs b/src/payment/spontaneous.rs index 192565f40..1afd72c6f 100644 --- a/src/payment/spontaneous.rs +++ b/src/payment/spontaneous.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! Holds a payment handler allowing to send spontaneous ("keysend") payments. 
use crate::config::{Config, LDK_PAYMENT_RETRY_TIMEOUT}; @@ -6,6 +13,7 @@ use crate::logger::{log_error, log_info, FilesystemLogger, Logger}; use crate::payment::store::{ PaymentDetails, PaymentDirection, PaymentKind, PaymentStatus, PaymentStore, }; +use crate::payment::SendingParameters; use crate::types::{ChannelManager, KeysManager, TlvEntry}; use lightning::ln::channelmanager::{PaymentId, RecipientOnionFields, Retry, RetryableSendFailure}; @@ -17,13 +25,16 @@ use bitcoin::secp256k1::PublicKey; use std::sync::{Arc, RwLock}; +// The default `final_cltv_expiry_delta` we apply when not set. +const LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA: u32 = 144; + /// A payment handler allowing to send spontaneous ("keysend") payments. /// /// Should be retrieved by calling [`Node::spontaneous_payment`]. /// /// [`Node::spontaneous_payment`]: crate::Node::spontaneous_payment pub struct SpontaneousPayment { - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc>>, @@ -33,7 +44,7 @@ pub struct SpontaneousPayment { impl SpontaneousPayment { pub(crate) fn new( - runtime: Arc>>, + runtime: Arc>>>, channel_manager: Arc, keys_manager: Arc, payment_store: Arc>>, config: Arc, logger: Arc, @@ -41,10 +52,13 @@ impl SpontaneousPayment { Self { runtime, channel_manager, keys_manager, payment_store, config, logger } } - /// Send a spontaneous, aka. "keysend", payment + /// Send a spontaneous aka. "keysend", payment. + /// + /// If `sending_parameters` are provided they will override the default as well as the + /// node-wide parameters configured via [`Config::sending_parameters`] on a per-field basis. 
pub fn send( - &self, amount_msat: u64, node_id: PublicKey, custom_tlvs: Vec, - preimage: Option, + &self, amount_msat: u64, node_id: PublicKey, sending_parameters: Option, + custom_tlvs: Vec, preimage: Option, ) -> Result { let rt_lock = self.runtime.read().unwrap(); if rt_lock.is_none() { @@ -65,10 +79,26 @@ impl SpontaneousPayment { } } - let route_params = RouteParameters::from_payment_params_and_value( - PaymentParameters::from_node_id(node_id, self.config.default_cltv_expiry_delta), + let mut route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(node_id, LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA), amount_msat, ); + + let override_params = + sending_parameters.as_ref().or(self.config.sending_parameters.as_ref()); + if let Some(override_params) = override_params { + override_params + .max_total_routing_fee_msat + .map(|f| route_params.max_total_routing_fee_msat = f.into()); + override_params + .max_total_cltv_expiry_delta + .map(|d| route_params.payment_params.max_total_cltv_expiry_delta = d); + override_params.max_path_count.map(|p| route_params.payment_params.max_path_count = p); + override_params + .max_channel_saturation_power_of_half + .map(|s| route_params.payment_params.max_channel_saturation_power_of_half = s); + }; + let recipient_fields = RecipientOnionFields::spontaneous_empty() .with_custom_tlvs( custom_tlvs.iter().map(|tlv| (tlv.r#type, tlv.value.clone())).collect(), @@ -145,13 +175,12 @@ impl SpontaneousPayment { } let liquidity_limit_multiplier = Some(self.config.probing_liquidity_limit_multiplier); - let cltv_expiry_delta = self.config.default_cltv_expiry_delta; self.channel_manager .send_spontaneous_preflight_probes( node_id, amount_msat, - cltv_expiry_delta, + LDK_DEFAULT_FINAL_CLTV_EXPIRY_DELTA, liquidity_limit_multiplier, ) .map_err(|e| { diff --git a/src/payment/store.rs b/src/payment/store.rs index 354044f51..a4d2fd735 100644 --- a/src/payment/store.rs +++ b/src/payment/store.rs @@ -1,3 +1,10 @@ +// 
This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::hex_utils; use crate::io::{ PAYMENT_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PAYMENT_INFO_PERSISTENCE_SECONDARY_NAMESPACE, @@ -11,6 +18,7 @@ use lightning::ln::msgs::DecodeError; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::offers::offer::OfferId; use lightning::util::ser::{Readable, Writeable}; +use lightning::util::string::UntrustedString; use lightning::{ _init_and_read_len_prefixed_tlv_fields, impl_writeable_tlv_based, impl_writeable_tlv_based_enum, write_tlv_fields, @@ -182,7 +190,7 @@ pub enum PaymentDirection { impl_writeable_tlv_based_enum!(PaymentDirection, (0, Inbound) => {}, - (1, Outbound) => {}; + (1, Outbound) => {} ); /// Represents the current status of a payment. @@ -199,7 +207,7 @@ pub enum PaymentStatus { impl_writeable_tlv_based_enum!(PaymentStatus, (0, Pending) => {}, (2, Succeeded) => {}, - (4, Failed) => {}; + (4, Failed) => {} ); /// Represents the kind of a payment. @@ -254,6 +262,18 @@ pub enum PaymentKind { secret: Option, /// The ID of the offer this payment is for. offer_id: OfferId, + /// The payer note for the payment. + /// + /// Truncated to [`PAYER_NOTE_LIMIT`] characters. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + /// + /// [`PAYER_NOTE_LIMIT`]: lightning::offers::invoice_request::PAYER_NOTE_LIMIT + payer_note: Option, + /// The quantity of an item requested in the offer. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + quantity: Option, }, /// A [BOLT 12] 'refund' payment, i.e., a payment for a [`Refund`]. /// @@ -266,6 +286,14 @@ pub enum PaymentKind { preimage: Option, /// The secret used by the payment. 
secret: Option, + /// The payer note for the refund payment. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + payer_note: Option, + /// The quantity of an item that the refund is for. + /// + /// This will always be `None` for payments serialized with version `v0.3.0`. + quantity: Option, }, /// A spontaneous ("keysend") payment. Spontaneous { @@ -294,7 +322,9 @@ impl_writeable_tlv_based_enum!(PaymentKind, }, (6, Bolt12Offer) => { (0, hash, option), + (1, payer_note, option), (2, preimage, option), + (3, quantity, option), (4, secret, option), (6, offer_id, required), }, @@ -305,9 +335,11 @@ impl_writeable_tlv_based_enum!(PaymentKind, }, (10, Bolt12Refund) => { (0, hash, option), + (1, payer_note, option), (2, preimage, option), + (3, quantity, option), (4, secret, option), - }; + } ); /// Limits applying to how much fee we allow an LSP to deduct from the payment amount. @@ -531,11 +563,11 @@ where #[cfg(test)] mod tests { use super::*; + use bitcoin::io::Cursor; use lightning::util::{ ser::Readable, test_utils::{TestLogger, TestStore}, }; - use std::io::Cursor; use std::sync::Arc; /// We refactored `PaymentDetails` to hold a payment id and moved some required fields into diff --git a/src/payment/unified_qr.rs b/src/payment/unified_qr.rs new file mode 100644 index 000000000..88d372456 --- /dev/null +++ b/src/payment/unified_qr.rs @@ -0,0 +1,420 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +//! Holds a payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment +//! options. +//! +//! [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +//! [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +//! 
[BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md +use crate::error::Error; +use crate::logger::{log_error, FilesystemLogger, Logger}; +use crate::payment::{Bolt11Payment, Bolt12Payment, OnchainPayment}; +use crate::Config; + +use lightning::ln::channelmanager::PaymentId; +use lightning::offers::offer::Offer; +use lightning_invoice::Bolt11Invoice; + +use bip21::de::ParamKind; +use bip21::{DeserializationError, DeserializeParams, Param, SerializeParams}; +use bitcoin::address::{NetworkChecked, NetworkUnchecked}; +use bitcoin::{Amount, Txid}; + +use std::sync::Arc; +use std::vec::IntoIter; + +type Uri<'a> = bip21::Uri<'a, NetworkChecked, Extras>; + +#[derive(Debug, Clone)] +struct Extras { + bolt11_invoice: Option, + bolt12_offer: Option, +} + +/// A payment handler allowing to create [BIP 21] URIs with an on-chain, [BOLT 11], and [BOLT 12] payment +/// option. +/// +/// Should be retrieved by calling [`Node::unified_qr_payment`] +/// +/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md +/// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md +/// [`Node::unified_qr_payment`]: crate::Node::unified_qr_payment +pub struct UnifiedQrPayment { + onchain_payment: Arc, + bolt11_invoice: Arc, + bolt12_payment: Arc, + config: Arc, + logger: Arc, +} + +impl UnifiedQrPayment { + pub(crate) fn new( + onchain_payment: Arc, bolt11_invoice: Arc, + bolt12_payment: Arc, config: Arc, logger: Arc, + ) -> Self { + Self { onchain_payment, bolt11_invoice, bolt12_payment, config, logger } + } + + /// Generates a URI with an on-chain address, [BOLT 11] invoice and [BOLT 12] offer. + /// + /// The URI allows users to send the payment request allowing the wallet to decide + /// which payment method to use. 
This enables a fallback mechanism: older wallets + /// can always pay using the provided on-chain address, while newer wallets will + /// typically opt to use the provided BOLT11 invoice or BOLT12 offer. + /// + /// # Parameters + /// - `amount_sats`: The amount to be received, specified in satoshis. + /// - `description`: A description or note associated with the payment. + /// This message is visible to the payer and can provide context or details about the payment. + /// - `expiry_sec`: The expiration time for the payment, specified in seconds. + /// + /// Returns a payable URI that can be used to request and receive a payment of the amount + /// given. In case of an error, the function returns `Error::WalletOperationFailed`for on-chain + /// address issues, `Error::InvoiceCreationFailed` for BOLT11 invoice issues, or + /// `Error::OfferCreationFailed` for BOLT12 offer issues. + /// + /// The generated URI can then be given to a QR code library. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + pub fn receive( + &self, amount_sats: u64, description: &str, expiry_sec: u32, + ) -> Result { + let onchain_address = self.onchain_payment.new_address()?; + + let amount_msats = amount_sats * 1_000; + + let bolt12_offer = match self.bolt12_payment.receive(amount_msats, description, None, None) + { + Ok(offer) => Some(offer), + Err(e) => { + log_error!(self.logger, "Failed to create offer: {}", e); + return Err(Error::OfferCreationFailed); + }, + }; + + let bolt11_invoice = + match self.bolt11_invoice.receive(amount_msats, description, expiry_sec) { + Ok(invoice) => Some(invoice), + Err(e) => { + log_error!(self.logger, "Failed to create invoice {}", e); + return Err(Error::InvoiceCreationFailed); + }, + }; + + let extras = Extras { bolt11_invoice, bolt12_offer }; + + let mut uri = Uri::with_extras(onchain_address, extras); + uri.amount = 
Some(Amount::from_sat(amount_sats)); + uri.message = Some(description.into()); + + Ok(format_uri(uri)) + } + + /// Sends a payment given a [BIP 21] URI. + /// + /// This method parses the provided URI string and attempts to send the payment. If the URI + /// has an offer and or invoice, it will try to pay the offer first followed by the invoice. + /// If they both fail, the on-chain payment will be paid. + /// + /// Returns a `QrPaymentResult` indicating the outcome of the payment. If an error + /// occurs, an `Error` is returned detailing the issue encountered. + /// + /// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki + pub fn send(&self, uri_str: &str) -> Result { + let uri: bip21::Uri = + uri_str.parse().map_err(|_| Error::InvalidUri)?; + + let uri_network_checked = + uri.clone().require_network(self.config.network).map_err(|_| Error::InvalidNetwork)?; + + if let Some(offer) = uri_network_checked.extras.bolt12_offer { + match self.bolt12_payment.send(&offer, None, None) { + Ok(payment_id) => return Ok(QrPaymentResult::Bolt12 { payment_id }), + Err(e) => log_error!(self.logger, "Failed to send BOLT12 offer: {:?}. This is part of a unified QR code payment. Falling back to the BOLT11 invoice.", e), + } + } + + if let Some(invoice) = uri_network_checked.extras.bolt11_invoice { + match self.bolt11_invoice.send(&invoice, None) { + Ok(payment_id) => return Ok(QrPaymentResult::Bolt11 { payment_id }), + Err(e) => log_error!(self.logger, "Failed to send BOLT11 invoice: {:?}. This is part of a unified QR code payment. Falling back to the on-chain transaction.", e), + } + } + + let amount = match uri_network_checked.amount { + Some(amount) => amount, + None => { + log_error!(self.logger, "No amount specified in the URI. 
Aborting the payment."); + return Err(Error::InvalidAmount); + }, + }; + + let txid = + self.onchain_payment.send_to_address(&uri_network_checked.address, amount.to_sat())?; + + Ok(QrPaymentResult::Onchain { txid }) + } +} + +/// Represents the result of a payment made using a [BIP 21] QR code. +/// +/// After a successful on-chain transaction, the transaction ID ([`Txid`]) is returned. +/// For BOLT11 and BOLT12 payments, the corresponding [`PaymentId`] is returned. +/// +/// [BIP 21]: https://github.com/bitcoin/bips/blob/master/bip-0021.mediawiki +/// [`PaymentId`]: lightning::ln::channelmanager::PaymentId +/// [`Txid`]: bitcoin::hash_types::Txid +pub enum QrPaymentResult { + /// An on-chain payment. + Onchain { + /// The transaction ID (txid) of the on-chain payment. + txid: Txid, + }, + /// A [BOLT 11] payment. + /// + /// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md + Bolt11 { + /// The payment ID for the BOLT11 invoice. + payment_id: PaymentId, + }, + /// A [BOLT 12] offer payment, i.e., a payment for an [`Offer`]. + /// + /// [BOLT 12]: https://github.com/lightning/bolts/blob/master/12-offer-encoding.md + /// [`Offer`]: crate::lightning::offers::offer::Offer + Bolt12 { + /// The payment ID for the BOLT12 offer. 
+ payment_id: PaymentId, + }, +} + +fn format_uri(uri: bip21::Uri) -> String { + let mut uri = format!("{:#}", uri); + + fn value_to_uppercase(uri: &mut String, key: &str) { + let mut start = 0; + while let Some(index) = uri[start..].find(key) { + let start_index = start + index; + let end_index = uri[start_index..].find('&').map_or(uri.len(), |i| start_index + i); + let lightning_value = &uri[start_index + key.len()..end_index]; + let uppercase_lighting_value = lightning_value.to_uppercase(); + uri.replace_range(start_index + key.len()..end_index, &uppercase_lighting_value); + start = end_index + } + } + value_to_uppercase(&mut uri, "lightning="); + value_to_uppercase(&mut uri, "lno="); + uri +} + +impl<'a> SerializeParams for &'a Extras { + type Key = &'static str; + type Value = String; + type Iterator = IntoIter<(Self::Key, Self::Value)>; + + fn serialize_params(self) -> Self::Iterator { + let mut params = Vec::new(); + + if let Some(bolt11_invoice) = &self.bolt11_invoice { + params.push(("lightning", bolt11_invoice.to_string())); + } + if let Some(bolt12_offer) = &self.bolt12_offer { + params.push(("lno", bolt12_offer.to_string())); + } + + params.into_iter() + } +} + +impl<'a> DeserializeParams<'a> for Extras { + type DeserializationState = DeserializationState; +} + +#[derive(Default)] +struct DeserializationState { + bolt11_invoice: Option, + bolt12_offer: Option, +} + +impl<'a> bip21::de::DeserializationState<'a> for DeserializationState { + type Value = Extras; + + fn is_param_known(&self, key: &str) -> bool { + key == "lightning" || key == "lno" + } + + fn deserialize_temp( + &mut self, key: &str, value: Param<'_>, + ) -> Result::Error> { + match key { + "lightning" => { + let bolt11_value = + String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; + if let Ok(invoice) = bolt11_value.parse::() { + self.bolt11_invoice = Some(invoice); + Ok(bip21::de::ParamKind::Known) + } else { + Ok(bip21::de::ParamKind::Unknown) + } + }, + "lno" => { + 
let bolt12_value = + String::try_from(value).map_err(|_| Error::UriParameterParsingFailed)?; + if let Ok(offer) = bolt12_value.parse::() { + self.bolt12_offer = Some(offer); + Ok(bip21::de::ParamKind::Known) + } else { + Ok(bip21::de::ParamKind::Unknown) + } + }, + _ => Ok(bip21::de::ParamKind::Unknown), + } + } + + fn finalize(self) -> Result::Error> { + Ok(Extras { bolt11_invoice: self.bolt11_invoice, bolt12_offer: self.bolt12_offer }) + } +} + +impl DeserializationError for Extras { + type Error = Error; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::payment::unified_qr::Extras; + use bitcoin::{Address, Network}; + use std::str::FromStr; + + #[test] + fn parse_uri() { + let uri_test1 = "BITCOIN:TB1QRSCD05XNY6QZ63TF9GJELGVK6D3UDJFEKK62VU?amount=1&message=Test%20message&lightning=LNTB1000M1PNXWM7MDQ523JHXAPQD4JHXUMPVAJSNP4QWP9QD2JFP8DUZ46JQG5LTKVDH04YG52G6UF2YAXP8H7YZPZM3DM5PP5KUP7YT429UP9Z4ACPA60R7WETSTL66549MG05P0JN0C4L2NCC40SSP5R0LH86DJCL0NK8HZHNZHX92VVUAAVNE48Z5RVKVY5DKTRQ0DMP7S9QYYSGQCQPCXQRRAQYR59FGN2VVC5R6DS0AZMETH493ZU56H0WSVMGYCW9LEPZ032PGQNZMQ6XKVEH90Z02C0NH3J5QGDAWCS2YC2ZNP22J0ZD0PPF78N4QQQEXTYS2"; + let expected_bolt11_invoice_1 = "LNTB1000M1PNXWM7MDQ523JHXAPQD4JHXUMPVAJSNP4QWP9QD2JFP8DUZ46JQG5LTKVDH04YG52G6UF2YAXP8H7YZPZM3DM5PP5KUP7YT429UP9Z4ACPA60R7WETSTL66549MG05P0JN0C4L2NCC40SSP5R0LH86DJCL0NK8HZHNZHX92VVUAAVNE48Z5RVKVY5DKTRQ0DMP7S9QYYSGQCQPCXQRRAQYR59FGN2VVC5R6DS0AZMETH493ZU56H0WSVMGYCW9LEPZ032PGQNZMQ6XKVEH90Z02C0NH3J5QGDAWCS2YC2ZNP22J0ZD0PPF78N4QQQEXTYS2"; + let parsed_uri = uri_test1 + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Testnet) + .expect("Invalid Network"); + + assert_eq!( + parsed_uri.address, + bitcoin::Address::from_str("TB1QRSCD05XNY6QZ63TF9GJELGVK6D3UDJFEKK62VU") + .unwrap() + .require_network(Network::Testnet) + .unwrap() + ); + + assert_eq!(Amount::from_sat(100_000_000), Amount::from(parsed_uri.amount.unwrap())); + + if let Some(invoice) = parsed_uri.extras.bolt11_invoice { + 
assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_1).unwrap()); + } else { + panic!("No Lightning invoice found"); + } + + let uri_with_offer = "BITCOIN:BCRT1QM0NW9S05QDPGC6F52FPKA9U6Q6VWTT5WVS30R2?amount=0.001&message=asdf&lightning=LNBCRT1M1PNGMY98DQ8V9EKGESNP4QDH5SL00QK4842UZMZVJVX2NLUZT4E6P2ZC2DLAGCU565TP42AUDYPP5XD0PRS5CRDLZVU8DNQQU08W9F4YP0XRXW06ZSHCLCHZU9X28HSSSSP5ES30JG9J4VK2CRW80YXTLRJU2M097TXMFTHR00VC5V0LGKVMURRQ9QYYSGQCQPCXQRRAQRZJQ0Q0K9CDYFSVZAJ5V3PDWYWDMHLEYCVD7TG0SVMY4AM4P6GQZJZ5XQQQQYQQX2QQQUQQQQLGQQQQQQQQFQWDQZX24PSHN68A9D4X4HD89F3XVC7DGGRDTFCA5WH4KZ546GSRTJVACA34QQ3DZ9W4JHLJD3XZRW44RA0RET6RDSRJCEZQC6AXANX6QPHZKHJK&lno=LNO1QGSQVGNWGCG35Z6EE2H3YCZRADDM72XRFUA9UVE2RLRM9DEU7XYFZRCYZPGTGRDWMGU44QPYUXLHLLMLWN4QSPQ97HSSQZSYV9EKGESSWCPK7JRAAUZ6574TSTVFJFSE20LSFWH8G9GTPFHL4RRJN23VX4TH35SRWKCNQ6S8R9ZW9HU5RXMPXVYCJVK2KY3NTEA8VXZTMWJF4NAJCCAQZQ7YZ7KDDZ600LAW2S2E7Q6XDYLPSMLMV4YAY0QXX5NC8QH05JRNUYQPQCAHK8Y5KQ8H9X624LS6A9GWFTGKYYPUZVUKKM93DWETTL8A7NE84L7SNHCSGR006EACQRQP8YWY6WPS0TS"; + let expected_bolt11_invoice_2 = "LNBCRT1M1PNGMY98DQ8V9EKGESNP4QDH5SL00QK4842UZMZVJVX2NLUZT4E6P2ZC2DLAGCU565TP42AUDYPP5XD0PRS5CRDLZVU8DNQQU08W9F4YP0XRXW06ZSHCLCHZU9X28HSSSSP5ES30JG9J4VK2CRW80YXTLRJU2M097TXMFTHR00VC5V0LGKVMURRQ9QYYSGQCQPCXQRRAQRZJQ0Q0K9CDYFSVZAJ5V3PDWYWDMHLEYCVD7TG0SVMY4AM4P6GQZJZ5XQQQQYQQX2QQQUQQQQLGQQQQQQQQFQWDQZX24PSHN68A9D4X4HD89F3XVC7DGGRDTFCA5WH4KZ546GSRTJVACA34QQ3DZ9W4JHLJD3XZRW44RA0RET6RDSRJCEZQC6AXANX6QPHZKHJK"; + let expected_bolt12_offer_2 = "LNO1QGSQVGNWGCG35Z6EE2H3YCZRADDM72XRFUA9UVE2RLRM9DEU7XYFZRCYZPGTGRDWMGU44QPYUXLHLLMLWN4QSPQ97HSSQZSYV9EKGESSWCPK7JRAAUZ6574TSTVFJFSE20LSFWH8G9GTPFHL4RRJN23VX4TH35SRWKCNQ6S8R9ZW9HU5RXMPXVYCJVK2KY3NTEA8VXZTMWJF4NAJCCAQZQ7YZ7KDDZ600LAW2S2E7Q6XDYLPSMLMV4YAY0QXX5NC8QH05JRNUYQPQCAHK8Y5KQ8H9X624LS6A9GWFTGKYYPUZVUKKM93DWETTL8A7NE84L7SNHCSGR006EACQRQP8YWY6WPS0TS"; + let parsed_uri_with_offer = uri_with_offer + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Regtest) + .expect("Invalid 
Network"); + + assert_eq!(Amount::from_sat(100_000), Amount::from(parsed_uri_with_offer.amount.unwrap())); + + assert_eq!( + parsed_uri_with_offer.address, + bitcoin::Address::from_str("BCRT1QM0NW9S05QDPGC6F52FPKA9U6Q6VWTT5WVS30R2") + .unwrap() + .require_network(Network::Regtest) + .unwrap() + ); + + if let Some(invoice) = parsed_uri_with_offer.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_2).unwrap()); + } else { + panic!("No invoice found.") + } + + if let Some(offer) = parsed_uri_with_offer.extras.bolt12_offer { + assert_eq!(offer, Offer::from_str(expected_bolt12_offer_2).unwrap()); + } else { + panic!("No offer found."); + } + + let zeus_test = "bitcoin:TB1QQ32G6LM2XKT0U2UGASH5DC4CFT3JTPEW65PZZ5?lightning=LNTB500U1PN89HH6PP5MA7K6DRM5SYVD05NTXMGSRNM728J7EHM8KV6VC96YNLKN7G7VDYQDQQCQZRCXQR8Q7SP5HU30L0EEXKYYPQSQYEZELZWUPT62HLJ0KV2662CALGPAML50QPXQ9QXPQYSGQDKTVFXEC8H2DG2GY3C95ETAJ0QKX50XAUCU304PPFV2SQVGFHZ6RMZWJV8MC3M0LXF3GW852C5VSK0DELK0JHLYUTYZDF7QKNAMT4PQQQN24WM&amount=0.0005"; + let expected_bolt11_invoice_3 = "LNTB500U1PN89HH6PP5MA7K6DRM5SYVD05NTXMGSRNM728J7EHM8KV6VC96YNLKN7G7VDYQDQQCQZRCXQR8Q7SP5HU30L0EEXKYYPQSQYEZELZWUPT62HLJ0KV2662CALGPAML50QPXQ9QXPQYSGQDKTVFXEC8H2DG2GY3C95ETAJ0QKX50XAUCU304PPFV2SQVGFHZ6RMZWJV8MC3M0LXF3GW852C5VSK0DELK0JHLYUTYZDF7QKNAMT4PQQQN24WM"; + let uri_test2 = zeus_test + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Testnet) + .expect("Invalid Network"); + + assert_eq!( + uri_test2.address, + bitcoin::Address::from_str("TB1QQ32G6LM2XKT0U2UGASH5DC4CFT3JTPEW65PZZ5") + .unwrap() + .require_network(Network::Testnet) + .unwrap() + ); + + if let Some(invoice) = uri_test2.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_3).unwrap()); + } else { + panic!("No invoice found."); + } + assert_eq!(Amount::from(uri_test2.amount.unwrap()), Amount::from_sat(50000)); + + let muun_test = 
"bitcoin:bc1q6fmtam67h8wxfwtpumhazhtwyrh3uf039n058zke9xt5hr4ljzwsdcm2pj?amount=0.01&lightning=lnbc10m1pn8g2j4pp575tg4wt8jwgu2lvtk3aj6hy7mc6tnupw07wwkxcvyhtt3wlzw0zsdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdv6dzdeg0ww2eyqqqqryqqqqthqqpysp5fkd3k2rzvwdt2av068p58evf6eg50q0eftfhrpugaxkuyje4d25q9qrsgqqkfmnn67s5g6hadrcvf5h0l7p92rtlkwrfqdvc7uuf6lew0czxksvqhyux3zjrl3tlakwhtvezwl24zshnfumukwh0yntqsng9z6glcquvw7kc"; + let expected_bolt11_invoice_4 = "lnbc10m1pn8g2j4pp575tg4wt8jwgu2lvtk3aj6hy7mc6tnupw07wwkxcvyhtt3wlzw0zsdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdv6dzdeg0ww2eyqqqqryqqqqthqqpysp5fkd3k2rzvwdt2av068p58evf6eg50q0eftfhrpugaxkuyje4d25q9qrsgqqkfmnn67s5g6hadrcvf5h0l7p92rtlkwrfqdvc7uuf6lew0czxksvqhyux3zjrl3tlakwhtvezwl24zshnfumukwh0yntqsng9z6glcquvw7kc"; + let uri_test3 = muun_test + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Bitcoin) + .expect("Invalid Network"); + assert_eq!( + uri_test3.address, + bitcoin::Address::from_str( + "bc1q6fmtam67h8wxfwtpumhazhtwyrh3uf039n058zke9xt5hr4ljzwsdcm2pj" + ) + .unwrap() + .require_network(Network::Bitcoin) + .unwrap() + ); + + if let Some(invoice) = uri_test3.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_4).unwrap()); + } else { + panic!("No invoice found"); + } + assert_eq!(Amount::from(uri_test3.amount.unwrap()), Amount::from_sat(1_000_000)); + + let muun_test_no_amount = "bitcoin:bc1qwe94y974pjl9kg5afg8tmsc0nz4hct04u78hdhukxvnnphgu48hs9lx3k5?lightning=lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; + let expected_bolt11_invoice_5 = 
"lnbc1pn8g249pp5f6ytj32ty90jhvw69enf30hwfgdhyymjewywcmfjevflg6s4z86qdqqcqzzgxqyz5vqrzjqwnvuc0u4txn35cafc7w94gxvq5p3cu9dd95f7hlrh0fvs46wpvhdfjjzh2j9f7ye5qqqqryqqqqthqqpysp5mm832athgcal3m7h35sc29j63lmgzvwc5smfjh2es65elc2ns7dq9qrsgqu2xcje2gsnjp0wn97aknyd3h58an7sjj6nhcrm40846jxphv47958c6th76whmec8ttr2wmg6sxwchvxmsc00kqrzqcga6lvsf9jtqgqy5yexa"; + let uri_test4 = muun_test_no_amount + .parse::>() + .expect("Failed Parsing") + .require_network(Network::Bitcoin) + .expect("Invalid Network"); + assert_eq!( + uri_test4.address, + Address::from_str("bc1qwe94y974pjl9kg5afg8tmsc0nz4hct04u78hdhukxvnnphgu48hs9lx3k5") + .unwrap() + .require_network(Network::Bitcoin) + .unwrap() + ); + if let Some(invoice) = uri_test4.extras.bolt11_invoice { + assert_eq!(invoice, Bolt11Invoice::from_str(expected_bolt11_invoice_5).unwrap()); + } else { + panic!("No invoice found"); + } + } +} diff --git a/src/peer_store.rs b/src/peer_store.rs index 21bd50872..d4d6bbb97 100644 --- a/src/peer_store.rs +++ b/src/peer_store.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + use crate::io::{ PEER_INFO_PERSISTENCE_KEY, PEER_INFO_PERSISTENCE_PRIMARY_NAMESPACE, PEER_INFO_PERSISTENCE_SECONDARY_NAMESPACE, diff --git a/src/sweep.rs b/src/sweep.rs index 1c772d4e9..ba10869b8 100644 --- a/src/sweep.rs +++ b/src/sweep.rs @@ -1,12 +1,19 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + //! The output sweeper used to live here before we upstreamed it to `rust-lightning` and migrated //! to the upstreamed version with LDK Node v0.3.0 (May 2024). 
We should drop this module entirely //! once sufficient time has passed for us to be confident any users completed the migration. use lightning::impl_writeable_tlv_based; -use lightning::ln::ChannelId; +use lightning::ln::types::ChannelId; use lightning::sign::SpendableOutputDescriptor; -use bitcoin::{BlockHash, Transaction}; +use bitcoin::{Amount, BlockHash, Transaction}; #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct DeprecatedSpendableOutputInfo { @@ -31,7 +38,7 @@ impl_writeable_tlv_based!(DeprecatedSpendableOutputInfo, { (14, confirmation_hash, option), }); -pub(crate) fn value_satoshis_from_descriptor(descriptor: &SpendableOutputDescriptor) -> u64 { +pub(crate) fn value_from_descriptor(descriptor: &SpendableOutputDescriptor) -> Amount { match &descriptor { SpendableOutputDescriptor::StaticOutput { output, .. } => output.value, SpendableOutputDescriptor::DelayedPaymentOutput(output) => output.output.value, diff --git a/src/tx_broadcaster.rs b/src/tx_broadcaster.rs index 4492bcfc6..5aded03c6 100644 --- a/src/tx_broadcaster.rs +++ b/src/tx_broadcaster.rs @@ -1,19 +1,20 @@ -use crate::config::TX_BROADCAST_TIMEOUT_SECS; -use crate::logger::{log_bytes, log_error, log_trace, Logger}; +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
-use lightning::chain::chaininterface::BroadcasterInterface; -use lightning::util::ser::Writeable; +use crate::logger::{log_error, Logger}; -use esplora_client::AsyncClient as EsploraClient; +use lightning::chain::chaininterface::BroadcasterInterface; use bitcoin::Transaction; -use reqwest::StatusCode; use tokio::sync::mpsc; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; use std::ops::Deref; -use std::time::Duration; const BCAST_PACKAGE_QUEUE_SIZE: usize = 50; @@ -23,7 +24,6 @@ where { queue_sender: mpsc::Sender>, queue_receiver: Mutex>>, - esplora_client: EsploraClient, logger: L, } @@ -31,80 +31,13 @@ impl TransactionBroadcaster where L::Target: Logger, { - pub(crate) fn new(esplora_client: EsploraClient, logger: L) -> Self { + pub(crate) fn new(logger: L) -> Self { let (queue_sender, queue_receiver) = mpsc::channel(BCAST_PACKAGE_QUEUE_SIZE); - Self { queue_sender, queue_receiver: Mutex::new(queue_receiver), esplora_client, logger } + Self { queue_sender, queue_receiver: Mutex::new(queue_receiver), logger } } - pub(crate) async fn process_queue(&self) { - let mut receiver = self.queue_receiver.lock().await; - while let Some(next_package) = receiver.recv().await { - for tx in &next_package { - let timeout_fut = tokio::time::timeout( - Duration::from_secs(TX_BROADCAST_TIMEOUT_SECS), - self.esplora_client.broadcast(tx), - ); - match timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - log_trace!( - self.logger, - "Successfully broadcast transaction {}", - tx.txid() - ); - }, - Err(e) => match e { - esplora_client::Error::Reqwest(err) => { - if err.status() == StatusCode::from_u16(400).ok() { - // Ignore 400, as this just means bitcoind already knows the - // transaction. - // FIXME: We can further differentiate here based on the error - // message which will be available with rust-esplora-client 0.7 and - // later. 
- } else { - log_error!( - self.logger, - "Failed to broadcast due to HTTP connection error: {}", - err - ); - } - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - _ => { - log_error!( - self.logger, - "Failed to broadcast transaction {}: {}", - tx.txid(), - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - }, - }, - Err(e) => { - log_error!( - self.logger, - "Failed to broadcast transaction due to timeout {}: {}", - tx.txid(), - e - ); - log_trace!( - self.logger, - "Failed broadcast transaction bytes: {}", - log_bytes!(tx.encode()) - ); - }, - } - } - } + pub(crate) async fn get_broadcast_queue(&self) -> MutexGuard>> { + self.queue_receiver.lock().await } } diff --git a/src/types.rs b/src/types.rs index 5f9814cb0..31bac8b2e 100644 --- a/src/types.rs +++ b/src/types.rs @@ -1,29 +1,36 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use crate::chain::ChainSource; +use crate::config::ChannelConfig; +use crate::fee_estimator::OnchainFeeEstimator; use crate::logger::FilesystemLogger; use crate::message_handler::NodeCustomMessageHandler; use lightning::chain::chainmonitor; use lightning::impl_writeable_tlv_based; -use lightning::ln::channelmanager::ChannelDetails as LdkChannelDetails; +use lightning::ln::channel_state::ChannelDetails as LdkChannelDetails; use lightning::ln::msgs::RoutingMessageHandler; use lightning::ln::msgs::SocketAddress; use lightning::ln::peer_handler::IgnoringMessageHandler; -use lightning::ln::ChannelId; +use lightning::ln::types::ChannelId; use lightning::routing::gossip; use lightning::routing::router::DefaultRouter; use lightning::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters}; use lightning::sign::InMemorySigner; -use lightning::util::config::ChannelConfig as LdkChannelConfig; -use lightning::util::config::MaxDustHTLCExposure as LdkMaxDustHTLCExposure; use lightning::util::persist::KVStore; use lightning::util::ser::{Readable, Writeable, Writer}; use lightning::util::sweep::OutputSweeper; use lightning_net_tokio::SocketDescriptor; -use lightning_transaction_sync::EsploraSyncClient; use bitcoin::secp256k1::PublicKey; use bitcoin::OutPoint; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, Mutex}; pub(crate) type DynStore = dyn KVStore + Sync + Send; @@ -31,7 +38,7 @@ pub(crate) type ChainMonitor = chainmonitor::ChainMonitor< InMemorySigner, Arc, Arc, - Arc, + Arc, Arc, Arc, >; @@ -46,8 +53,6 @@ pub(crate) type PeerManager = lightning::ln::peer_handler::PeerManager< Arc, >; -pub(crate) type ChainSource = EsploraSyncClient>; - pub(crate) type LiquidityManager = lightning_liquidity::LiquidityManager, Arc, Arc>; @@ -57,26 +62,19 @@ pub(crate) type ChannelManager = lightning::ln::channelmanager::ChannelManager< Arc, Arc, Arc, - Arc, + Arc, Arc, Arc, >; pub(crate) type Broadcaster = crate::tx_broadcaster::TransactionBroadcaster>; 
-pub(crate) type FeeEstimator = crate::fee_estimator::OnchainFeeEstimator>; - -pub(crate) type Wallet = crate::wallet::Wallet< - bdk::database::SqliteDatabase, - Arc, - Arc, - Arc, ->; +pub(crate) type Wallet = + crate::wallet::Wallet, Arc, Arc>; pub(crate) type KeysManager = crate::wallet::WalletKeysManager< - bdk::database::SqliteDatabase, Arc, - Arc, + Arc, Arc, >; @@ -115,6 +113,7 @@ pub(crate) type OnionMessenger = lightning::onion_message::messenger::OnionMesse Arc, Arc, IgnoringMessageHandler, + IgnoringMessageHandler, >; pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMessageRouter< @@ -126,7 +125,7 @@ pub(crate) type MessageRouter = lightning::onion_message::messenger::DefaultMess pub(crate) type Sweeper = OutputSweeper< Arc, Arc, - Arc, + Arc, Arc, Arc, Arc, @@ -218,11 +217,11 @@ pub struct ChannelDetails { /// balance is not available for inclusion in new outbound HTLCs). This further does not include /// any pending outgoing HTLCs which are awaiting some other resolution to be sent. pub outbound_capacity_msat: u64, - /// The available outbound capacity for sending HTLCs to the remote peer. + /// The available inbound capacity for receiving HTLCs from the remote peer. /// /// The amount does not include any pending HTLCs which are not yet resolved /// (and, thus, whose balance is not available for inclusion in new inbound HTLCs). This further - /// does not include any pending outgoing HTLCs which are awaiting some other resolution to be + /// does not include any pending incoming HTLCs which are awaiting some other resolution to be /// sent. pub inbound_capacity_msat: u64, /// The number of required confirmations on the funding transactions before the funding is @@ -245,7 +244,7 @@ pub struct ChannelDetails { /// This is a strict superset of `is_channel_ready`. 
pub is_usable: bool, /// Returns `true` if this channel is (or will be) publicly-announced - pub is_public: bool, + pub is_announced: bool, /// The difference in the CLTV value between incoming HTLCs and an outbound HTLC forwarded over /// the channel. pub cltv_expiry_delta: Option, @@ -297,7 +296,7 @@ pub struct ChannelDetails { /// The largest value HTLC (in msat) we currently will accept, for this channel. pub inbound_htlc_maximum_msat: Option, /// Set of configurable parameters that affect channel operation. - pub config: Arc, + pub config: ChannelConfig, } impl From for ChannelDetails { @@ -328,7 +327,7 @@ impl From for ChannelDetails { is_outbound: value.is_outbound, is_channel_ready: value.is_channel_ready, is_usable: value.is_usable, - is_public: value.is_public, + is_announced: value.is_announced, cltv_expiry_delta: value.config.map(|c| c.cltv_expiry_delta), counterparty_unspendable_punishment_reserve: value .counterparty @@ -357,7 +356,7 @@ impl From for ChannelDetails { inbound_htlc_minimum_msat: value.inbound_htlc_minimum_msat.unwrap_or(0), inbound_htlc_maximum_msat: value.inbound_htlc_maximum_msat, // unwrap safety: `config` is only `None` for LDK objects serialized prior to 0.0.109. - config: value.config.map(|c| Arc::new(c.into())).unwrap(), + config: value.config.map(|c| c.into()).unwrap(), } } } @@ -377,107 +376,6 @@ pub struct PeerDetails { pub is_connected: bool, } -/// Options which apply on a per-channel basis. -/// -/// See documentation of [`LdkChannelConfig`] for details. -#[derive(Debug)] -pub struct ChannelConfig { - inner: RwLock, -} - -impl Clone for ChannelConfig { - fn clone(&self) -> Self { - self.inner.read().unwrap().clone().into() - } -} - -impl ChannelConfig { - /// Constructs a new `ChannelConfig`. - pub fn new() -> Self { - Self::default() - } - - /// Returns the set `forwarding_fee_proportional_millionths`. 
- pub fn forwarding_fee_proportional_millionths(&self) -> u32 { - self.inner.read().unwrap().forwarding_fee_proportional_millionths - } - - /// Sets the `forwarding_fee_proportional_millionths`. - pub fn set_forwarding_fee_proportional_millionths(&self, value: u32) { - self.inner.write().unwrap().forwarding_fee_proportional_millionths = value; - } - - /// Returns the set `forwarding_fee_base_msat`. - pub fn forwarding_fee_base_msat(&self) -> u32 { - self.inner.read().unwrap().forwarding_fee_base_msat - } - - /// Sets the `forwarding_fee_base_msat`. - pub fn set_forwarding_fee_base_msat(&self, fee_msat: u32) { - self.inner.write().unwrap().forwarding_fee_base_msat = fee_msat; - } - - /// Returns the set `cltv_expiry_delta`. - pub fn cltv_expiry_delta(&self) -> u16 { - self.inner.read().unwrap().cltv_expiry_delta - } - - /// Sets the `cltv_expiry_delta`. - pub fn set_cltv_expiry_delta(&self, value: u16) { - self.inner.write().unwrap().cltv_expiry_delta = value; - } - - /// Returns the set `force_close_avoidance_max_fee_satoshis`. - pub fn force_close_avoidance_max_fee_satoshis(&self) -> u64 { - self.inner.read().unwrap().force_close_avoidance_max_fee_satoshis - } - - /// Sets the `force_close_avoidance_max_fee_satoshis`. - pub fn set_force_close_avoidance_max_fee_satoshis(&self, value_sat: u64) { - self.inner.write().unwrap().force_close_avoidance_max_fee_satoshis = value_sat; - } - - /// Returns the set `accept_underpaying_htlcs`. - pub fn accept_underpaying_htlcs(&self) -> bool { - self.inner.read().unwrap().accept_underpaying_htlcs - } - - /// Sets the `accept_underpaying_htlcs`. - pub fn set_accept_underpaying_htlcs(&self, value: bool) { - self.inner.write().unwrap().accept_underpaying_htlcs = value; - } - - /// Sets the `max_dust_htlc_exposure` from a fixed limit. 
- pub fn set_max_dust_htlc_exposure_from_fixed_limit(&self, limit_msat: u64) { - self.inner.write().unwrap().max_dust_htlc_exposure = - LdkMaxDustHTLCExposure::FixedLimitMsat(limit_msat); - } - - /// Sets the `max_dust_htlc_exposure` from a fee rate multiplier. - pub fn set_max_dust_htlc_exposure_from_fee_rate_multiplier(&self, multiplier: u64) { - self.inner.write().unwrap().max_dust_htlc_exposure = - LdkMaxDustHTLCExposure::FeeRateMultiplier(multiplier); - } -} - -impl From for ChannelConfig { - fn from(value: LdkChannelConfig) -> Self { - Self { inner: RwLock::new(value) } - } -} - -impl From for LdkChannelConfig { - fn from(value: ChannelConfig) -> Self { - *value.inner.read().unwrap() - } -} - -impl Default for ChannelConfig { - fn default() -> Self { - LdkChannelConfig::default().into() - } -} - /// Custom TLV entry. #[derive(Debug, Clone, PartialEq, Eq)] pub struct TlvEntry { diff --git a/src/uniffi_types.rs b/src/uniffi_types.rs index 13e7d2ea9..e3fe11e7a 100644 --- a/src/uniffi_types.rs +++ b/src/uniffi_types.rs @@ -1,12 +1,29 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + +// Importing these items ensures they are accessible in the uniffi bindings +// without introducing unused import warnings in lib.rs. +// +// Make sure to add any re-exported items that need to be used in uniffi below. 
+ +pub use crate::config::{ + default_config, AnchorChannelsConfig, EsploraSyncConfig, MaxDustHTLCExposure, +}; pub use crate::graph::{ChannelInfo, ChannelUpdateInfo, NodeAnnouncementInfo, NodeInfo}; pub use crate::payment::store::{LSPFeeLimits, PaymentDirection, PaymentKind, PaymentStatus}; +pub use crate::payment::{MaxTotalRoutingFeeLimit, QrPaymentResult, SendingParameters}; +pub use lightning::chain::channelmonitor::BalanceSource; pub use lightning::events::{ClosureReason, PaymentFailureReason}; -pub use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; +pub use lightning::ln::types::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; pub use lightning::offers::invoice::Bolt12Invoice; pub use lightning::offers::offer::{Offer, OfferId}; pub use lightning::offers::refund::Refund; -pub use lightning::routing::gossip::{NodeId, RoutingFees}; +pub use lightning::routing::gossip::{NodeAlias, NodeId, RoutingFees}; pub use lightning::util::string::UntrustedString; pub use lightning_invoice::Bolt11Invoice; @@ -15,8 +32,11 @@ pub use bitcoin::{Address, BlockHash, Network, OutPoint, Txid}; pub use bip39::Mnemonic; +pub use vss_client::headers::{VssHeaderProvider, VssHeaderProviderError}; + use crate::UniffiCustomTypeConverter; +use crate::builder::sanitize_alias; use crate::error::Error; use crate::hex_utils; use crate::{SocketAddress, UserChannelId}; @@ -322,3 +342,15 @@ impl UniffiCustomTypeConverter for Network { obj.to_string() } } + +impl UniffiCustomTypeConverter for NodeAlias { + type Builtin = String; + + fn into_custom(val: Self::Builtin) -> uniffi::Result { + Ok(sanitize_alias(&val).map_err(|_| Error::InvalidNodeAlias)?) 
+ } + + fn from_custom(obj: Self) -> Self::Builtin { + obj.to_string() + } +} diff --git a/src/wallet.rs b/src/wallet.rs deleted file mode 100644 index 33838da84..000000000 --- a/src/wallet.rs +++ /dev/null @@ -1,682 +0,0 @@ -use crate::logger::{log_error, log_info, log_trace, Logger}; - -use crate::config::BDK_WALLET_SYNC_TIMEOUT_SECS; -use crate::Error; - -use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; - -use lightning::events::bump_transaction::{Utxo, WalletSource}; -use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; -use lightning::ln::script::ShutdownScript; -use lightning::sign::{ - ChangeDestinationSource, EntropySource, InMemorySigner, KeyMaterial, KeysManager, NodeSigner, - OutputSpender, Recipient, SignerProvider, SpendableOutputDescriptor, -}; - -use lightning::util::message_signing; - -use bdk::blockchain::EsploraBlockchain; -use bdk::database::BatchDatabase; -use bdk::wallet::AddressIndex; -use bdk::{Balance, FeeRate}; -use bdk::{SignOptions, SyncOptions}; - -use bitcoin::address::{Payload, WitnessVersion}; -use bitcoin::bech32::u5; -use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; -use bitcoin::blockdata::locktime::absolute::LockTime; -use bitcoin::hash_types::WPubkeyHash; -use bitcoin::hashes::Hash; -use bitcoin::key::XOnlyPublicKey; -use bitcoin::psbt::PartiallySignedTransaction; -use bitcoin::secp256k1::ecdh::SharedSecret; -use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; -use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, Signing}; -use bitcoin::{ScriptBuf, Transaction, TxOut, Txid}; - -use std::ops::{Deref, DerefMut}; -use std::sync::{Arc, Mutex, RwLock}; -use std::time::Duration; - -enum WalletSyncStatus { - Completed, - InProgress { subscribers: tokio::sync::broadcast::Sender> }, -} - -pub struct Wallet -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - // A BDK blockchain used for 
wallet sync. - blockchain: EsploraBlockchain, - // A BDK on-chain wallet. - inner: Mutex>, - // A cache storing the most recently retrieved fee rate estimations. - broadcaster: B, - fee_estimator: E, - // A Mutex holding the current sync status. - sync_status: Mutex, - // TODO: Drop this workaround after BDK 1.0 upgrade. - balance_cache: RwLock, - logger: L, -} - -impl Wallet -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - pub(crate) fn new( - blockchain: EsploraBlockchain, wallet: bdk::Wallet, broadcaster: B, fee_estimator: E, - logger: L, - ) -> Self { - let start_balance = wallet.get_balance().unwrap_or(Balance { - immature: 0, - trusted_pending: 0, - untrusted_pending: 0, - confirmed: 0, - }); - - let inner = Mutex::new(wallet); - let sync_status = Mutex::new(WalletSyncStatus::Completed); - let balance_cache = RwLock::new(start_balance); - Self { blockchain, inner, broadcaster, fee_estimator, sync_status, balance_cache, logger } - } - - pub(crate) async fn sync(&self) -> Result<(), Error> { - if let Some(mut sync_receiver) = self.register_or_subscribe_pending_sync() { - log_info!(self.logger, "Sync in progress, skipping."); - return sync_receiver.recv().await.map_err(|e| { - debug_assert!(false, "Failed to receive wallet sync result: {:?}", e); - log_error!(self.logger, "Failed to receive wallet sync result: {:?}", e); - Error::WalletOperationFailed - })?; - } - - let res = { - let wallet_lock = self.inner.lock().unwrap(); - - let wallet_sync_timeout_fut = tokio::time::timeout( - Duration::from_secs(BDK_WALLET_SYNC_TIMEOUT_SECS), - wallet_lock.sync(&self.blockchain, SyncOptions { progress: None }), - ); - - match wallet_sync_timeout_fut.await { - Ok(res) => match res { - Ok(()) => { - // TODO: Drop this workaround after BDK 1.0 upgrade. - // Update balance cache after syncing. 
- if let Ok(balance) = wallet_lock.get_balance() { - *self.balance_cache.write().unwrap() = balance; - } - Ok(()) - }, - Err(e) => match e { - bdk::Error::Esplora(ref be) => match **be { - bdk::blockchain::esplora::EsploraError::Reqwest(_) => { - log_error!( - self.logger, - "Sync failed due to HTTP connection error: {}", - e - ); - Err(From::from(e)) - }, - _ => { - log_error!(self.logger, "Sync failed due to Esplora error: {}", e); - Err(From::from(e)) - }, - }, - _ => { - log_error!(self.logger, "Wallet sync error: {}", e); - Err(From::from(e)) - }, - }, - }, - Err(e) => { - log_error!(self.logger, "On-chain wallet sync timed out: {}", e); - Err(Error::WalletOperationTimeout) - }, - } - }; - - self.propagate_result_to_subscribers(res); - - res - } - - pub(crate) fn create_funding_transaction( - &self, output_script: ScriptBuf, value_sats: u64, confirmation_target: ConfirmationTarget, - locktime: LockTime, - ) -> Result { - let fee_rate = FeeRate::from_sat_per_kwu( - self.fee_estimator.get_est_sat_per_1000_weight(confirmation_target) as f32, - ); - - let locked_wallet = self.inner.lock().unwrap(); - let mut tx_builder = locked_wallet.build_tx(); - - tx_builder - .add_recipient(output_script, value_sats) - .fee_rate(fee_rate) - .nlocktime(locktime) - .enable_rbf(); - - let mut psbt = match tx_builder.finish() { - Ok((psbt, _)) => { - log_trace!(self.logger, "Created funding PSBT: {:?}", psbt); - psbt - }, - Err(err) => { - log_error!(self.logger, "Failed to create funding transaction: {}", err); - return Err(err.into()); - }, - }; - - let mut sign_options = SignOptions::default(); - sign_options.trust_witness_utxo = true; - - match locked_wallet.sign(&mut psbt, sign_options) { - Ok(finalized) => { - if !finalized { - return Err(Error::OnchainTxCreationFailed); - } - }, - Err(err) => { - log_error!(self.logger, "Failed to create funding transaction: {}", err); - return Err(err.into()); - }, - } - - Ok(psbt.extract_tx()) - } - - pub(crate) fn get_new_address(&self) 
-> Result { - let address_info = self.inner.lock().unwrap().get_address(AddressIndex::New)?; - Ok(address_info.address) - } - - fn get_new_internal_address(&self) -> Result { - let address_info = - self.inner.lock().unwrap().get_internal_address(AddressIndex::LastUnused)?; - Ok(address_info.address) - } - - pub(crate) fn get_balances( - &self, total_anchor_channels_reserve_sats: u64, - ) -> Result<(u64, u64), Error> { - // TODO: Drop this workaround after BDK 1.0 upgrade. - // We get the balance and update our cache if we can do so without blocking on the wallet - // Mutex. Otherwise, we return a cached value. - let balance = match self.inner.try_lock() { - Ok(wallet_lock) => { - // Update balance cache if we can. - let balance = wallet_lock.get_balance()?; - *self.balance_cache.write().unwrap() = balance.clone(); - balance - }, - Err(_) => self.balance_cache.read().unwrap().clone(), - }; - - let (total, spendable) = ( - balance.get_total(), - balance.get_spendable().saturating_sub(total_anchor_channels_reserve_sats), - ); - - Ok((total, spendable)) - } - - pub(crate) fn get_spendable_amount_sats( - &self, total_anchor_channels_reserve_sats: u64, - ) -> Result { - self.get_balances(total_anchor_channels_reserve_sats).map(|(_, s)| s) - } - - /// Send funds to the given address. - /// - /// If `amount_msat_or_drain` is `None` the wallet will be drained, i.e., all available funds will be - /// spent. 
- pub(crate) fn send_to_address( - &self, address: &bitcoin::Address, amount_msat_or_drain: Option, - ) -> Result { - let confirmation_target = ConfirmationTarget::OutputSpendingFee; - let fee_rate = FeeRate::from_sat_per_kwu( - self.fee_estimator.get_est_sat_per_1000_weight(confirmation_target) as f32, - ); - - let tx = { - let locked_wallet = self.inner.lock().unwrap(); - let mut tx_builder = locked_wallet.build_tx(); - - if let Some(amount_sats) = amount_msat_or_drain { - tx_builder - .add_recipient(address.script_pubkey(), amount_sats) - .fee_rate(fee_rate) - .enable_rbf(); - } else { - tx_builder - .drain_wallet() - .drain_to(address.script_pubkey()) - .fee_rate(fee_rate) - .enable_rbf(); - } - - let mut psbt = match tx_builder.finish() { - Ok((psbt, _)) => { - log_trace!(self.logger, "Created PSBT: {:?}", psbt); - psbt - }, - Err(err) => { - log_error!(self.logger, "Failed to create transaction: {}", err); - return Err(err.into()); - }, - }; - - match locked_wallet.sign(&mut psbt, SignOptions::default()) { - Ok(finalized) => { - if !finalized { - return Err(Error::OnchainTxCreationFailed); - } - }, - Err(err) => { - log_error!(self.logger, "Failed to create transaction: {}", err); - return Err(err.into()); - }, - } - psbt.extract_tx() - }; - - self.broadcaster.broadcast_transactions(&[&tx]); - - let txid = tx.txid(); - - if let Some(amount_sats) = amount_msat_or_drain { - log_info!( - self.logger, - "Created new transaction {} sending {}sats on-chain to address {}", - txid, - amount_sats, - address - ); - } else { - log_info!( - self.logger, - "Created new transaction {} sending all available on-chain funds to address {}", - txid, - address - ); - } - - Ok(txid) - } - - fn register_or_subscribe_pending_sync( - &self, - ) -> Option>> { - let mut sync_status_lock = self.sync_status.lock().unwrap(); - match sync_status_lock.deref_mut() { - WalletSyncStatus::Completed => { - // We're first to register for a sync. 
- let (tx, _) = tokio::sync::broadcast::channel(1); - *sync_status_lock = WalletSyncStatus::InProgress { subscribers: tx }; - None - }, - WalletSyncStatus::InProgress { subscribers } => { - // A sync is in-progress, we subscribe. - let rx = subscribers.subscribe(); - Some(rx) - }, - } - } - - fn propagate_result_to_subscribers(&self, res: Result<(), Error>) { - // Send the notification to any other tasks that might be waiting on it by now. - { - let mut sync_status_lock = self.sync_status.lock().unwrap(); - match sync_status_lock.deref_mut() { - WalletSyncStatus::Completed => { - // No sync in-progress, do nothing. - return; - }, - WalletSyncStatus::InProgress { subscribers } => { - // A sync is in-progress, we notify subscribers. - if subscribers.receiver_count() > 0 { - match subscribers.send(res) { - Ok(_) => (), - Err(e) => { - debug_assert!( - false, - "Failed to send wallet sync result to subscribers: {:?}", - e - ); - log_error!( - self.logger, - "Failed to send wallet sync result to subscribers: {:?}", - e - ); - }, - } - } - *sync_status_lock = WalletSyncStatus::Completed; - }, - } - } - } -} - -impl WalletSource for Wallet -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - fn list_confirmed_utxos(&self) -> Result, ()> { - let locked_wallet = self.inner.lock().unwrap(); - let mut utxos = Vec::new(); - let confirmed_txs: Vec = locked_wallet - .list_transactions(false) - .map_err(|e| { - log_error!(self.logger, "Failed to retrieve transactions from wallet: {}", e); - })? - .into_iter() - .filter(|t| t.confirmation_time.is_some()) - .collect(); - let unspent_confirmed_utxos = locked_wallet - .list_unspent() - .map_err(|e| { - log_error!( - self.logger, - "Failed to retrieve unspent transactions from wallet: {}", - e - ); - })? 
- .into_iter() - .filter(|u| confirmed_txs.iter().find(|t| t.txid == u.outpoint.txid).is_some()); - - for u in unspent_confirmed_utxos { - let payload = Payload::from_script(&u.txout.script_pubkey).map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; - - match payload { - Payload::WitnessProgram(program) => match program.version() { - WitnessVersion::V0 if program.program().len() == 20 => { - let wpkh = - WPubkeyHash::from_slice(program.program().as_bytes()).map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; - let utxo = Utxo::new_v0_p2wpkh(u.outpoint, u.txout.value, &wpkh); - utxos.push(utxo); - }, - WitnessVersion::V1 => { - XOnlyPublicKey::from_slice(program.program().as_bytes()).map_err(|e| { - log_error!(self.logger, "Failed to retrieve script payload: {}", e); - })?; - - let utxo = Utxo { - outpoint: u.outpoint, - output: TxOut { - value: u.txout.value, - script_pubkey: ScriptBuf::new_witness_program(&program), - }, - satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + - 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ - }; - utxos.push(utxo); - }, - _ => { - log_error!( - self.logger, - "Unexpected witness version or length. Version: {}, Length: {}", - program.version(), - program.program().len() - ); - }, - }, - _ => { - log_error!( - self.logger, - "Tried to use a non-witness script. This must never happen." - ); - panic!("Tried to use a non-witness script. 
This must never happen."); - }, - } - } - - Ok(utxos) - } - - fn get_change_script(&self) -> Result { - let locked_wallet = self.inner.lock().unwrap(); - let address_info = - locked_wallet.get_internal_address(AddressIndex::LastUnused).map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - - Ok(address_info.address.script_pubkey()) - } - - fn sign_psbt(&self, mut psbt: PartiallySignedTransaction) -> Result { - let locked_wallet = self.inner.lock().unwrap(); - - // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As - // BDK by default doesn't trust the witness UTXO to account for the Segwit bug, we must - // disable it here as otherwise we fail to sign. - let mut sign_options = SignOptions::default(); - sign_options.trust_witness_utxo = true; - - match locked_wallet.sign(&mut psbt, sign_options) { - Ok(_finalized) => { - // BDK will fail to finalize for all LDK-provided inputs of the PSBT. Unfortunately - // we can't check more fine grained if it succeeded for all the other inputs here, - // so we just ignore the returned `finalized` bool. - }, - Err(err) => { - log_error!(self.logger, "Failed to sign transaction: {}", err); - return Err(()); - }, - } - - Ok(psbt.extract_tx()) - } -} - -/// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are -/// directly spendable by the BDK wallet. -pub struct WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - inner: KeysManager, - wallet: Arc>, - logger: L, -} - -impl WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - /// Constructs a `WalletKeysManager` that overrides the destination and shutdown scripts. - /// - /// See [`KeysManager::new`] for more information on `seed`, `starting_time_secs`, and - /// `starting_time_nanos`. 
- pub fn new( - seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, - wallet: Arc>, logger: L, - ) -> Self { - let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); - Self { inner, wallet, logger } - } - - pub fn sign_message(&self, msg: &[u8]) -> Result { - message_signing::sign(msg, &self.inner.get_node_secret_key()) - .or(Err(Error::MessageSigningFailed)) - } - - pub fn get_node_secret_key(&self) -> SecretKey { - self.inner.get_node_secret_key() - } - - pub fn verify_signature(&self, msg: &[u8], sig: &str, pkey: &PublicKey) -> bool { - message_signing::verify(msg, sig, pkey) - } -} - -impl NodeSigner for WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - fn get_node_id(&self, recipient: Recipient) -> Result { - self.inner.get_node_id(recipient) - } - - fn ecdh( - &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>, - ) -> Result { - self.inner.ecdh(recipient, other_key, tweak) - } - - fn get_inbound_payment_key_material(&self) -> KeyMaterial { - self.inner.get_inbound_payment_key_material() - } - - fn sign_invoice( - &self, hrp_bytes: &[u8], invoice_data: &[u5], recipient: Recipient, - ) -> Result { - self.inner.sign_invoice(hrp_bytes, invoice_data, recipient) - } - - fn sign_gossip_message(&self, msg: UnsignedGossipMessage<'_>) -> Result { - self.inner.sign_gossip_message(msg) - } - - fn sign_bolt12_invoice( - &self, invoice: &lightning::offers::invoice::UnsignedBolt12Invoice, - ) -> Result { - self.inner.sign_bolt12_invoice(invoice) - } - - fn sign_bolt12_invoice_request( - &self, invoice_request: &lightning::offers::invoice_request::UnsignedInvoiceRequest, - ) -> Result { - self.inner.sign_bolt12_invoice_request(invoice_request) - } -} - -impl OutputSpender for WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - /// See 
[`KeysManager::spend_spendable_outputs`] for documentation on this method. - fn spend_spendable_outputs( - &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, - change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, - locktime: Option, secp_ctx: &Secp256k1, - ) -> Result { - self.inner.spend_spendable_outputs( - descriptors, - outputs, - change_destination_script, - feerate_sat_per_1000_weight, - locktime, - secp_ctx, - ) - } -} - -impl EntropySource for WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - fn get_secure_random_bytes(&self) -> [u8; 32] { - self.inner.get_secure_random_bytes() - } -} - -impl SignerProvider for WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - type EcdsaSigner = InMemorySigner; - - fn generate_channel_keys_id( - &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128, - ) -> [u8; 32] { - self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id) - } - - fn derive_channel_signer( - &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32], - ) -> Self::EcdsaSigner { - self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id) - } - - fn read_chan_signer(&self, reader: &[u8]) -> Result { - self.inner.read_chan_signer(reader) - } - - fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { - let address = self.wallet.get_new_address().map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - Ok(address.script_pubkey()) - } - - fn get_shutdown_scriptpubkey(&self) -> Result { - let address = self.wallet.get_new_address().map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - - match address.payload { - Payload::WitnessProgram(program) => 
ShutdownScript::new_witness_program(&program) - .map_err(|e| { - log_error!(self.logger, "Invalid shutdown script: {:?}", e); - }), - _ => { - log_error!( - self.logger, - "Tried to use a non-witness address. This must never happen." - ); - panic!("Tried to use a non-witness address. This must never happen."); - }, - } - } -} - -impl ChangeDestinationSource for WalletKeysManager -where - D: BatchDatabase, - B::Target: BroadcasterInterface, - E::Target: FeeEstimator, - L::Target: Logger, -{ - fn get_change_destination_script(&self) -> Result { - let address = self.wallet.get_new_internal_address().map_err(|e| { - log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); - })?; - Ok(address.script_pubkey()) - } -} diff --git a/src/wallet/mod.rs b/src/wallet/mod.rs new file mode 100644 index 000000000..494fcd768 --- /dev/null +++ b/src/wallet/mod.rs @@ -0,0 +1,667 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use persist::KVStoreWalletPersister; + +use crate::logger::{log_debug, log_error, log_info, log_trace, Logger}; + +use crate::fee_estimator::{ConfirmationTarget, FeeEstimator}; +use crate::Error; + +use lightning::chain::chaininterface::BroadcasterInterface; +use lightning::chain::{BestBlock, Listen}; + +use lightning::events::bump_transaction::{Utxo, WalletSource}; +use lightning::ln::msgs::{DecodeError, UnsignedGossipMessage}; +use lightning::ln::script::ShutdownScript; +use lightning::sign::{ + ChangeDestinationSource, EntropySource, InMemorySigner, KeyMaterial, KeysManager, NodeSigner, + OutputSpender, Recipient, SignerProvider, SpendableOutputDescriptor, +}; + +use lightning::util::message_signing; +use lightning_invoice::RawBolt11Invoice; + +use bdk_chain::spk_client::{FullScanRequest, SyncRequest}; +use bdk_chain::ChainPosition; +use bdk_wallet::{KeychainKind, PersistedWallet, SignOptions, Update}; + +use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR; +use bitcoin::blockdata::locktime::absolute::LockTime; +use bitcoin::hashes::Hash; +use bitcoin::key::XOnlyPublicKey; +use bitcoin::psbt::Psbt; +use bitcoin::secp256k1::ecdh::SharedSecret; +use bitcoin::secp256k1::ecdsa::{RecoverableSignature, Signature}; +use bitcoin::secp256k1::{PublicKey, Scalar, Secp256k1, SecretKey, Signing}; +use bitcoin::{ + Amount, ScriptBuf, Transaction, TxOut, Txid, WPubkeyHash, WitnessProgram, WitnessVersion, +}; + +use std::ops::Deref; +use std::sync::{Arc, Mutex}; + +pub(crate) mod persist; +pub(crate) mod ser; + +pub(crate) struct Wallet +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + // A BDK on-chain wallet. 
+ inner: Mutex>, + persister: Mutex, + broadcaster: B, + fee_estimator: E, + logger: L, +} + +impl Wallet +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + pub(crate) fn new( + wallet: bdk_wallet::PersistedWallet, + wallet_persister: KVStoreWalletPersister, broadcaster: B, fee_estimator: E, logger: L, + ) -> Self { + let inner = Mutex::new(wallet); + let persister = Mutex::new(wallet_persister); + Self { inner, persister, broadcaster, fee_estimator, logger } + } + + pub(crate) fn get_full_scan_request(&self) -> FullScanRequest { + self.inner.lock().unwrap().start_full_scan().build() + } + + pub(crate) fn get_incremental_sync_request(&self) -> SyncRequest<(KeychainKind, u32)> { + self.inner.lock().unwrap().start_sync_with_revealed_spks().build() + } + + pub(crate) fn current_best_block(&self) -> BestBlock { + let checkpoint = self.inner.lock().unwrap().latest_checkpoint(); + BestBlock { block_hash: checkpoint.hash(), height: checkpoint.height() } + } + + pub(crate) fn apply_update(&self, update: impl Into) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + match locked_wallet.apply_update(update) { + Ok(()) => { + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + Ok(()) + }, + Err(e) => { + log_error!(self.logger, "Sync failed due to chain connection error: {}", e); + Err(Error::WalletOperationFailed) + }, + } + } + + pub(crate) fn apply_unconfirmed_txs( + &self, unconfirmed_txs: Vec<(Transaction, u64)>, + ) -> Result<(), Error> { + let mut locked_wallet = self.inner.lock().unwrap(); + locked_wallet.apply_unconfirmed_txs(unconfirmed_txs); + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + 
Error::PersistenceFailed + })?; + + Ok(()) + } + + pub(crate) fn create_funding_transaction( + &self, output_script: ScriptBuf, amount: Amount, confirmation_target: ConfirmationTarget, + locktime: LockTime, + ) -> Result { + let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); + + let mut locked_wallet = self.inner.lock().unwrap(); + let mut tx_builder = locked_wallet.build_tx(); + + tx_builder + .add_recipient(output_script, amount) + .fee_rate(fee_rate) + .nlocktime(locktime) + .enable_rbf(); + + let mut psbt = match tx_builder.finish() { + Ok(psbt) => { + log_trace!(self.logger, "Created funding PSBT: {:?}", psbt); + psbt + }, + Err(err) => { + log_error!(self.logger, "Failed to create funding transaction: {}", err); + return Err(err.into()); + }, + }; + + match locked_wallet.sign(&mut psbt, SignOptions::default()) { + Ok(finalized) => { + if !finalized { + return Err(Error::OnchainTxCreationFailed); + } + }, + Err(err) => { + log_error!(self.logger, "Failed to create funding transaction: {}", err); + return Err(err.into()); + }, + } + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + let tx = psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + e + })?; + + Ok(tx) + } + + pub(crate) fn get_new_address(&self) -> Result { + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + + let address_info = locked_wallet.reveal_next_address(KeychainKind::External); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + Ok(address_info.address) + } + + fn get_new_internal_address(&self) -> Result { + let mut locked_wallet = self.inner.lock().unwrap(); + let mut 
locked_persister = self.persister.lock().unwrap(); + + let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + Ok(address_info.address) + } + + pub(crate) fn get_balances( + &self, total_anchor_channels_reserve_sats: u64, + ) -> Result<(u64, u64), Error> { + let balance = self.inner.lock().unwrap().balance(); + + let (total, spendable) = ( + balance.total().to_sat(), + balance.trusted_spendable().to_sat().saturating_sub(total_anchor_channels_reserve_sats), + ); + + Ok((total, spendable)) + } + + pub(crate) fn get_spendable_amount_sats( + &self, total_anchor_channels_reserve_sats: u64, + ) -> Result { + self.get_balances(total_anchor_channels_reserve_sats).map(|(_, s)| s) + } + + /// Send funds to the given address. + /// + /// If `amount_msat_or_drain` is `None` the wallet will be drained, i.e., all available funds will be + /// spent. 
+ pub(crate) fn send_to_address( + &self, address: &bitcoin::Address, amount_or_drain: Option, + ) -> Result { + let confirmation_target = ConfirmationTarget::OnchainPayment; + let fee_rate = self.fee_estimator.estimate_fee_rate(confirmation_target); + + let tx = { + let mut locked_wallet = self.inner.lock().unwrap(); + let mut tx_builder = locked_wallet.build_tx(); + + if let Some(amount) = amount_or_drain { + tx_builder + .add_recipient(address.script_pubkey(), amount) + .fee_rate(fee_rate) + .enable_rbf(); + } else { + tx_builder + .drain_wallet() + .drain_to(address.script_pubkey()) + .fee_rate(fee_rate) + .enable_rbf(); + } + + let mut psbt = match tx_builder.finish() { + Ok(psbt) => { + log_trace!(self.logger, "Created PSBT: {:?}", psbt); + psbt + }, + Err(err) => { + log_error!(self.logger, "Failed to create transaction: {}", err); + return Err(err.into()); + }, + }; + + match locked_wallet.sign(&mut psbt, SignOptions::default()) { + Ok(finalized) => { + if !finalized { + return Err(Error::OnchainTxCreationFailed); + } + }, + Err(err) => { + log_error!(self.logger, "Failed to create transaction: {}", err); + return Err(err.into()); + }, + } + + let mut locked_persister = self.persister.lock().unwrap(); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + Error::PersistenceFailed + })?; + + psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + e + })? 
+ }; + + self.broadcaster.broadcast_transactions(&[&tx]); + + let txid = tx.compute_txid(); + + if let Some(amount) = amount_or_drain { + log_info!( + self.logger, + "Created new transaction {} sending {}sats on-chain to address {}", + txid, + amount.to_sat(), + address + ); + } else { + log_info!( + self.logger, + "Created new transaction {} sending all available on-chain funds to address {}", + txid, + address + ); + } + + Ok(txid) + } +} + +impl Listen for Wallet +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn filtered_block_connected( + &self, _header: &bitcoin::block::Header, + _txdata: &lightning::chain::transaction::TransactionData, _height: u32, + ) { + debug_assert!(false, "Syncing filtered blocks is currently not supported"); + // As far as we can tell this would be a no-op anyways as we don't have to tell BDK about + // the header chain of intermediate blocks. According to the BDK team, it's sufficient to + // only connect full blocks starting from the last point of disagreement. 
+ } + + fn block_connected(&self, block: &bitcoin::Block, height: u32) { + let mut locked_wallet = self.inner.lock().unwrap(); + + let pre_checkpoint = locked_wallet.latest_checkpoint(); + if pre_checkpoint.height() != height - 1 + || pre_checkpoint.hash() != block.header.prev_blockhash + { + log_debug!( + self.logger, + "Detected reorg while applying a connected block to on-chain wallet: new block with hash {} at height {}", + block.header.block_hash(), + height + ); + } + + match locked_wallet.apply_block(block, height) { + Ok(()) => (), + Err(e) => { + log_error!( + self.logger, + "Failed to apply connected block to on-chain wallet: {}", + e + ); + return; + }, + }; + + let mut locked_persister = self.persister.lock().unwrap(); + match locked_wallet.persist(&mut locked_persister) { + Ok(_) => (), + Err(e) => { + log_error!(self.logger, "Failed to persist on-chain wallet: {}", e); + return; + }, + }; + } + + fn block_disconnected(&self, _header: &bitcoin::block::Header, _height: u32) { + // This is a no-op as we don't have to tell BDK about disconnections. According to the BDK + // team, it's sufficient in case of a reorg to always connect blocks starting from the last + // point of disagreement. 
+ } +} + +impl WalletSource for Wallet +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn list_confirmed_utxos(&self) -> Result, ()> { + let locked_wallet = self.inner.lock().unwrap(); + let mut utxos = Vec::new(); + let confirmed_txs: Vec = locked_wallet + .transactions() + .filter(|t| matches!(t.chain_position, ChainPosition::Confirmed(_))) + .map(|t| t.tx_node.txid) + .collect(); + let unspent_confirmed_utxos = + locked_wallet.list_unspent().filter(|u| confirmed_txs.contains(&u.outpoint.txid)); + + for u in unspent_confirmed_utxos { + let script_pubkey = u.txout.script_pubkey; + match script_pubkey.witness_version() { + Some(version @ WitnessVersion::V0) => { + let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; + + let wpkh = WPubkeyHash::from_slice(&witness_program.program().as_bytes()) + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; + let utxo = Utxo::new_v0_p2wpkh(u.outpoint, u.txout.value, &wpkh); + utxos.push(utxo); + }, + Some(version @ WitnessVersion::V1) => { + let witness_program = WitnessProgram::new(version, script_pubkey.as_bytes()) + .map_err(|e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + })?; + + XOnlyPublicKey::from_slice(&witness_program.program().as_bytes()).map_err( + |e| { + log_error!(self.logger, "Failed to retrieve script payload: {}", e); + }, + )?; + + let utxo = Utxo { + outpoint: u.outpoint, + output: TxOut { + value: u.txout.value, + script_pubkey: ScriptBuf::new_witness_program(&witness_program), + }, + satisfaction_weight: 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64 + + 1 /* witness items */ + 1 /* schnorr sig len */ + 64, /* schnorr sig */ + }; + utxos.push(utxo); + }, + Some(version) => { + log_error!(self.logger, "Unexpected witness version: {}", version,); + }, + None => { + 
log_error!( + self.logger, + "Tried to use a non-witness script. This must never happen." + ); + panic!("Tried to use a non-witness script. This must never happen."); + }, + } + } + + Ok(utxos) + } + + fn get_change_script(&self) -> Result { + let mut locked_wallet = self.inner.lock().unwrap(); + let mut locked_persister = self.persister.lock().unwrap(); + + let address_info = locked_wallet.next_unused_address(KeychainKind::Internal); + locked_wallet.persist(&mut locked_persister).map_err(|e| { + log_error!(self.logger, "Failed to persist wallet: {}", e); + () + })?; + Ok(address_info.address.script_pubkey()) + } + + fn sign_psbt(&self, mut psbt: Psbt) -> Result { + let locked_wallet = self.inner.lock().unwrap(); + + // While BDK populates both `witness_utxo` and `non_witness_utxo` fields, LDK does not. As + // BDK by default doesn't trust the witness UTXO to account for the Segwit bug, we must + // disable it here as otherwise we fail to sign. + let mut sign_options = SignOptions::default(); + sign_options.trust_witness_utxo = true; + + match locked_wallet.sign(&mut psbt, sign_options) { + Ok(_finalized) => { + // BDK will fail to finalize for all LDK-provided inputs of the PSBT. Unfortunately + // we can't check more fine grained if it succeeded for all the other inputs here, + // so we just ignore the returned `finalized` bool. + }, + Err(err) => { + log_error!(self.logger, "Failed to sign transaction: {}", err); + return Err(()); + }, + } + + let tx = psbt.extract_tx().map_err(|e| { + log_error!(self.logger, "Failed to extract transaction: {}", e); + () + })?; + + Ok(tx) + } +} + +/// Similar to [`KeysManager`], but overrides the destination and shutdown scripts so they are +/// directly spendable by the BDK wallet. 
+pub(crate) struct WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + inner: KeysManager, + wallet: Arc>, + logger: L, +} + +impl WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + /// Constructs a `WalletKeysManager` that overrides the destination and shutdown scripts. + /// + /// See [`KeysManager::new`] for more information on `seed`, `starting_time_secs`, and + /// `starting_time_nanos`. + pub fn new( + seed: &[u8; 32], starting_time_secs: u64, starting_time_nanos: u32, + wallet: Arc>, logger: L, + ) -> Self { + let inner = KeysManager::new(seed, starting_time_secs, starting_time_nanos); + Self { inner, wallet, logger } + } + + pub fn sign_message(&self, msg: &[u8]) -> String { + message_signing::sign(msg, &self.inner.get_node_secret_key()) + } + + pub fn get_node_secret_key(&self) -> SecretKey { + self.inner.get_node_secret_key() + } + + pub fn verify_signature(&self, msg: &[u8], sig: &str, pkey: &PublicKey) -> bool { + message_signing::verify(msg, sig, pkey) + } +} + +impl NodeSigner for WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn get_node_id(&self, recipient: Recipient) -> Result { + self.inner.get_node_id(recipient) + } + + fn ecdh( + &self, recipient: Recipient, other_key: &PublicKey, tweak: Option<&Scalar>, + ) -> Result { + self.inner.ecdh(recipient, other_key, tweak) + } + + fn get_inbound_payment_key_material(&self) -> KeyMaterial { + self.inner.get_inbound_payment_key_material() + } + + fn sign_invoice( + &self, invoice: &RawBolt11Invoice, recipient: Recipient, + ) -> Result { + self.inner.sign_invoice(invoice, recipient) + } + + fn sign_gossip_message(&self, msg: UnsignedGossipMessage<'_>) -> Result { + self.inner.sign_gossip_message(msg) + } + + fn sign_bolt12_invoice( + &self, invoice: &lightning::offers::invoice::UnsignedBolt12Invoice, + ) -> Result { + 
self.inner.sign_bolt12_invoice(invoice) + } + + fn sign_bolt12_invoice_request( + &self, invoice_request: &lightning::offers::invoice_request::UnsignedInvoiceRequest, + ) -> Result { + self.inner.sign_bolt12_invoice_request(invoice_request) + } +} + +impl OutputSpender for WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + /// See [`KeysManager::spend_spendable_outputs`] for documentation on this method. + fn spend_spendable_outputs( + &self, descriptors: &[&SpendableOutputDescriptor], outputs: Vec, + change_destination_script: ScriptBuf, feerate_sat_per_1000_weight: u32, + locktime: Option, secp_ctx: &Secp256k1, + ) -> Result { + self.inner.spend_spendable_outputs( + descriptors, + outputs, + change_destination_script, + feerate_sat_per_1000_weight, + locktime, + secp_ctx, + ) + } +} + +impl EntropySource for WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn get_secure_random_bytes(&self) -> [u8; 32] { + self.inner.get_secure_random_bytes() + } +} + +impl SignerProvider for WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + type EcdsaSigner = InMemorySigner; + + fn generate_channel_keys_id( + &self, inbound: bool, channel_value_satoshis: u64, user_channel_id: u128, + ) -> [u8; 32] { + self.inner.generate_channel_keys_id(inbound, channel_value_satoshis, user_channel_id) + } + + fn derive_channel_signer( + &self, channel_value_satoshis: u64, channel_keys_id: [u8; 32], + ) -> Self::EcdsaSigner { + self.inner.derive_channel_signer(channel_value_satoshis, channel_keys_id) + } + + fn read_chan_signer(&self, reader: &[u8]) -> Result { + self.inner.read_chan_signer(reader) + } + + fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result { + let address = self.wallet.get_new_address().map_err(|e| { + log_error!(self.logger, "Failed to retrieve new address from wallet: {}", 
e); + })?; + Ok(address.script_pubkey()) + } + + fn get_shutdown_scriptpubkey(&self) -> Result { + let address = self.wallet.get_new_address().map_err(|e| { + log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); + })?; + + match address.witness_program() { + Some(program) => ShutdownScript::new_witness_program(&program).map_err(|e| { + log_error!(self.logger, "Invalid shutdown script: {:?}", e); + }), + _ => { + log_error!( + self.logger, + "Tried to use a non-witness address. This must never happen." + ); + panic!("Tried to use a non-witness address. This must never happen."); + }, + } + } +} + +impl ChangeDestinationSource for WalletKeysManager +where + B::Target: BroadcasterInterface, + E::Target: FeeEstimator, + L::Target: Logger, +{ + fn get_change_destination_script(&self) -> Result { + let address = self.wallet.get_new_internal_address().map_err(|e| { + log_error!(self.logger, "Failed to retrieve new address from wallet: {}", e); + })?; + Ok(address.script_pubkey()) + } +} diff --git a/src/wallet/persist.rs b/src/wallet/persist.rs new file mode 100644 index 000000000..06af541a2 --- /dev/null +++ b/src/wallet/persist.rs @@ -0,0 +1,187 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use crate::io::utils::{ + read_bdk_wallet_change_set, write_bdk_wallet_change_descriptor, write_bdk_wallet_descriptor, + write_bdk_wallet_indexer, write_bdk_wallet_local_chain, write_bdk_wallet_network, + write_bdk_wallet_tx_graph, +}; +use crate::logger::{log_error, FilesystemLogger}; +use crate::types::DynStore; + +use lightning::util::logger::Logger; + +use bdk_chain::Merge; +use bdk_wallet::{ChangeSet, WalletPersister}; + +use std::sync::Arc; +pub(crate) struct KVStoreWalletPersister { + latest_change_set: Option, + kv_store: Arc, + logger: Arc, +} + +impl KVStoreWalletPersister { + pub(crate) fn new(kv_store: Arc, logger: Arc) -> Self { + Self { latest_change_set: None, kv_store, logger } + } +} + +impl WalletPersister for KVStoreWalletPersister { + type Error = std::io::Error; + + fn initialize(persister: &mut Self) -> Result { + // Return immediately if we have already been initialized. + if let Some(latest_change_set) = persister.latest_change_set.as_ref() { + return Ok(latest_change_set.clone()); + } + + let change_set_opt = read_bdk_wallet_change_set( + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + + let change_set = match change_set_opt { + Some(persisted_change_set) => persisted_change_set, + None => { + // BDK docs state: "The implementation must return all data currently stored in the + // persister. If there is no data, return an empty changeset (using + // ChangeSet::default())." + ChangeSet::default() + }, + }; + persister.latest_change_set = Some(change_set.clone()); + Ok(change_set) + } + + fn persist(persister: &mut Self, change_set: &ChangeSet) -> Result<(), Self::Error> { + if change_set.is_empty() { + return Ok(()); + } + + // We're allowed to fail here if we're not initialized, BDK docs state: "This method can fail if the + // persister is not initialized." 
+ let latest_change_set = persister.latest_change_set.as_mut().ok_or_else(|| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Wallet must be initialized before calling persist", + ) + })?; + + // Check that we'd never accidentally override any persisted data if the change set doesn't + // match our descriptor/change_descriptor/network. + if let Some(descriptor) = change_set.descriptor.as_ref() { + if latest_change_set.descriptor.is_some() + && latest_change_set.descriptor.as_ref() != Some(descriptor) + { + debug_assert!(false, "Wallet descriptor must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted descriptor. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted descriptor. This should never happen." + )); + } else { + latest_change_set.descriptor = Some(descriptor.clone()); + write_bdk_wallet_descriptor( + &descriptor, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + if let Some(change_descriptor) = change_set.change_descriptor.as_ref() { + if latest_change_set.change_descriptor.is_some() + && latest_change_set.change_descriptor.as_ref() != Some(change_descriptor) + { + debug_assert!(false, "Wallet change_descriptor must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted change_descriptor. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted change_descriptor. This should never happen." 
+ )); + } else { + latest_change_set.change_descriptor = Some(change_descriptor.clone()); + write_bdk_wallet_change_descriptor( + &change_descriptor, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + if let Some(network) = change_set.network { + if latest_change_set.network.is_some() && latest_change_set.network != Some(network) { + debug_assert!(false, "Wallet network must never change"); + log_error!( + persister.logger, + "Wallet change set doesn't match persisted network. This should never happen." + ); + return Err(std::io::Error::new( + std::io::ErrorKind::InvalidData, + "Wallet change set doesn't match persisted network. This should never happen.", + )); + } else { + latest_change_set.network = Some(network); + write_bdk_wallet_network( + &network, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + } + + debug_assert!( + latest_change_set.descriptor.is_some() + && latest_change_set.change_descriptor.is_some() + && latest_change_set.network.is_some(), + "descriptor, change_descriptor, and network are mandatory ChangeSet fields" + ); + + // Merge and persist the sub-changesets individually if necessary. + // + // According to the BDK team the individual sub-changesets can be persisted + // individually/non-atomically, "(h)owever, the localchain tip is used by block-by-block + // chain sources as a reference as to where to sync from, so I would persist that last", "I + // would write in this order: indexer, tx_graph, local_chain", which is why we follow this + // particular order. 
+ if !change_set.indexer.is_empty() { + latest_change_set.indexer.merge(change_set.indexer.clone()); + write_bdk_wallet_indexer( + &latest_change_set.indexer, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + if !change_set.tx_graph.is_empty() { + latest_change_set.tx_graph.merge(change_set.tx_graph.clone()); + write_bdk_wallet_tx_graph( + &latest_change_set.tx_graph, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + if !change_set.local_chain.is_empty() { + latest_change_set.local_chain.merge(change_set.local_chain.clone()); + write_bdk_wallet_local_chain( + &latest_change_set.local_chain, + Arc::clone(&persister.kv_store), + Arc::clone(&persister.logger), + )?; + } + + Ok(()) + } +} diff --git a/src/wallet/ser.rs b/src/wallet/ser.rs new file mode 100644 index 000000000..2e33992a8 --- /dev/null +++ b/src/wallet/ser.rs @@ -0,0 +1,346 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. 
+ +use lightning::ln::msgs::DecodeError; +use lightning::util::ser::{BigSize, Readable, RequiredWrapper, Writeable, Writer}; +use lightning::{decode_tlv_stream, encode_tlv_stream, read_tlv_fields, write_tlv_fields}; + +use bdk_chain::bdk_core::{BlockId, ConfirmationBlockTime}; +use bdk_chain::indexer::keychain_txout::ChangeSet as BdkIndexerChangeSet; +use bdk_chain::local_chain::ChangeSet as BdkLocalChainChangeSet; +use bdk_chain::tx_graph::ChangeSet as BdkTxGraphChangeSet; +use bdk_chain::DescriptorId; + +use bdk_wallet::descriptor::Descriptor; +use bdk_wallet::keys::DescriptorPublicKey; + +use bitcoin::hashes::sha256::Hash as Sha256Hash; +use bitcoin::p2p::Magic; +use bitcoin::{BlockHash, Network, OutPoint, Transaction, TxOut, Txid}; + +use std::collections::{BTreeMap, BTreeSet}; +use std::str::FromStr; +use std::sync::Arc; + +const CHANGESET_SERIALIZATION_VERSION: u8 = 1; + +pub(crate) struct ChangeSetSerWrapper<'a, T>(pub &'a T); +pub(crate) struct ChangeSetDeserWrapper(pub T); + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Descriptor> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + self.0.to_string().write(writer) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let descriptor_str: String = Readable::read(reader)?; + let descriptor = Descriptor::::from_str(&descriptor_str) + .map_err(|_| DecodeError::InvalidValue)?; + Ok(Self(descriptor)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Network> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + self.0.magic().to_bytes().write(writer) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = 
Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let buf: [u8; 4] = Readable::read(reader)?; + let magic = Magic::from_bytes(buf); + let network = Network::from_magic(magic).ok_or(DecodeError::InvalidValue)?; + Ok(Self(network)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BdkLocalChainChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { + (0, self.0.blocks, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut blocks = RequiredWrapper(None); + decode_tlv_stream!(reader, { + (0, blocks, required), + }); + Ok(Self(BdkLocalChainChangeSet { blocks: blocks.0.unwrap() })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BdkTxGraphChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { + (0, ChangeSetSerWrapper(&self.0.txs), required), + (2, self.0.txouts, required), + (4, ChangeSetSerWrapper(&self.0.anchors), required), + (6, self.0.last_seen, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut txs: RequiredWrapper>>> = + RequiredWrapper(None); + let mut txouts: RequiredWrapper> = RequiredWrapper(None); + let mut anchors: RequiredWrapper< + ChangeSetDeserWrapper>, + > = RequiredWrapper(None); + let mut last_seen: RequiredWrapper> = RequiredWrapper(None); + + decode_tlv_stream!(reader, { + (0, txs, required), + (2, txouts, 
required), + (4, anchors, required), + (6, last_seen, required), + }); + + Ok(Self(BdkTxGraphChangeSet { + txs: txs.0.unwrap().0, + txouts: txouts.0.unwrap(), + anchors: anchors.0.unwrap().0, + last_seen: last_seen.0.unwrap(), + })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeSet<(ConfirmationBlockTime, Txid)>> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for (time, txid) in self.0.iter() { + write_tlv_fields!(writer, { + (0, ChangeSetSerWrapper(time), required), + (2, txid, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len.0 { + let mut time: RequiredWrapper> = + RequiredWrapper(None); + let mut txid: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, time, required), + (2, txid, required), + }); + set.insert((time.0.unwrap().0, txid.0.unwrap())); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeSet>> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for tx in self.0.iter() { + write_tlv_fields!(writer, { + (0, tx, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper>> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeSet::new(); + for _ in 0..len.0 { + let mut tx: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, tx, required), + }); + set.insert(Arc::new(tx.0.unwrap())); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, ConfirmationBlockTime> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + encode_tlv_stream!(writer, { + (0, ChangeSetSerWrapper(&self.0.block_id), required), + (2, 
self.0.confirmation_time, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut block_id: RequiredWrapper> = RequiredWrapper(None); + let mut confirmation_time: RequiredWrapper = RequiredWrapper(None); + + decode_tlv_stream!(reader, { + (0, block_id, required), + (2, confirmation_time, required), + }); + + Ok(Self(ConfirmationBlockTime { + block_id: block_id.0.unwrap().0, + confirmation_time: confirmation_time.0.unwrap(), + })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BlockId> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + encode_tlv_stream!(writer, { + (0, self.0.height, required), + (2, self.0.hash, required), + }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut height: RequiredWrapper = RequiredWrapper(None); + let mut hash: RequiredWrapper = RequiredWrapper(None); + decode_tlv_stream!(reader, { + (0, height, required), + (2, hash, required), + }); + + Ok(Self(BlockId { height: height.0.unwrap(), hash: hash.0.unwrap() })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BdkIndexerChangeSet> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + CHANGESET_SERIALIZATION_VERSION.write(writer)?; + + encode_tlv_stream!(writer, { (0, ChangeSetSerWrapper(&self.0.last_revealed), required) }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let version: u8 = Readable::read(reader)?; + if version != CHANGESET_SERIALIZATION_VERSION { + return Err(DecodeError::UnknownVersion); + } + + let mut last_revealed: RequiredWrapper>> = + RequiredWrapper(None); + + decode_tlv_stream!(reader, { (0, last_revealed, required) }); + + Ok(Self(BdkIndexerChangeSet { last_revealed: last_revealed.0.unwrap().0 })) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, BTreeMap> { + fn write(&self, writer: &mut W) -> Result<(), 
lightning::io::Error> { + let len = BigSize(self.0.len() as u64); + len.write(writer)?; + for (descriptor_id, last_index) in self.0.iter() { + write_tlv_fields!(writer, { + (0, ChangeSetSerWrapper(descriptor_id), required), + (2, last_index, required), + }); + } + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper> { + fn read(reader: &mut R) -> Result { + let len: BigSize = Readable::read(reader)?; + let mut set = BTreeMap::new(); + for _ in 0..len.0 { + let mut descriptor_id: RequiredWrapper> = + RequiredWrapper(None); + let mut last_index: RequiredWrapper = RequiredWrapper(None); + read_tlv_fields!(reader, { + (0, descriptor_id, required), + (2, last_index, required), + }); + set.insert(descriptor_id.0.unwrap().0, last_index.0.unwrap()); + } + Ok(Self(set)) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, DescriptorId> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + encode_tlv_stream!(writer, { (0, ChangeSetSerWrapper(&self.0 .0), required) }); + Ok(()) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + let mut hash: RequiredWrapper> = RequiredWrapper(None); + + decode_tlv_stream!(reader, { (0, hash, required) }); + + Ok(Self(DescriptorId(hash.0.unwrap().0))) + } +} + +impl<'a> Writeable for ChangeSetSerWrapper<'a, Sha256Hash> { + fn write(&self, writer: &mut W) -> Result<(), lightning::io::Error> { + writer.write_all(&self.0[..]) + } +} + +impl Readable for ChangeSetDeserWrapper { + fn read(reader: &mut R) -> Result { + use bitcoin::hashes::Hash; + + let buf: [u8; 32] = Readable::read(reader)?; + Ok(Self(Sha256Hash::from_slice(&buf[..]).unwrap())) + } +} diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 272d9f61e..378881396 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,15 +1,23 @@ +// This file is Copyright its original authors, visible in version control history. 
+// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(any(test, cln_test, vss_test))] #![allow(dead_code)] +use ldk_node::config::{Config, EsploraSyncConfig}; use ldk_node::io::sqlite_store::SqliteStore; use ldk_node::payment::{PaymentDirection, PaymentKind, PaymentStatus}; use ldk_node::{ - Builder, Config, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance, - TlvEntry, + Builder, Event, LightningBalance, LogLevel, Node, NodeError, PendingSweepBalance, TlvEntry, }; use lightning::ln::msgs::SocketAddress; use lightning::ln::{PaymentHash, PaymentPreimage}; +use lightning::routing::gossip::NodeAlias; use lightning::util::persist::KVStore; use lightning::util::test_utils::TestStore; use lightning_persister::fs_store::FilesystemStore; @@ -194,6 +202,15 @@ pub(crate) fn random_listening_addresses() -> Vec { listening_addresses } +pub(crate) fn random_node_alias() -> Option { + let mut rng = thread_rng(); + let rand_val = rng.gen_range(0..1000); + let alias = format!("ldk-node-{}", rand_val); + let mut bytes = [0u8; 32]; + bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes()); + Some(NodeAlias(bytes)) +} + pub(crate) fn random_config(anchor_channels: bool) -> Config { let mut config = Config::default(); @@ -202,8 +219,6 @@ pub(crate) fn random_config(anchor_channels: bool) -> Config { } config.network = Network::Regtest; - config.onchain_wallet_sync_interval_secs = 100000; - config.wallet_sync_interval_secs = 100000; println!("Setting network: {}", config.network); let rand_dir = random_storage_path(); @@ -214,6 +229,10 @@ pub(crate) fn random_config(anchor_channels: bool) -> Config { println!("Setting random LDK listening addresses: {:?}", rand_listening_addresses); config.listening_addresses = Some(rand_listening_addresses); + let alias = random_node_alias(); + println!("Setting random LDK 
node alias: {:?}", alias); + config.node_alias = alias; + config.log_level = LogLevel::Gossip; config @@ -224,6 +243,12 @@ type TestNode = Arc; #[cfg(not(feature = "uniffi"))] type TestNode = Node; +#[derive(Clone)] +pub(crate) enum TestChainSource<'a> { + Esplora(&'a ElectrsD), + BitcoindRpc(&'a BitcoinD), +} + macro_rules! setup_builder { ($builder: ident, $config: expr) => { #[cfg(feature = "uniffi")] @@ -236,11 +261,12 @@ macro_rules! setup_builder { pub(crate) use setup_builder; pub(crate) fn setup_two_nodes( - electrsd: &ElectrsD, allow_0conf: bool, anchor_channels: bool, anchors_trusted_no_reserve: bool, + chain_source: &TestChainSource, allow_0conf: bool, anchor_channels: bool, + anchors_trusted_no_reserve: bool, ) -> (TestNode, TestNode) { println!("== Node A =="); let config_a = random_config(anchor_channels); - let node_a = setup_node(electrsd, config_a); + let node_a = setup_node(chain_source, config_a); println!("\n== Node B =="); let mut config_b = random_config(anchor_channels); @@ -255,14 +281,29 @@ pub(crate) fn setup_two_nodes( .trusted_peers_no_reserve .push(node_a.node_id()); } - let node_b = setup_node(electrsd, config_b); + let node_b = setup_node(chain_source, config_b); (node_a, node_b) } -pub(crate) fn setup_node(electrsd: &ElectrsD, config: Config) -> TestNode { - let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); +pub(crate) fn setup_node(chain_source: &TestChainSource, config: Config) -> TestNode { setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + match chain_source { + TestChainSource::Esplora(electrsd) => { + let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); + }, + 
TestChainSource::BitcoindRpc(bitcoind) => { + let rpc_host = bitcoind.params.rpc_socket.ip().to_string(); + let rpc_port = bitcoind.params.rpc_socket.port(); + let values = bitcoind.params.get_cookie_values().unwrap().unwrap(); + let rpc_user = values.user; + let rpc_password = values.password; + builder.set_chain_source_bitcoind_rpc(rpc_host, rpc_port, rpc_user, rpc_password); + }, + } let test_sync_store = Arc::new(TestSyncStore::new(config.storage_dir_path.into())); let node = builder.build_with_store(test_sync_store).unwrap(); node.start().unwrap(); @@ -379,19 +420,30 @@ pub(crate) fn premine_and_distribute_funds( } pub fn open_channel( - node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, announce: bool, + node_a: &TestNode, node_b: &TestNode, funding_amount_sat: u64, should_announce: bool, electrsd: &ElectrsD, ) { - node_a - .connect_open_channel( - node_b.node_id(), - node_b.listening_addresses().unwrap().first().unwrap().clone(), - funding_amount_sat, - None, - None, - announce, - ) - .unwrap(); + if should_announce { + node_a + .open_announced_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } else { + node_a + .open_channel( + node_b.node_id(), + node_b.listening_addresses().unwrap().first().unwrap().clone(), + funding_amount_sat, + None, + None, + ) + .unwrap(); + } assert!(node_a.list_peers().iter().find(|c| { c.node_id == node_b.node_id() }).is_some()); let funding_txo_a = expect_channel_pending_event!(node_a, node_b.node_id()); @@ -424,17 +476,16 @@ pub(crate) fn do_channel_full_cycle( assert_eq!(node_a.next_event(), None); assert_eq!(node_b.next_event(), None); - println!("\nA -- connect_open_channel -> B"); + println!("\nA -- open_channel -> B"); let funding_amount_sat = 2_080_000; let push_msat = (funding_amount_sat / 2) * 1000; // balance the channel node_a - .connect_open_channel( + .open_announced_channel( node_b.node_id(), 
node_b.listening_addresses().unwrap().first().unwrap().clone(), funding_amount_sat, Some(push_msat), None, - true, ) .unwrap(); @@ -453,7 +504,7 @@ pub(crate) fn do_channel_full_cycle( node_a.sync_wallets().unwrap(); node_b.sync_wallets().unwrap(); - let onchain_fee_buffer_sat = 1500; + let onchain_fee_buffer_sat = 5000; let node_a_anchor_reserve_sat = if expect_anchor_channel { 25_000 } else { 0 }; let node_a_upper_bound_sat = premine_amount_sat - node_a_anchor_reserve_sat - funding_amount_sat; @@ -494,8 +545,8 @@ pub(crate) fn do_channel_full_cycle( let invoice = node_b.bolt11_payment().receive(invoice_amount_1_msat, &"asdf", 9217).unwrap(); println!("\nA send"); - let payment_id = node_a.bolt11_payment().send(&invoice).unwrap(); - assert_eq!(node_a.bolt11_payment().send(&invoice), Err(NodeError::DuplicatePayment)); + let payment_id = node_a.bolt11_payment().send(&invoice, None).unwrap(); + assert_eq!(node_a.bolt11_payment().send(&invoice, None), Err(NodeError::DuplicatePayment)); assert_eq!(node_a.list_payments().first().unwrap().id, payment_id); @@ -527,7 +578,7 @@ pub(crate) fn do_channel_full_cycle( assert!(matches!(node_b.payment(&payment_id).unwrap().kind, PaymentKind::Bolt11 { .. })); // Assert we fail duplicate outbound payments and check the status hasn't changed. 
- assert_eq!(Err(NodeError::DuplicatePayment), node_a.bolt11_payment().send(&invoice)); + assert_eq!(Err(NodeError::DuplicatePayment), node_a.bolt11_payment().send(&invoice, None)); assert_eq!(node_a.payment(&payment_id).unwrap().status, PaymentStatus::Succeeded); assert_eq!(node_a.payment(&payment_id).unwrap().direction, PaymentDirection::Outbound); assert_eq!(node_a.payment(&payment_id).unwrap().amount_msat, Some(invoice_amount_1_msat)); @@ -542,7 +593,7 @@ pub(crate) fn do_channel_full_cycle( let underpaid_amount = invoice_amount_2_msat - 1; assert_eq!( Err(NodeError::InvalidAmount), - node_a.bolt11_payment().send_using_amount(&invoice, underpaid_amount) + node_a.bolt11_payment().send_using_amount(&invoice, underpaid_amount, None) ); println!("\nB overpaid receive"); @@ -551,7 +602,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nA overpaid send"); let payment_id = - node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat).unwrap(); + node_a.bolt11_payment().send_using_amount(&invoice, overpaid_amount_msat, None).unwrap(); expect_event!(node_a, PaymentSuccessful); let received_amount = match node_b.wait_next_event() { ref e @ Event::PaymentReceived { amount_msat, .. 
} => { @@ -580,12 +631,12 @@ pub(crate) fn do_channel_full_cycle( let determined_amount_msat = 2345_678; assert_eq!( Err(NodeError::InvalidInvoice), - node_a.bolt11_payment().send(&variable_amount_invoice) + node_a.bolt11_payment().send(&variable_amount_invoice, None) ); println!("\nA send_using_amount"); let payment_id = node_a .bolt11_payment() - .send_using_amount(&variable_amount_invoice, determined_amount_msat) + .send_using_amount(&variable_amount_invoice, determined_amount_msat, None) .unwrap(); expect_event!(node_a, PaymentSuccessful); @@ -617,7 +668,7 @@ pub(crate) fn do_channel_full_cycle( .bolt11_payment() .receive_for_hash(invoice_amount_3_msat, &"asdf", 9217, manual_payment_hash) .unwrap(); - let manual_payment_id = node_a.bolt11_payment().send(&manual_invoice).unwrap(); + let manual_payment_id = node_a.bolt11_payment().send(&manual_invoice, None).unwrap(); let claimable_amount_msat = expect_payment_claimable_event!( node_b, @@ -655,7 +706,7 @@ pub(crate) fn do_channel_full_cycle( .bolt11_payment() .receive_for_hash(invoice_amount_3_msat, &"asdf", 9217, manual_fail_payment_hash) .unwrap(); - let manual_fail_payment_id = node_a.bolt11_payment().send(&manual_fail_invoice).unwrap(); + let manual_fail_payment_id = node_a.bolt11_payment().send(&manual_fail_invoice, None).unwrap(); expect_payment_claimable_event!( node_b, @@ -699,7 +750,7 @@ pub(crate) fn do_channel_full_cycle( let tlv2 = TlvEntry { r#type: 131075, value: vec![0xaa, 0xbb] }; let keysend_payment_id = node_a .spontaneous_payment() - .send(keysend_amount_msat, node_b.node_id(), vec![tlv1, tlv2], None) + .send(keysend_amount_msat, node_b.node_id(), None, vec![tlv1, tlv2], None) .unwrap(); expect_event!(node_a, PaymentSuccessful); let received_keysend_amount = match node_b.wait_next_event() { @@ -733,7 +784,7 @@ pub(crate) fn do_channel_full_cycle( println!("\nB close_channel (force: {})", force_close); if force_close { std::thread::sleep(Duration::from_secs(1)); - 
node_a.force_close_channel(&user_channel_id, node_b.node_id()).unwrap(); + node_a.force_close_channel(&user_channel_id, node_b.node_id(), None).unwrap(); } else { node_a.close_channel(&user_channel_id, node_b.node_id()).unwrap(); } @@ -887,7 +938,7 @@ impl TestSyncStore { fn do_list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let fs_res = self.fs_store.list(primary_namespace, secondary_namespace); let sqlite_res = self.sqlite_store.list(primary_namespace, secondary_namespace); let test_res = self.test_store.list(primary_namespace, secondary_namespace); @@ -918,7 +969,7 @@ impl TestSyncStore { impl KVStore for TestSyncStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); let fs_res = self.fs_store.read(primary_namespace, secondary_namespace, key); @@ -943,7 +994,7 @@ impl KVStore for TestSyncStore { fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8], - ) -> std::io::Result<()> { + ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = self.fs_store.write(primary_namespace, secondary_namespace, key, buf); let sqlite_res = self.sqlite_store.write(primary_namespace, secondary_namespace, key, buf); @@ -970,7 +1021,7 @@ impl KVStore for TestSyncStore { fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> std::io::Result<()> { + ) -> lightning::io::Result<()> { let _guard = self.serializer.write().unwrap(); let fs_res = self.fs_store.remove(primary_namespace, secondary_namespace, key, lazy); let sqlite_res = @@ -998,7 +1049,7 @@ impl KVStore for TestSyncStore { fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> std::io::Result> { + ) -> lightning::io::Result> { let _guard = self.serializer.read().unwrap(); 
self.do_list(primary_namespace, secondary_namespace) } diff --git a/tests/integration_tests_cln.rs b/tests/integration_tests_cln.rs index fc3b10bfc..a577b106d 100644 --- a/tests/integration_tests_cln.rs +++ b/tests/integration_tests_cln.rs @@ -1,3 +1,10 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(cln_test)] mod common; @@ -38,7 +45,7 @@ fn test_cln() { // Setup LDK Node let config = common::random_config(true); let mut builder = Builder::from_config(config); - builder.set_esplora_server("http://127.0.0.1:3002".to_string()); + builder.set_chain_source_esplora("http://127.0.0.1:3002".to_string(), None); let node = builder.build().unwrap(); node.start().unwrap(); @@ -75,15 +82,8 @@ fn test_cln() { // Open the channel let funding_amount_sat = 1_000_000; - node.connect_open_channel( - cln_node_id, - cln_address, - funding_amount_sat, - Some(500_000_000), - None, - false, - ) - .unwrap(); + node.open_channel(cln_node_id, cln_address, funding_amount_sat, Some(500_000_000), None) + .unwrap(); let funding_txo = common::expect_channel_pending_event!(node, cln_node_id); common::wait_for_tx(&electrs_client, funding_txo.txid); @@ -99,7 +99,7 @@ fn test_cln() { cln_client.invoice(Some(10_000_000), &rand_label, &rand_label, None, None, None).unwrap(); let parsed_invoice = Bolt11Invoice::from_str(&cln_invoice.bolt11).unwrap(); - node.bolt11_payment().send(&parsed_invoice).unwrap(); + node.bolt11_payment().send(&parsed_invoice, None).unwrap(); common::expect_event!(node, PaymentSuccessful); let cln_listed_invoices = cln_client.listinvoices(Some(&rand_label), None, None, None).unwrap().invoices; diff --git a/tests/integration_tests_rust.rs b/tests/integration_tests_rust.rs index 37ddeb9a7..dc5c4b818 100644 --- 
a/tests/integration_tests_rust.rs +++ b/tests/integration_tests_rust.rs @@ -1,63 +1,83 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + mod common; use common::{ - do_channel_full_cycle, expect_event, expect_payment_received_event, + do_channel_full_cycle, expect_channel_ready_event, expect_event, expect_payment_received_event, expect_payment_successful_event, generate_blocks_and_wait, open_channel, premine_and_distribute_funds, random_config, setup_bitcoind_and_electrsd, setup_builder, - setup_node, setup_two_nodes, wait_for_tx, TestSyncStore, + setup_node, setup_two_nodes, wait_for_tx, TestChainSource, TestSyncStore, }; -use ldk_node::payment::PaymentKind; +use ldk_node::config::EsploraSyncConfig; +use ldk_node::payment::{PaymentKind, QrPaymentResult, SendingParameters}; use ldk_node::{Builder, Event, NodeError}; use lightning::ln::channelmanager::PaymentId; use lightning::util::persist::KVStore; -use bitcoin::{Amount, Network}; +use bitcoin::Amount; use std::sync::Arc; -use crate::common::expect_channel_ready_event; - #[test] fn channel_full_cycle() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, false); +} + +#[test] +fn channel_full_cycle_bitcoind() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::BitcoindRpc(&bitcoind); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, 
true, false); } #[test] fn channel_full_cycle_force_close() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_force_close_trusted_no_reserve() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, true); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, true); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, true, true); } #[test] fn channel_full_cycle_0conf() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, true, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, true, true, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, true, true, false) } #[test] fn channel_full_cycle_legacy_staticremotekey() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); do_channel_full_cycle(node_a, node_b, &bitcoind.client, &electrsd.client, false, false, false); } #[test] fn channel_open_fails_when_funds_insufficient() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let 
addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -75,16 +95,15 @@ fn channel_open_fails_when_funds_insufficient() { assert_eq!(node_a.list_balances().spendable_onchain_balance_sats, premine_amount_sat); assert_eq!(node_b.list_balances().spendable_onchain_balance_sats, premine_amount_sat); - println!("\nA -- connect_open_channel -> B"); + println!("\nA -- open_channel -> B"); assert_eq!( Err(NodeError::InsufficientFunds), - node_a.connect_open_channel( + node_a.open_channel( node_b.node_id(), node_b.listening_addresses().unwrap().first().unwrap().clone(), 120000, None, None, - true ) ); } @@ -98,8 +117,11 @@ fn multi_hop_sending() { let mut nodes = Vec::new(); for _ in 0..5 { let config = random_config(true); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build().unwrap(); node.start().unwrap(); nodes.push(node); @@ -158,25 +180,21 @@ fn multi_hop_sending() { // Sleep a bit for gossip to propagate. 
std::thread::sleep(std::time::Duration::from_secs(1)); + let sending_params = SendingParameters { + max_total_routing_fee_msat: Some(Some(75_000).into()), + max_total_cltv_expiry_delta: Some(1000), + max_path_count: Some(10), + max_channel_saturation_power_of_half: Some(2), + }; + let invoice = nodes[4].bolt11_payment().receive(2_500_000, &"asdf", 9217).unwrap(); - nodes[0].bolt11_payment().send(&invoice).unwrap(); + nodes[0].bolt11_payment().send(&invoice, Some(sending_params)).unwrap(); let payment_id = expect_payment_received_event!(&nodes[4], 2_500_000); let fee_paid_msat = Some(2000); expect_payment_successful_event!(nodes[0], payment_id, Some(fee_paid_msat)); } -#[test] -fn connect_to_public_testnet_esplora() { - let mut config = random_config(true); - config.network = Network::Testnet; - setup_builder!(builder, config); - builder.set_esplora_server("https://blockstream.info/testnet/api".to_string()); - let node = builder.build().unwrap(); - node.start().unwrap(); - node.stop().unwrap(); -} - #[test] fn start_stop_reinit() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); @@ -187,8 +205,11 @@ fn start_stop_reinit() { let test_sync_store: Arc = Arc::new(TestSyncStore::new(config.storage_dir_path.clone().into())); + let mut sync_config = EsploraSyncConfig::default(); + sync_config.onchain_wallet_sync_interval_secs = 100000; + sync_config.lightning_wallet_sync_interval_secs = 100000; setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); node.start().unwrap(); @@ -225,7 +246,7 @@ fn start_stop_reinit() { drop(node); setup_builder!(builder, config); - builder.set_esplora_server(esplora_url.clone()); + builder.set_chain_source_esplora(esplora_url.clone(), Some(sync_config)); let reinitialized_node = builder.build_with_store(Arc::clone(&test_sync_store)).unwrap(); 
reinitialized_node.start().unwrap(); @@ -248,7 +269,8 @@ fn start_stop_reinit() { #[test] fn onchain_spend_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let addr_a = node_a.onchain_payment().new_address().unwrap(); let addr_b = node_b.onchain_payment().new_address().unwrap(); @@ -297,11 +319,12 @@ fn onchain_spend_receive() { fn sign_verify_msg() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); let config = random_config(true); - let node = setup_node(&electrsd, config); + let chain_source = TestChainSource::Esplora(&electrsd); + let node = setup_node(&chain_source, config); // Tests arbitrary message signing and later verification let msg = "OK computer".as_bytes(); - let sig = node.sign_message(msg).unwrap(); + let sig = node.sign_message(msg); let pkey = node.node_id(); assert!(node.verify_signature(msg, sig.as_str(), &pkey)); } @@ -314,7 +337,8 @@ fn connection_restart_behavior() { fn do_connection_restart_behavior(persist: bool) { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, false, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, false, false); let node_id_a = node_a.node_id(); let node_id_b = node_b.node_id(); @@ -365,7 +389,8 @@ fn do_connection_restart_behavior(persist: bool) { #[test] fn concurrent_connections_succeed() { let (_bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let node_a = Arc::new(node_a); let node_b = Arc::new(node_b); @@ -395,7 +420,8 @@ fn 
concurrent_connections_succeed() { #[test] fn simple_bolt12_send_receive() { let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); - let (node_a, node_b) = setup_two_nodes(&electrsd, false, true, false); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); let address_a = node_a.onchain_payment().new_address().unwrap(); let premine_amount_sat = 5_000_000; @@ -426,17 +452,32 @@ fn simple_bolt12_send_receive() { std::thread::sleep(std::time::Duration::from_secs(1)); let expected_amount_msat = 100_000_000; - let offer = node_b.bolt12_payment().receive(expected_amount_msat, "asdf").unwrap(); - let payment_id = node_a.bolt12_payment().send(&offer, None).unwrap(); + let offer = + node_b.bolt12_payment().receive(expected_amount_msat, "asdf", None, Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let payment_id = node_a + .bolt12_payment() + .send(&offer, expected_quantity, expected_payer_note.clone()) + .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); let node_a_payments = node_a.list_payments(); assert_eq!(node_a_payments.len(), 1); match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret: _, offer_id } => { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. 
}, @@ -450,7 +491,7 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments(); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id } => { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); @@ -466,22 +507,40 @@ fn simple_bolt12_send_receive() { let offer_amount_msat = 100_000_000; let less_than_offer_amount = offer_amount_msat - 10_000; let expected_amount_msat = offer_amount_msat + 10_000; - let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf").unwrap(); + let offer = node_b.bolt12_payment().receive(offer_amount_msat, "asdf", None, Some(1)).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); assert!(node_a .bolt12_payment() - .send_using_amount(&offer, None, less_than_offer_amount) + .send_using_amount(&offer, less_than_offer_amount, None, None) .is_err()); - let payment_id = - node_a.bolt12_payment().send_using_amount(&offer, None, expected_amount_msat).unwrap(); + let payment_id = node_a + .bolt12_payment() + .send_using_amount( + &offer, + expected_amount_msat, + expected_quantity, + expected_payer_note.clone(), + ) + .unwrap(); expect_payment_successful_event!(node_a, Some(payment_id), None); let node_a_payments = node_a.list_payments_with_filter(|p| p.id == payment_id); assert_eq!(node_a_payments.len(), 1); let payment_hash = match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret: _, offer_id } => { + PaymentKind::Bolt12Offer { + hash, + preimage, + secret: _, + offer_id, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert_eq!(offer_id, offer.id()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0); //TODO: We should 
eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. hash.unwrap() @@ -497,7 +556,7 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id } => { + PaymentKind::Bolt12Offer { hash, preimage, secret, offer_id, .. } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); @@ -511,7 +570,12 @@ fn simple_bolt12_send_receive() { // Now node_b refunds the amount node_a just overpaid. let overpaid_amount = expected_amount_msat - offer_amount_msat; - let refund = node_b.bolt12_payment().initiate_refund(overpaid_amount, 3600).unwrap(); + let expected_quantity = Some(1); + let expected_payer_note = Some("Test".to_string()); + let refund = node_b + .bolt12_payment() + .initiate_refund(overpaid_amount, 3600, expected_quantity, expected_payer_note.clone()) + .unwrap(); let invoice = node_a.bolt12_payment().request_refund_payment(&refund).unwrap(); expect_payment_received_event!(node_a, overpaid_amount); @@ -525,9 +589,17 @@ fn simple_bolt12_send_receive() { let node_b_payments = node_b.list_payments_with_filter(|p| p.id == node_b_payment_id); assert_eq!(node_b_payments.len(), 1); match node_b_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret: _ } => { + PaymentKind::Bolt12Refund { + hash, + preimage, + secret: _, + quantity: ref qty, + payer_note: ref note, + } => { assert!(hash.is_some()); assert!(preimage.is_some()); + assert_eq!(&expected_quantity, qty); + assert_eq!(expected_payer_note.unwrap(), note.clone().unwrap().0) //TODO: We should eventually set and assert the secret sender-side, too, but the BOLT12 //API currently doesn't allow to do that. 
}, @@ -541,7 +613,7 @@ fn simple_bolt12_send_receive() { let node_a_payments = node_a.list_payments_with_filter(|p| p.id == node_a_payment_id); assert_eq!(node_a_payments.len(), 1); match node_a_payments.first().unwrap().kind { - PaymentKind::Bolt12Refund { hash, preimage, secret } => { + PaymentKind::Bolt12Refund { hash, preimage, secret, .. } => { assert!(hash.is_some()); assert!(preimage.is_some()); assert!(secret.is_some()); @@ -552,3 +624,157 @@ fn simple_bolt12_send_receive() { } assert_eq!(node_a_payments.first().unwrap().amount_msat, Some(overpaid_amount)); } + +#[test] +fn generate_bip21_uri() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let (node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ); + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + let expected_amount_sats = 100_000; + let expiry_sec = 4_000; + + let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); + + match uqr_payment.clone() { + Ok(ref uri) => { + println!("Generated URI: {}", uri); + assert!(uri.contains("bitcoin:")); + assert!(uri.contains("lightning=")); + assert!(uri.contains("lno=")); + }, + Err(e) => panic!("Failed to generate URI: {:?}", e), + } +} + +#[test] +fn unified_qr_send_receive() { + let (bitcoind, electrsd) = setup_bitcoind_and_electrsd(); + let chain_source = TestChainSource::Esplora(&electrsd); + let 
(node_a, node_b) = setup_two_nodes(&chain_source, false, true, false); + + let address_a = node_a.onchain_payment().new_address().unwrap(); + let premined_sats = 5_000_000; + + premine_and_distribute_funds( + &bitcoind.client, + &electrsd.client, + vec![address_a], + Amount::from_sat(premined_sats), + ); + + node_a.sync_wallets().unwrap(); + open_channel(&node_a, &node_b, 4_000_000, true, &electrsd); + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + expect_channel_ready_event!(node_a, node_b.node_id()); + expect_channel_ready_event!(node_b, node_a.node_id()); + + // Sleep until we broadcast a node announcement. + while node_b.status().latest_node_announcement_broadcast_timestamp.is_none() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + + // Sleep one more sec to make sure the node announcement propagates. + std::thread::sleep(std::time::Duration::from_secs(1)); + + let expected_amount_sats = 100_000; + let expiry_sec = 4_000; + + let uqr_payment = node_b.unified_qr_payment().receive(expected_amount_sats, "asdf", expiry_sec); + let uri_str = uqr_payment.clone().unwrap(); + let offer_payment_id: PaymentId = match node_a.unified_qr_payment().send(&uri_str) { + Ok(QrPaymentResult::Bolt12 { payment_id }) => { + println!("\nBolt12 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected Bolt12 payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt12 payment but get On-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt12 payment but got error: {:?}", e); + }, + }; + + expect_payment_successful_event!(node_a, Some(offer_payment_id), None); + + // Removed one character from the offer to fall back on to invoice. 
+ // Still needs work + let uri_str_with_invalid_offer = &uri_str[..uri_str.len() - 1]; + let invoice_payment_id: PaymentId = + match node_a.unified_qr_payment().send(uri_str_with_invalid_offer) { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected Bolt11 payment but got Bolt12"); + }, + Ok(QrPaymentResult::Bolt11 { payment_id }) => { + println!("\nBolt11 payment sent successfully with PaymentID: {:?}", payment_id); + payment_id + }, + Ok(QrPaymentResult::Onchain { txid: _ }) => { + panic!("Expected Bolt11 payment but got on-chain transaction"); + }, + Err(e) => { + panic!("Expected Bolt11 payment but got error: {:?}", e); + }, + }; + expect_payment_successful_event!(node_a, Some(invoice_payment_id), None); + + let expect_onchain_amount_sats = 800_000; + let onchain_uqr_payment = + node_b.unified_qr_payment().receive(expect_onchain_amount_sats, "asdf", 4_000).unwrap(); + + // Removed a character from the offer, so it would move on to the other parameters. + let txid = match node_a + .unified_qr_payment() + .send(&onchain_uqr_payment.as_str()[..onchain_uqr_payment.len() - 1]) + { + Ok(QrPaymentResult::Bolt12 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt12") + }, + Ok(QrPaymentResult::Bolt11 { payment_id: _ }) => { + panic!("Expected on-chain payment but got Bolt11"); + }, + Ok(QrPaymentResult::Onchain { txid }) => { + println!("\nOn-chain transaction successful with Txid: {}", txid); + txid + }, + Err(e) => { + panic!("Expected on-chain payment but got error: {:?}", e); + }, + }; + + generate_blocks_and_wait(&bitcoind.client, &electrsd.client, 6); + wait_for_tx(&electrsd.client, txid); + + node_a.sync_wallets().unwrap(); + node_b.sync_wallets().unwrap(); + + assert_eq!(node_b.list_balances().total_onchain_balance_sats, 800_000); + assert_eq!(node_b.list_balances().total_lightning_balance_sats, 200_000); +} diff --git a/tests/integration_tests_vss.rs b/tests/integration_tests_vss.rs index 2a57ccffc..525c1f1f1 100644 --- 
a/tests/integration_tests_vss.rs +++ b/tests/integration_tests_vss.rs @@ -1,8 +1,16 @@ +// This file is Copyright its original authors, visible in version control history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license , at your option. You may not use this file except in +// accordance with one or both of these licenses. + #![cfg(vss_test)] mod common; use ldk_node::Builder; +use std::collections::HashMap; #[test] fn channel_full_cycle_with_vss_store() { @@ -11,17 +19,28 @@ fn channel_full_cycle_with_vss_store() { let esplora_url = format!("http://{}", electrsd.esplora_url.as_ref().unwrap()); let config_a = common::random_config(true); let mut builder_a = Builder::from_config(config_a); - builder_a.set_esplora_server(esplora_url.clone()); + builder_a.set_chain_source_esplora(esplora_url.clone(), None); let vss_base_url = std::env::var("TEST_VSS_BASE_URL").unwrap(); - let node_a = - builder_a.build_with_vss_store(vss_base_url.clone(), "node_1_store".to_string()).unwrap(); + let node_a = builder_a + .build_with_vss_store_and_fixed_headers( + vss_base_url.clone(), + "node_1_store".to_string(), + HashMap::new(), + ) + .unwrap(); node_a.start().unwrap(); println!("\n== Node B =="); let config_b = common::random_config(true); let mut builder_b = Builder::from_config(config_b); - builder_b.set_esplora_server(esplora_url); - let node_b = builder_b.build_with_vss_store(vss_base_url, "node_2_store".to_string()).unwrap(); + builder_b.set_chain_source_esplora(esplora_url.clone(), None); + let node_b = builder_b + .build_with_vss_store_and_fixed_headers( + vss_base_url, + "node_2_store".to_string(), + HashMap::new(), + ) + .unwrap(); node_b.start().unwrap(); common::do_channel_full_cycle(