From 7956af05aa8a092a3e546db65f8f0ad6a013e889 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:31:21 +0000 Subject: [PATCH 01/22] Move `lightning-transaction-sync` back into the main workspace Now that it has the same MSRV as everything else in the workspace, it doesn't need to live on its own. --- .github/workflows/build.yml | 27 +++++-------------------- Cargo.toml | 2 +- ci/ci-tests.sh | 21 +++++++++++++++++++- ci/ci-tx-sync-tests.sh | 39 ------------------------------------- 4 files changed, 26 insertions(+), 63 deletions(-) delete mode 100755 ci/ci-tx-sync-tests.sh diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3c50b2a0041..2658ff454e9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,52 +62,35 @@ jobs: - name: Set RUSTFLAGS to deny warnings if: "matrix.toolchain == '1.75.0'" run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - - name: Run CI script - shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh - - build-tx-sync: - strategy: - fail-fast: false - matrix: - platform: [ ubuntu-latest, macos-latest ] - toolchain: [ stable, beta, 1.75.0 ] - runs-on: ${{ matrix.platform }} - steps: - - name: Checkout source code - uses: actions/checkout@v4 - - name: Install Rust ${{ matrix.toolchain }} toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ matrix.toolchain }} - - name: Set RUSTFLAGS to deny warnings - if: "matrix.toolchain == '1.75.0'" - run: echo "RUSTFLAGS=-D warnings" >> "$GITHUB_ENV" - name: Enable caching for bitcoind + if: matrix.platform != 'windows-latest' id: cache-bitcoind uses: actions/cache@v4 with: path: bin/bitcoind-${{ runner.os }}-${{ runner.arch }} key: bitcoind-${{ runner.os }}-${{ runner.arch }} - name: Enable caching for electrs + if: matrix.platform != 'windows-latest' id: cache-electrs uses: actions/cache@v4 with: path: 
bin/electrs-${{ runner.os }}-${{ runner.arch }} key: electrs-${{ runner.os }}-${{ runner.arch }} - name: Download bitcoind/electrs - if: "steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true'" + if: "matrix.platform != 'windows-latest' && (steps.cache-bitcoind.outputs.cache-hit != 'true' || steps.cache-electrs.outputs.cache-hit != 'true')" run: | source ./contrib/download_bitcoind_electrs.sh mkdir bin mv "$BITCOIND_EXE" bin/bitcoind-${{ runner.os }}-${{ runner.arch }} mv "$ELECTRS_EXE" bin/electrs-${{ runner.os }}-${{ runner.arch }} - name: Set bitcoind/electrs environment variables + if: matrix.platform != 'windows-latest' run: | echo "BITCOIND_EXE=$( pwd )/bin/bitcoind-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" echo "ELECTRS_EXE=$( pwd )/bin/electrs-${{ runner.os }}-${{ runner.arch }}" >> "$GITHUB_ENV" - name: Run CI script shell: bash # Default on Winblows is powershell - run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tx-sync-tests.sh + run: CI_ENV=1 CI_MINIMIZE_DISK_USAGE=1 ./ci/ci-tests.sh coverage: needs: fuzz diff --git a/Cargo.toml b/Cargo.toml index f9f7406339e..a0895fe1641 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,11 +16,11 @@ members = [ "lightning-macros", "lightning-dns-resolver", "lightning-liquidity", + "lightning-transaction-sync", "possiblyrandom", ] exclude = [ - "lightning-transaction-sync", "lightning-tests", "ext-functional-test-demo", "no-std-check", diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 91ead9903cb..488c5ac4826 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -2,7 +2,6 @@ #shellcheck disable=SC2002,SC2207 set -eox pipefail -# Currently unused as we don't have to pin anything for MSRV: RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') # Some crates require pinning to meet our MSRV even for our downstream users, @@ -20,6 +19,9 @@ PIN_RELEASE_DEPS # pin the release dependencies in our main workspace # proptest 1.9.0 requires rustc 1.82.0 [ 
"$RUSTC_MINOR_VERSION" -lt 82 ] && cargo update -p proptest --precise "1.8.0" --verbose +# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. +[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose + export RUST_BACKTRACE=1 echo -e "\n\nChecking the workspace, except lightning-transaction-sync." @@ -57,6 +59,23 @@ cargo check -p lightning-block-sync --verbose --color always --features rpc-clie cargo test -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio cargo check -p lightning-block-sync --verbose --color always --features rpc-client,rest-client,tokio +echo -e "\n\nChecking Transaction Sync Clients with features." +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-blocking +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async +cargo check -p lightning-transaction-sync --verbose --color always --features esplora-async-https +cargo check -p lightning-transaction-sync --verbose --color always --features electrum + +if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then + echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." + cargo check -p lightning-transaction-sync --tests +else + echo -e "\n\nTesting Transaction Sync Clients with features." 
+ cargo test -p lightning-transaction-sync --verbose --color always --features esplora-blocking + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async + cargo test -p lightning-transaction-sync --verbose --color always --features esplora-async-https + cargo test -p lightning-transaction-sync --verbose --color always --features electrum +fi + echo -e "\n\nChecking and testing lightning-persister with features" cargo test -p lightning-persister --verbose --color always --features tokio cargo check -p lightning-persister --verbose --color always --features tokio diff --git a/ci/ci-tx-sync-tests.sh b/ci/ci-tx-sync-tests.sh deleted file mode 100755 index 0839e2ced3d..00000000000 --- a/ci/ci-tx-sync-tests.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -set -eox pipefail - -RUSTC_MINOR_VERSION=$(rustc --version | awk '{ split($2,a,"."); print a[2] }') - -pushd lightning-transaction-sync - -# Some crates require pinning to meet our MSRV even for our downstream users, -# which we do here. -# Further crates which appear only as dev-dependencies are pinned further down. -function PIN_RELEASE_DEPS { - return 0 # Don't fail the script if our rustc is higher than the last check -} - -PIN_RELEASE_DEPS # pin the release dependencies - -# Starting with version 1.2.0, the `idna_adapter` crate has an MSRV of rustc 1.81.0. -[ "$RUSTC_MINOR_VERSION" -lt 81 ] && cargo update -p idna_adapter --precise "1.1.0" --verbose - -export RUST_BACKTRACE=1 - -echo -e "\n\nChecking Transaction Sync Clients with features." -cargo check --verbose --color always --features esplora-blocking -cargo check --verbose --color always --features esplora-async -cargo check --verbose --color always --features esplora-async-https -cargo check --verbose --color always --features electrum - -if [ -z "$CI_ENV" ] && [[ -z "$BITCOIND_EXE" || -z "$ELECTRS_EXE" ]]; then - echo -e "\n\nSkipping testing Transaction Sync Clients due to BITCOIND_EXE or ELECTRS_EXE being unset." 
- cargo check --tests -else - echo -e "\n\nTesting Transaction Sync Clients with features." - cargo test --verbose --color always --features esplora-blocking - cargo test --verbose --color always --features esplora-async - cargo test --verbose --color always --features esplora-async-https - cargo test --verbose --color always --features electrum -fi - -popd From 210528475e876b96b59d2844727b09c28f427e4b Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 13:41:47 +0000 Subject: [PATCH 02/22] Trivially replace `Box::pin` with `pin!` in a few places Now that our MSRV is above 1.68 we can use the `pin!` macro to avoid having to `Box` various futures, avoiding some allocations, especially in `lightning-net-tokio`, which happens in a tight loop. --- lightning-liquidity/src/lsps2/service.rs | 17 ++++++++--------- lightning-liquidity/src/manager.rs | 8 ++++---- lightning-net-tokio/src/lib.rs | 13 ++++++------- lightning/src/events/bump_transaction/sync.rs | 3 ++- lightning/src/util/persist.rs | 5 ++--- lightning/src/util/sweep.rs | 5 +++-- 6 files changed, 25 insertions(+), 26 deletions(-) diff --git a/lightning-liquidity/src/lsps2/service.rs b/lightning-liquidity/src/lsps2/service.rs index a6736e63eef..5c4bd63bc48 100644 --- a/lightning-liquidity/src/lsps2/service.rs +++ b/lightning-liquidity/src/lsps2/service.rs @@ -9,7 +9,6 @@ //! Contains the main bLIP-52 / LSPS2 server-side object, [`LSPS2ServiceHandler`]. 
-use alloc::boxed::Box; use alloc::string::{String, ToString}; use alloc::vec::Vec; use lightning::util::persist::KVStore; @@ -17,6 +16,7 @@ use lightning::util::persist::KVStore; use core::cmp::Ordering as CmpOrdering; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicUsize, Ordering}; use core::task; @@ -2173,7 +2173,7 @@ where &self, counterparty_node_id: &PublicKey, request_id: LSPSRequestId, intercept_scid: u64, cltv_expiry_delta: u32, client_trusts_lsp: bool, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.invoice_parameters_generated( + let mut fut = pin!(self.inner.invoice_parameters_generated( counterparty_node_id, request_id, intercept_scid, @@ -2202,7 +2202,7 @@ where &self, intercept_scid: u64, intercept_id: InterceptId, expected_outbound_amount_msat: u64, payment_hash: PaymentHash, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_intercepted( + let mut fut = pin!(self.inner.htlc_intercepted( intercept_scid, intercept_id, expected_outbound_amount_msat, @@ -2228,7 +2228,7 @@ where pub fn htlc_handling_failed( &self, failure_type: HTLCHandlingFailureType, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.htlc_handling_failed(failure_type)); + let mut fut = pin!(self.inner.htlc_handling_failed(failure_type)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2249,7 +2249,7 @@ where pub fn payment_forwarded( &self, next_channel_id: ChannelId, skimmed_fee_msat: u64, ) -> Result<(), APIError> { - let mut fut = Box::pin(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); + let mut fut = pin!(self.inner.payment_forwarded(next_channel_id, skimmed_fee_msat)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2290,7 +2290,7 @@ where &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { let mut fut = - 
Box::pin(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); + pin!(self.inner.channel_open_abandoned(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2309,8 +2309,7 @@ where pub fn channel_open_failed( &self, counterparty_node_id: &PublicKey, user_channel_id: u128, ) -> Result<(), APIError> { - let mut fut = - Box::pin(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); + let mut fut = pin!(self.inner.channel_open_failed(counterparty_node_id, user_channel_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); @@ -2332,7 +2331,7 @@ where &self, user_channel_id: u128, channel_id: &ChannelId, counterparty_node_id: &PublicKey, ) -> Result<(), APIError> { let mut fut = - Box::pin(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); + pin!(self.inner.channel_ready(user_channel_id, channel_id, counterparty_node_id)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); diff --git a/lightning-liquidity/src/manager.rs b/lightning-liquidity/src/manager.rs index f0143fc624f..d3822715b8d 100644 --- a/lightning-liquidity/src/manager.rs +++ b/lightning-liquidity/src/manager.rs @@ -7,7 +7,6 @@ // You may not use this file except in accordance with one or both of these // licenses. 
-use alloc::boxed::Box; use alloc::string::ToString; use alloc::vec::Vec; @@ -61,6 +60,7 @@ use bitcoin::secp256k1::PublicKey; use core::future::Future as StdFuture; use core::ops::Deref; +use core::pin::pin; use core::task; const LSPS_FEATURE_BIT: usize = 729; @@ -1106,7 +1106,7 @@ where ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new( + let mut fut = pin!(LiquidityManager::new( entropy_source, node_signer, channel_manager, @@ -1159,7 +1159,7 @@ where client_config: Option, time_provider: TP, ) -> Result { let kv_store = KVStoreSyncWrapper(kv_store_sync); - let mut fut = Box::pin(LiquidityManager::new_with_custom_time_provider( + let mut fut = pin!(LiquidityManager::new_with_custom_time_provider( entropy_source, node_signer, channel_manager, @@ -1289,7 +1289,7 @@ where pub fn persist(&self) -> Result<(), lightning::io::Error> { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match Box::pin(self.inner.persist()).as_mut().poll(&mut ctx) { + match pin!(self.inner.persist()).as_mut().poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 068f77a84bb..c6fbd3dc3c5 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -43,7 +43,7 @@ use std::hash::Hash; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{self, Poll}; @@ -205,18 +205,17 @@ impl Connection { } us_lock.read_paused }; - // TODO: Drop the Box'ing of the futures once Rust has pin-on-stack support. 
let select_result = if read_paused { TwoSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), } .await } else { ThreeSelector { - a: Box::pin(write_avail_receiver.recv()), - b: Box::pin(read_wake_receiver.recv()), - c: Box::pin(reader.readable()), + a: pin!(write_avail_receiver.recv()), + b: pin!(read_wake_receiver.recv()), + c: pin!(reader.readable()), } .await }; diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 653710a3358..cbc686ed8fe 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -11,6 +11,7 @@ use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::task; use crate::chain::chaininterface::BroadcasterInterface; @@ -289,7 +290,7 @@ where /// Handles all variants of [`BumpTransactionEvent`]. pub fn handle_event(&self, event: &BumpTransactionEvent) { - let mut fut = Box::pin(self.bump_transaction_event_handler.handle_event(event)); + let mut fut = pin!(self.bump_transaction_event_handler.handle_event(event)); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index d00e29e686a..3ad9b4270c5 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -19,7 +19,7 @@ use bitcoin::{BlockHash, Txid}; use core::future::Future; use core::mem; use core::ops::Deref; -use core::pin::Pin; +use core::pin::{pin, Pin}; use core::str::FromStr; use core::task; @@ -490,8 +490,7 @@ impl FutureSpawner for PanicingSpawner { fn poll_sync_future(future: F) -> F::Output { let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - // TODO A future MSRV bump to 1.68 should allow for the pin macro - match Pin::new(&mut 
Box::pin(future)).poll(&mut ctx) { + match pin!(future).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { // In a sync context, we can't wait for the future to complete. diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index 5a1ffad3e04..a3ded6f32b8 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,6 +35,7 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; +use core::pin::pin; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; @@ -970,7 +971,7 @@ where &self, output_descriptors: Vec, channel_id: Option, exclude_static_outputs: bool, delay_until_height: Option, ) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.track_spendable_outputs( + let mut fut = pin!(self.sweeper.track_spendable_outputs( output_descriptors, channel_id, exclude_static_outputs, @@ -1005,7 +1006,7 @@ where /// /// Wraps [`OutputSweeper::regenerate_and_broadcast_spend_if_necessary`]. pub fn regenerate_and_broadcast_spend_if_necessary(&self) -> Result<(), ()> { - let mut fut = Box::pin(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); + let mut fut = pin!(self.sweeper.regenerate_and_broadcast_spend_if_necessary()); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); match fut.as_mut().poll(&mut ctx) { From 0b4e1b5c58a864a2ca7dda7f3b0737df2ec2b8fa Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 31 Oct 2025 14:37:38 +0000 Subject: [PATCH 03/22] Drop required `Box`ing of `KVStore` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `KVStore` methods, dropping the `Pin>` we had to use to have trait methods return a concrete type. Sadly, there's two places where we can't drop a `Box::pin` until we switch to edition 2024. 
--- ci/check-lint.sh | 3 +- lightning-background-processor/src/lib.rs | 43 +++++++- lightning-persister/src/fs_store.rs | 113 ++++++++++------------ lightning/src/util/persist.rs | 71 ++++++++------ lightning/src/util/sweep.rs | 27 +++++- lightning/src/util/test_utils.rs | 18 ++-- 6 files changed, 165 insertions(+), 110 deletions(-) diff --git a/ci/check-lint.sh b/ci/check-lint.sh index 39c10692310..c1f1b08a1e1 100755 --- a/ci/check-lint.sh +++ b/ci/check-lint.sh @@ -107,7 +107,8 @@ CLIPPY() { -A clippy::useless_conversion \ -A clippy::manual_repeat_n `# to be removed once we hit MSRV 1.86` \ -A clippy::manual_is_multiple_of `# to be removed once we hit MSRV 1.87` \ - -A clippy::uninlined-format-args + -A clippy::uninlined-format-args \ + -A clippy::manual-async-fn # Not really sure why this is even a warning when there's a Send bound } CLIPPY diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 19333c5823a..bc0d42ac191 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -41,6 +41,8 @@ use lightning::events::ReplayEvent; use lightning::events::{Event, PathFailure}; use lightning::util::ser::Writeable; +#[cfg(not(c_bindings))] +use lightning::io::Error; use lightning::ln::channelmanager::AChannelManager; use lightning::ln::msgs::OnionMessageHandler; use lightning::ln::peer_handler::APeerManager; @@ -51,6 +53,8 @@ use lightning::routing::utxo::UtxoLookup; use lightning::sign::{ ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, }; +#[cfg(not(c_bindings))] +use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; use lightning::util::persist::{ KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -83,7 +87,11 @@ use std::time::Instant; #[cfg(not(feature = "std"))] use alloc::boxed::Box; #[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::string::String; 
+#[cfg(all(not(c_bindings), not(feature = "std")))] use alloc::sync::Arc; +#[cfg(all(not(c_bindings), not(feature = "std")))] +use alloc::vec::Vec; /// `BackgroundProcessor` takes care of tasks that (1) need to happen periodically to keep /// Rust-Lightning running properly, and (2) either can or should be run in the background. Its @@ -416,6 +424,37 @@ pub const NO_ONION_MESSENGER: Option< >, > = None; +#[cfg(not(c_bindings))] +/// A panicking implementation of [`KVStore`] that is used in [`NO_LIQUIDITY_MANAGER`]. +pub struct DummyKVStore; + +#[cfg(not(c_bindings))] +impl KVStore for DummyKVStore { + fn read( + &self, _: &str, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn write( + &self, _: &str, _: &str, _: &str, _: Vec, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn remove( + &self, _: &str, _: &str, _: &str, _: bool, + ) -> impl core::future::Future> + MaybeSend + 'static { + async { unimplemented!() } + } + + fn list( + &self, _: &str, _: &str, + ) -> impl core::future::Future, Error>> + MaybeSend + 'static { + async { unimplemented!() } + } +} + /// When initializing a background processor without a liquidity manager, this can be used to avoid /// specifying a concrete `LiquidityManager` type. 
#[cfg(not(c_bindings))] @@ -430,8 +469,8 @@ pub const NO_LIQUIDITY_MANAGER: Option< CM = &DynChannelManager, Filter = dyn chain::Filter + Send + Sync, C = &(dyn chain::Filter + Send + Sync), - KVStore = dyn lightning::util::persist::KVStore + Send + Sync, - K = &(dyn lightning::util::persist::KVStore + Send + Sync), + KVStore = DummyKVStore, + K = &DummyKVStore, TimeProvider = dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync, TP = &(dyn lightning_liquidity::utils::time::TimeProvider + Send + Sync), BroadcasterInterface = dyn lightning::chain::chaininterface::BroadcasterInterface diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index 9b15398d4d1..b2d327f6bc1 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -14,8 +14,6 @@ use std::sync::{Arc, Mutex, RwLock}; #[cfg(feature = "tokio")] use core::future::Future; #[cfg(feature = "tokio")] -use core::pin::Pin; -#[cfg(feature = "tokio")] use lightning::util::persist::KVStore; #[cfg(target_os = "windows")] @@ -464,93 +462,85 @@ impl FilesystemStoreInner { impl KVStore for FilesystemStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( + let path = this.get_checked_dest_file_path( primary_namespace, secondary_namespace, Some(key), "read", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + ); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.read(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> 
Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "write", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "write") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.write_version(inner_lock_ref, path, buf, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> Pin> + 'static + Send>> { + ) -> impl Future> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - Some(key), - "remove", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; - - let (inner_lock_ref, version) = self.get_new_version_and_lock_ref(path.clone()); - Box::pin(async move { + let path = this + .get_checked_dest_file_path(primary_namespace, secondary_namespace, Some(key), "remove") + .map(|path| (self.get_new_version_and_lock_ref(path.clone()), path)); + + async move { + let ((inner_lock_ref, version), path) = match path { + Ok(res) => res, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || { this.remove_version(inner_lock_ref, path, lazy, version) }) .await .unwrap_or_else(|e| Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e))) - }) + } } fn list( &self, primary_namespace: &str, secondary_namespace: 
&str, - ) -> Pin, lightning::io::Error>> + 'static + Send>> { + ) -> impl Future, lightning::io::Error>> + 'static + Send { let this = Arc::clone(&self.inner); - let path = match this.get_checked_dest_file_path( - primary_namespace, - secondary_namespace, - None, - "list", - ) { - Ok(path) => path, - Err(e) => return Box::pin(async move { Err(e) }), - }; + let path = + this.get_checked_dest_file_path(primary_namespace, secondary_namespace, None, "list"); - Box::pin(async move { + async move { + let path = match path { + Ok(path) => path, + Err(e) => return Err(e), + }; tokio::task::spawn_blocking(move || this.list(path)).await.unwrap_or_else(|e| { Err(lightning::io::Error::new(lightning::io::ErrorKind::Other, e)) }) - }) + } } } @@ -758,24 +748,24 @@ mod tests { let fs_store = Arc::new(FilesystemStore::new(temp_path)); assert_eq!(fs_store.state_size(), 0); - let async_fs_store: Arc = fs_store.clone(); + let async_fs_store = Arc::clone(&fs_store); let data1 = vec![42u8; 32]; let data2 = vec![43u8; 32]; - let primary_namespace = "testspace"; - let secondary_namespace = "testsubspace"; + let primary = "testspace"; + let secondary = "testsubspace"; let key = "testkey"; // Test writing the same key twice with different data. Execute the asynchronous part out of order to ensure // that eventual consistency works. 
- let fut1 = async_fs_store.write(primary_namespace, secondary_namespace, key, data1); + let fut1 = KVStore::write(&*async_fs_store, primary, secondary, key, data1); assert_eq!(fs_store.state_size(), 1); - let fut2 = async_fs_store.remove(primary_namespace, secondary_namespace, key, false); + let fut2 = KVStore::remove(&*async_fs_store, primary, secondary, key, false); assert_eq!(fs_store.state_size(), 1); - let fut3 = async_fs_store.write(primary_namespace, secondary_namespace, key, data2.clone()); + let fut3 = KVStore::write(&*async_fs_store, primary, secondary, key, data2.clone()); assert_eq!(fs_store.state_size(), 1); fut3.await.unwrap(); @@ -788,21 +778,18 @@ mod tests { assert_eq!(fs_store.state_size(), 0); // Test list. - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 1); assert_eq!(listed_keys[0], key); // Test read. We expect to read data2, as the write call was initiated later. - let read_data = - async_fs_store.read(primary_namespace, secondary_namespace, key).await.unwrap(); + let read_data = KVStore::read(&*async_fs_store, primary, secondary, key).await.unwrap(); assert_eq!(data2, &*read_data); // Test remove. 
- async_fs_store.remove(primary_namespace, secondary_namespace, key, false).await.unwrap(); + KVStore::remove(&*async_fs_store, primary, secondary, key, false).await.unwrap(); - let listed_keys = - async_fs_store.list(primary_namespace, secondary_namespace).await.unwrap(); + let listed_keys = KVStore::list(&*async_fs_store, primary, secondary).await.unwrap(); assert_eq!(listed_keys.len(), 0); } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 3ad9b4270c5..7feb781a57a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -34,7 +34,7 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -216,34 +216,34 @@ where { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.read(primary_namespace, secondary_namespace, key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.write(primary_namespace, secondary_namespace, key, buf); - Box::pin(async move { res }) + async move { res } } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.0.remove(primary_namespace, secondary_namespace, key, lazy); - Box::pin(async move { res }) + async 
move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.0.list(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } @@ -283,16 +283,18 @@ pub trait KVStore { /// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; /// Persists the given data under the given `key`. /// - /// The order of multiple writes to the same key needs to be retained while persisting - /// asynchronously. In other words, if two writes to the same key occur, the state (as seen by - /// [`Self::read`]) must either see the first write then the second, or only ever the second, - /// no matter when the futures complete (and must always contain the second write once the - /// second future completes). The state should never contain the first write after the second - /// write's future completes, nor should it contain the second write, then contain the first - /// write at any point thereafter (even if the second write's future hasn't yet completed). + /// Note that this is *not* an `async fn`. Rather, the order of multiple writes to the same key + /// (as defined by the order of the synchronous function calls) needs to be retained while + /// persisting asynchronously. In other words, if two writes to the same key occur, the state + /// (as seen by [`Self::read`]) must either see the first write then the second, or only ever + /// the second, no matter when the futures complete (and must always contain the second write + /// once the second future completes). 
The state should never contain the first write after the + /// second write's future completes, nor should it contain the second write, then contain the + /// first write at any point thereafter (even if the second write's future hasn't yet + /// completed). /// /// One way to ensure this requirement is met is by assigning a version number to each write /// before returning the future, and then during asynchronous execution, ensuring that the @@ -303,7 +305,7 @@ pub trait KVStore { /// Will create the given `primary_namespace` and `secondary_namespace` if not already present in the store. fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Removes any data that had previously been persisted under the given `key`. /// /// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily @@ -311,6 +313,10 @@ pub trait KVStore { /// eventual batch deletion of multiple keys. As a consequence, subsequent calls to /// [`KVStoreSync::list`] might include the removed key until the changes are actually persisted. /// + /// Note that similar to [`Self::write`] this is *not* an `async fn`, but rather a sync fn + /// which defines the order of writes to a given key, but which may complete its operation + /// asynchronously. + /// /// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent /// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could /// potentially get lost on crash after the method returns. Therefore, this flag should only be @@ -321,12 +327,13 @@ pub trait KVStore { /// to the same key which occur before a removal completes must cancel/overwrite the pending /// removal. 
/// + /// /// Returns successfully if no data will be stored for the given `primary_namespace`, /// `secondary_namespace`, and `key`, independently of whether it was present before its /// invokation or not. fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error>; + ) -> impl Future> + 'static + MaybeSend; /// Returns a list of keys that are stored under the given `secondary_namespace` in /// `primary_namespace`. /// @@ -334,7 +341,7 @@ pub trait KVStore { /// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown. fn list( &self, primary_namespace: &str, secondary_namespace: &str, - ) -> AsyncResult<'static, Vec, io::Error>; + ) -> impl Future, io::Error>> + 'static + MaybeSend; } /// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`] @@ -1005,6 +1012,9 @@ where } } +trait MaybeSendableFuture: Future> + MaybeSend {} +impl> + MaybeSend> MaybeSendableFuture for F {} + impl MonitorUpdatingPersisterAsyncInner where @@ -1178,9 +1188,9 @@ where Ok(()) } - fn persist_new_channel( - &self, monitor_name: MonitorName, monitor: &ChannelMonitor, - ) -> impl Future> { + fn persist_new_channel<'a, ChannelSigner: EcdsaChannelSigner>( + &'a self, monitor_name: MonitorName, monitor: &'a ChannelMonitor, + ) -> Pin> + 'static>> { // Determine the proper key for this monitor let monitor_key = monitor_name.to_string(); // Serialize and write the new monitor @@ -1199,7 +1209,10 @@ where // completion of the write. This ensures monitor persistence ordering is preserved. let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; - self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes) + // There's no real reason why this needs to be boxed, but dropping it rams into the "hidden + // type for impl... 
captures lifetime that does not appear in bounds" issue. This can + // trivially be dropped once we upgrade to edition 2024/MSRV 1.85. + Box::pin(self.kv_store.write(primary, secondary, monitor_key.as_str(), monitor_bytes)) } fn update_persisted_channel<'a, ChannelSigner: EcdsaChannelSigner + 'a>( @@ -1225,12 +1238,10 @@ where // write method, allowing it to do its queueing immediately, and then return a // future for the completion of the write. This ensures monitor persistence // ordering is preserved. - res_a = Some(self.kv_store.write( - primary, - &monitor_key, - update_name.as_str(), - update.encode(), - )); + let encoded = update.encode(); + res_a = Some(async move { + self.kv_store.write(primary, &monitor_key, update_name.as_str(), encoded).await + }); } else { // We could write this update, but it meets criteria of our design that calls for a full monitor write. // Note that this is NOT an async function, but rather calls the *sync* KVStore diff --git a/lightning/src/util/sweep.rs b/lightning/src/util/sweep.rs index a3ded6f32b8..bf048efdae1 100644 --- a/lightning/src/util/sweep.rs +++ b/lightning/src/util/sweep.rs @@ -35,11 +35,11 @@ use bitcoin::{BlockHash, ScriptBuf, Transaction, Txid}; use core::future::Future; use core::ops::Deref; -use core::pin::pin; +use core::pin::{pin, Pin}; use core::sync::atomic::{AtomicBool, Ordering}; use core::task; -use super::async_poll::{dummy_waker, AsyncResult}; +use super::async_poll::dummy_waker; /// The number of blocks we wait before we prune the tracked spendable outputs. 
pub const PRUNE_DELAY_BLOCKS: u32 = ARCHIVAL_DELAY_BLOCKS + ANTI_REORG_DELAY; @@ -610,15 +610,32 @@ where sweeper_state.dirty = true; } - fn persist_state<'a>(&self, sweeper_state: &SweeperState) -> AsyncResult<'a, (), io::Error> { + #[cfg(feature = "std")] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + Send + 'static>> { let encoded = sweeper_state.encode(); - self.kv_store.write( + Box::pin(self.kv_store.write( OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, OUTPUT_SWEEPER_PERSISTENCE_KEY, encoded, - ) + )) + } + + #[cfg(not(feature = "std"))] + fn persist_state<'a>( + &'a self, sweeper_state: &SweeperState, + ) -> Pin> + 'static>> { + let encoded = sweeper_state.encode(); + + Box::pin(self.kv_store.write( + OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE, + OUTPUT_SWEEPER_PERSISTENCE_KEY, + encoded, + )) } /// Updates the sweeper state by executing the given callback. 
Persists the state afterwards if it is marked dirty, diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index ad8ea224205..b4db17bee20 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -50,7 +50,7 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::AsyncResult; +use crate::util::async_poll::MaybeSend; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -1012,13 +1012,13 @@ impl TestStore { impl KVStore for TestStore { fn read( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.read_internal(&primary_namespace, &secondary_namespace, &key); - Box::pin(async move { res }) + async move { res } } fn write( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: Vec, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let path = format!("{primary_namespace}/{secondary_namespace}/{key}"); let future = Arc::new(Mutex::new((None, None))); @@ -1027,19 +1027,19 @@ impl KVStore for TestStore { let new_id = pending_writes.last().map(|(id, _, _)| id + 1).unwrap_or(0); pending_writes.push((new_id, Arc::clone(&future), buf)); - Box::pin(OneShotChannel(future)) + OneShotChannel(future) } fn remove( &self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool, - ) -> AsyncResult<'static, (), io::Error> { + ) -> impl Future> + 'static + MaybeSend { let res = self.remove_internal(&primary_namespace, &secondary_namespace, &key, lazy); - Box::pin(async move { res }) + async move { res } } fn list( &self, primary_namespace: &str, secondary_namespace: 
&str, - ) -> AsyncResult<'static, Vec, io::Error> { + ) -> impl Future, io::Error>> + 'static + MaybeSend { let res = self.list_internal(primary_namespace, secondary_namespace); - Box::pin(async move { res }) + async move { res } } } From a435228ce6a9a668a0cf065012d42e7abd6f83b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:11:18 +0000 Subject: [PATCH 04/22] Drop required `Box`ing of `lightning-block-sync` `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `lightning-block-sync` trait methods, dropping the `Pin>` we had to use to have trait methods return a concrete type. --- lightning-block-sync/src/gossip.rs | 49 +++++++++++++++----------- lightning-block-sync/src/lib.rs | 18 ++++------ lightning-block-sync/src/poll.rs | 30 +++++++--------- lightning-block-sync/src/rest.rs | 37 ++++++++++--------- lightning-block-sync/src/rpc.rs | 35 ++++++++++-------- lightning-block-sync/src/test_utils.rs | 24 +++++++------ 6 files changed, 101 insertions(+), 92 deletions(-) diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 0fe221b9231..596098350c7 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -2,7 +2,7 @@ //! current UTXO set. This module defines an implementation of the LDK API required to do so //! against a [`BlockSource`] which implements a few additional methods for accessing the UTXO set. 
-use crate::{AsyncBlockSourceResult, BlockData, BlockSource, BlockSourceError}; +use crate::{BlockData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::block::Block; use bitcoin::constants::ChainHash; @@ -18,7 +18,7 @@ use lightning::util::native_async::FutureSpawner; use std::collections::VecDeque; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; +use std::pin::{pin, Pin}; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -35,11 +35,13 @@ pub trait UtxoSource: BlockSource + 'static { /// for gossip validation. fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash>; + ) -> impl Future> + Send + 'a; /// Returns true if the given output has *not* been spent, i.e. is a member of the current UTXO /// set. - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool>; + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a; } #[cfg(feature = "tokio")] @@ -55,34 +57,37 @@ impl FutureSpawner for TokioSpawner { /// A trivial future which joins two other futures and polls them at the same time, returning only /// once both complete. 
pub(crate) struct Joiner< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, + 'a, + A: Future), BlockSourceError>>, + B: Future>, > { - pub a: A, - pub b: B, + pub a: Pin<&'a mut A>, + pub b: Pin<&'a mut B>, a_res: Option<(BlockHash, Option)>, b_res: Option, } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Joiner<'a, A, B> { - fn new(a: A, b: B) -> Self { + fn new(a: Pin<&'a mut A>, b: Pin<&'a mut B>) -> Self { Self { a, b, a_res: None, b_res: None } } } impl< - A: Future), BlockSourceError>> + Unpin, - B: Future> + Unpin, - > Future for Joiner + 'a, + A: Future), BlockSourceError>>, + B: Future>, + > Future for Joiner<'a, A, B> { type Output = Result<((BlockHash, Option), BlockHash), BlockSourceError>; fn poll(mut self: Pin<&mut Self>, ctx: &mut core::task::Context<'_>) -> Poll { if self.a_res.is_none() { - match Pin::new(&mut self.a).poll(ctx) { + match self.a.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.a_res = Some(ok); @@ -94,7 +99,7 @@ impl< } } if self.b_res.is_none() { - match Pin::new(&mut self.b).poll(ctx) { + match self.b.as_mut().poll(ctx) { Poll::Ready(res) => { if let Ok(ok) = res { self.b_res = Some(ok); @@ -200,10 +205,12 @@ where } } - let ((_, tip_height_opt), block_hash) = - Joiner::new(source.get_best_block(), source.get_block_hash_by_height(block_height)) - .await - .map_err(|_| UtxoLookupError::UnknownTx)?; + let ((_, tip_height_opt), block_hash) = Joiner::new( + pin!(source.get_best_block()), + pin!(source.get_block_hash_by_height(block_height)), + ) + .await + .map_err(|_| UtxoLookupError::UnknownTx)?; if let Some(tip_height) = tip_height_opt { // If the block doesn't yet have five confirmations, error out. 
// diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 8656ba6ec6b..02593047658 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -53,7 +53,6 @@ use lightning::chain::{BestBlock, Listen}; use std::future::Future; use std::ops::Deref; -use std::pin::Pin; /// Abstract type for retrieving block headers and data. pub trait BlockSource: Sync + Send { @@ -65,12 +64,13 @@ pub trait BlockSource: Sync + Send { /// when `height_hint` is `None`. fn get_header<'a>( &'a self, header_hash: &'a BlockHash, height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData>; + ) -> impl Future> + Send + 'a; /// Returns the block for a given hash. A headers-only block source should return a `Transient` /// error. - fn get_block<'a>(&'a self, header_hash: &'a BlockHash) - -> AsyncBlockSourceResult<'a, BlockData>; + fn get_block<'a>( + &'a self, header_hash: &'a BlockHash, + ) -> impl Future> + Send + 'a; /// Returns the hash of the best block and, optionally, its height. /// @@ -78,18 +78,14 @@ pub trait BlockSource: Sync + Send { /// to allow for a more efficient lookup. /// /// [`get_header`]: Self::get_header - fn get_best_block(&self) -> AsyncBlockSourceResult<'_, (BlockHash, Option)>; + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a; } /// Result type for `BlockSource` requests. pub type BlockSourceResult = Result; -// TODO: Replace with BlockSourceResult once `async` trait functions are supported. For details, -// see: https://areweasyncyet.rs. -/// Result type for asynchronous `BlockSource` requests. -pub type AsyncBlockSourceResult<'a, T> = - Pin> + 'a + Send>>; - /// Error type for `BlockSource` requests. 
/// /// Transient errors may be resolved when re-polling, but no attempt will be made to re-poll on diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 843cc961899..13e0403c3b6 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -1,14 +1,12 @@ //! Adapters that make one or more [`BlockSource`]s simpler to poll for new chain tip transitions. -use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - BlockSourceResult, -}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain::BestBlock; +use std::future::Future; use std::ops::Deref; /// The `Poll` trait defines behavior for polling block sources for a chain tip and retrieving @@ -22,17 +20,17 @@ pub trait Poll { /// Returns a chain tip in terms of its relationship to the provided chain tip. fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip>; + ) -> impl Future> + Send + 'a; /// Returns the header that preceded the given header in the chain. fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader>; + ) -> impl Future> + Send + 'a; /// Returns the block associated with the given header. fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock>; + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. 
@@ -217,8 +215,8 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll { fn poll_chain_tip<'a>( &'a self, best_known_chain_tip: ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ChainTip> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let (block_hash, height) = self.block_source.get_best_block().await?; if block_hash == best_known_chain_tip.header.block_hash() { return Ok(ChainTip::Common); @@ -231,13 +229,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll } else { Ok(ChainTip::Worse(chain_tip)) } - }) + } } fn look_up_previous_header<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlockHeader> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if header.height == 0 { return Err(BlockSourceError::persistent("genesis block reached")); } @@ -252,15 +250,13 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll header.check_builds_on(&previous_header, self.network)?; Ok(previous_header) - }) + } } fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, - ) -> AsyncBlockSourceResult<'a, ValidatedBlock> { - Box::pin(async move { - self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) - }) + ) -> impl Future> + Send + 'a { + async move { self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } } } diff --git a/lightning-block-sync/src/rest.rs b/lightning-block-sync/src/rest.rs index 1f79ab4a0b0..619981bb4d0 100644 --- a/lightning-block-sync/src/rest.rs +++ b/lightning-block-sync/src/rest.rs @@ -4,13 +4,14 @@ use crate::convert::GetUtxosResponse; use crate::gossip::UtxoSource; use crate::http::{BinaryResponse, HttpClient, HttpEndpoint, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; use 
std::convert::TryFrom; use std::convert::TryInto; +use std::future::Future; use std::sync::Mutex; /// A simple REST client for requesting resources using HTTP `GET`. @@ -49,49 +50,51 @@ impl RestClient { impl BlockSource for RestClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("headers/1/{}.json", header_hash.to_string()); Ok(self.request_resource::(&resource_path).await?) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("block/{}.bin", header_hash.to_string()); Ok(BlockData::FullBlock( self.request_resource::(&resource_path).await?, )) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin( - async move { Ok(self.request_resource::("chaininfo.json").await?) }, - ) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.request_resource::("chaininfo.json").await?) } } } impl UtxoSource for RestClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("blockhashbyheight/{}.bin", block_height); Ok(self.request_resource::(&resource_path).await?) 
- }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let resource_path = format!("getutxos/{}-{}.json", outpoint.txid.to_string(), outpoint.vout); let utxo_result = self.request_resource::(&resource_path).await?; Ok(utxo_result.hit_bitmap_nonempty) - }) + } } } diff --git a/lightning-block-sync/src/rpc.rs b/lightning-block-sync/src/rpc.rs index 3df50a2267b..d851ba2ccf0 100644 --- a/lightning-block-sync/src/rpc.rs +++ b/lightning-block-sync/src/rpc.rs @@ -3,7 +3,7 @@ use crate::gossip::UtxoSource; use crate::http::{HttpClient, HttpEndpoint, HttpError, JsonResponse}; -use crate::{AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource}; +use crate::{BlockData, BlockHeaderData, BlockSource, BlockSourceResult}; use bitcoin::hash_types::BlockHash; use bitcoin::OutPoint; @@ -16,6 +16,7 @@ use std::convert::TryFrom; use std::convert::TryInto; use std::error::Error; use std::fmt; +use std::future::Future; use std::sync::atomic::{AtomicUsize, Ordering}; /// An error returned by the RPC server. @@ -135,47 +136,51 @@ impl RpcClient { impl BlockSource for RpcClient { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); Ok(self.call_method("getblockheader", &[header_hash]).await?) 
- }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let header_hash = serde_json::json!(header_hash.to_string()); let verbosity = serde_json::json!(0); Ok(BlockData::FullBlock(self.call_method("getblock", &[header_hash, verbosity]).await?)) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { Ok(self.call_method("getblockchaininfo", &[]).await?) }) + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { Ok(self.call_method("getblockchaininfo", &[]).await?) } } } impl UtxoSource for RpcClient { fn get_block_hash_by_height<'a>( &'a self, block_height: u32, - ) -> AsyncBlockSourceResult<'a, BlockHash> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { let height_param = serde_json::json!(block_height); Ok(self.call_method("getblockhash", &[height_param]).await?) 
- }) + } } - fn is_output_unspent<'a>(&'a self, outpoint: OutPoint) -> AsyncBlockSourceResult<'a, bool> { - Box::pin(async move { + fn is_output_unspent<'a>( + &'a self, outpoint: OutPoint, + ) -> impl Future> + Send + 'a { + async move { let txid_param = serde_json::json!(outpoint.txid.to_string()); let vout_param = serde_json::json!(outpoint.vout); let include_mempool = serde_json::json!(false); let utxo_opt: serde_json::Value = self.call_method("gettxout", &[txid_param, vout_param, include_mempool]).await?; Ok(!utxo_opt.is_null()) - }) + } } } diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index d307c4506eb..40788e4d08c 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,7 +1,6 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - AsyncBlockSourceResult, BlockData, BlockHeaderData, BlockSource, BlockSourceError, - UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, UnboundedCache, }; use bitcoin::block::{Block, Header, Version}; @@ -17,6 +16,7 @@ use lightning::chain::BestBlock; use std::cell::RefCell; use std::collections::VecDeque; +use std::future::Future; #[derive(Default)] pub struct Blockchain { @@ -141,8 +141,8 @@ impl Blockchain { impl BlockSource for Blockchain { fn get_header<'a>( &'a self, header_hash: &'a BlockHash, _height_hint: Option, - ) -> AsyncBlockSourceResult<'a, BlockHeaderData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { if self.without_headers { return Err(BlockSourceError::persistent("header not found")); } @@ -158,13 +158,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("header not found")) - }) + } } fn get_block<'a>( &'a self, header_hash: &'a BlockHash, - ) -> AsyncBlockSourceResult<'a, BlockData> { - Box::pin(async move { + ) -> impl Future> + Send + 'a { + async move { for (height, block) in self.blocks.iter().enumerate() 
{ if block.header.block_hash() == *header_hash { if let Some(without_blocks) = &self.without_blocks { @@ -181,11 +181,13 @@ impl BlockSource for Blockchain { } } Err(BlockSourceError::transient("block not found")) - }) + } } - fn get_best_block<'a>(&'a self) -> AsyncBlockSourceResult<'a, (BlockHash, Option)> { - Box::pin(async move { + fn get_best_block<'a>( + &'a self, + ) -> impl Future)>> + Send + 'a { + async move { match self.blocks.last() { None => Err(BlockSourceError::transient("empty chain")), Some(block) => { @@ -193,7 +195,7 @@ impl BlockSource for Blockchain { Ok((block.block_hash(), Some(height))) }, } - }) + } } } From 3da5f583e503d742a185ebf7b0207b2f6cd6c0d6 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 18:24:51 +0000 Subject: [PATCH 05/22] Drop required `Box`ing of `lightning` trait `Future`s Now that our MSRV is 1.75, we can return `impl Trait` from trait methods. Here we use this to clean up `lightning` crate trait methods, dropping the `Pin>`/`AsyncResult` we had to use to have trait methods return a concrete type. 
--- lightning/src/events/bump_transaction/mod.rs | 31 ++++++++++----- lightning/src/events/bump_transaction/sync.rs | 38 +++++++++++-------- lightning/src/sign/mod.rs | 14 +++++-- lightning/src/util/async_poll.rs | 12 ------ 4 files changed, 54 insertions(+), 41 deletions(-) diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index 3d9beb82c07..e141d9b8abc 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -14,6 +14,7 @@ pub mod sync; use alloc::collections::BTreeMap; +use core::future::Future; use core::ops::Deref; use crate::chain::chaininterface::{ @@ -36,7 +37,7 @@ use crate::sign::{ ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; @@ -394,13 +395,15 @@ pub trait CoinSelectionSource { fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()>; + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full witness for all inputs within the transaction known to the /// trait (i.e., any provided via [`CoinSelectionSource::select_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. 
- fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// An alternative to [`CoinSelectionSource`] that can be implemented and used along [`Wallet`] to @@ -412,17 +415,23 @@ pub trait CoinSelectionSource { // Note that updates to documentation on this trait should be copied to the synchronous version. pub trait WalletSource { /// Returns all UTXOs, with at least 1 confirmation each, that are available to spend. - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()>; + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a; /// Returns a script to use for change above dust resulting from a successful coin selection /// attempt. - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; /// Signs and provides the full [`TxIn::script_sig`] and [`TxIn::witness`] for all inputs within /// the transaction known to the wallet (i.e., any provided via /// [`WalletSource::list_confirmed_utxos`]). /// /// If your wallet does not support signing PSBTs you can call `psbt.extract_tx()` to get the /// unsigned transaction and then sign it with your wallet. - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()>; + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a; } /// A wrapper over [`WalletSource`] that implements [`CoinSelectionSource`] by preferring UTXOs @@ -617,8 +626,8 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { - Box::pin(async move { + ) -> impl Future> + MaybeSend + 'a { + async move { let utxos = self.source.list_confirmed_utxos().await?; // TODO: Use fee estimation utils when we upgrade to bitcoin v0.30.0. 
let total_output_size: u64 = must_pay_to @@ -665,10 +674,12 @@ where } } Err(()) - }) + } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { self.source.sign_psbt(psbt) } } diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index cbc686ed8fe..1328c2c1b3a 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -18,7 +18,7 @@ use crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use crate::util::async_poll::{dummy_waker, AsyncResult, MaybeSend, MaybeSync}; +use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; @@ -72,19 +72,25 @@ impl WalletSource for WalletSourceSyncWrapper where T::Target: WalletSourceSync, { - fn list_confirmed_utxos<'a>(&'a self) -> AsyncResult<'a, Vec, ()> { + fn list_confirmed_utxos<'a>( + &'a self, + ) -> impl Future, ()>> + MaybeSend + 'a { let utxos = self.0.list_confirmed_utxos(); - Box::pin(async move { utxos }) + async move { utxos } } - fn get_change_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_script(); - Box::pin(async move { script }) + async move { script } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let signed_psbt = self.0.sign_psbt(psbt); - Box::pin(async move { signed_psbt }) + async move { signed_psbt } } } @@ -123,7 +129,7 @@ where &self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &[TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, ) -> Result { - let mut fut = 
self.wallet.select_confirmed_utxos( + let fut = self.wallet.select_confirmed_utxos( claim_id, must_spend, must_pay_to, @@ -132,7 +138,7 @@ where ); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!( @@ -143,10 +149,10 @@ where } fn sign_psbt(&self, psbt: Psbt) -> Result { - let mut fut = self.wallet.sign_psbt(psbt); + let fut = self.wallet.sign_psbt(psbt); let mut waker = dummy_waker(); let mut ctx = task::Context::from_waker(&mut waker); - match fut.as_mut().poll(&mut ctx) { + match pin!(fut).poll(&mut ctx) { task::Poll::Ready(result) => result, task::Poll::Pending => { unreachable!("Wallet::sign_psbt should not be pending in a sync context"); @@ -234,7 +240,7 @@ where fn select_confirmed_utxos<'a>( &'a self, claim_id: ClaimId, must_spend: Vec, must_pay_to: &'a [TxOut], target_feerate_sat_per_1000_weight: u32, max_tx_weight: u64, - ) -> AsyncResult<'a, CoinSelection, ()> { + ) -> impl Future> + MaybeSend + 'a { let coins = self.0.select_confirmed_utxos( claim_id, must_spend, @@ -242,12 +248,14 @@ where target_feerate_sat_per_1000_weight, max_tx_weight, ); - Box::pin(async move { coins }) + async move { coins } } - fn sign_psbt<'a>(&'a self, psbt: Psbt) -> AsyncResult<'a, Transaction, ()> { + fn sign_psbt<'a>( + &'a self, psbt: Psbt, + ) -> impl Future> + MaybeSend + 'a { let psbt = self.0.sign_psbt(psbt); - Box::pin(async move { psbt }) + async move { psbt } } } diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 1d771d22783..6d0d5bf405a 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::AsyncResult; +use 
crate::util::async_poll::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; @@ -68,7 +68,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; #[cfg(taproot)] use crate::sign::taproot::TaprootChannelSigner; use crate::util::atomic_counter::AtomicCounter; + use core::convert::TryInto; +use core::future::Future; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; #[cfg(taproot)] @@ -1066,7 +1068,9 @@ pub trait ChangeDestinationSource { /// /// This method should return a different value each time it is called, to avoid linking /// on-chain funds controlled to the same user. - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()>; + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a; } /// A synchronous helper trait that describes an on-chain wallet capable of returning a (change) destination script. @@ -1101,9 +1105,11 @@ impl ChangeDestinationSource for ChangeDestinationSourceSyncWrapper where T::Target: ChangeDestinationSourceSync, { - fn get_change_destination_script<'a>(&'a self) -> AsyncResult<'a, ScriptBuf, ()> { + fn get_change_destination_script<'a>( + &'a self, + ) -> impl Future> + MaybeSend + 'a { let script = self.0.get_change_destination_script(); - Box::pin(async move { script }) + async move { script } } } diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index eefa40d1055..9c2ca4c247f 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -9,7 +9,6 @@ //! Some utilities to make working with the standard library's [`Future`]s easier -use alloc::boxed::Box; use alloc::vec::Vec; use core::future::Future; use core::marker::Unpin; @@ -92,17 +91,6 @@ pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } -#[cfg(feature = "std")] -/// A type alias for a future that returns a result of type `T` or error `E`. 
-/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a + Send>>; -#[cfg(not(feature = "std"))] -/// A type alias for a future that returns a result of type `T` or error `E`. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub type AsyncResult<'a, T, E> = Pin> + 'a>>; - /// Marker trait to optionally implement `Sync` under std. /// /// This is not exported to bindings users as async is only supported in Rust. From b1f1ee2a1d36f611c187b21b7a0dbbb2efb52036 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 25 Oct 2025 14:41:09 +0000 Subject: [PATCH 06/22] Drop `Box`ing of iterators during BOLT 11 invoice serialization Now that we have an MSRV that supports returning `impl Trait` in trait methods, we can use it to avoid the `Box` we had spewed all over our BOLT 11 invoice serialization. --- lightning-invoice/src/lib.rs | 5 +- lightning-invoice/src/ser.rs | 136 ++++++++++++++++++----------------- 2 files changed, 71 insertions(+), 70 deletions(-) diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index 47f929377de..60d413cf76a 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -40,7 +40,6 @@ use bitcoin::secp256k1::ecdsa::RecoverableSignature; use bitcoin::secp256k1::PublicKey; use bitcoin::secp256k1::{Message, Secp256k1}; -use alloc::boxed::Box; use alloc::string; use core::cmp::Ordering; use core::fmt::{self, Display, Formatter}; @@ -1081,8 +1080,8 @@ macro_rules! 
find_all_extract { #[allow(missing_docs)] impl RawBolt11Invoice { /// Hash the HRP (as bytes) and signatureless data part (as Fe32 iterator) - fn hash_from_parts<'s>( - hrp_bytes: &[u8], data_without_signature: Box + 's>, + fn hash_from_parts<'s, I: Iterator + 's>( + hrp_bytes: &[u8], data_without_signature: I, ) -> [u8; 32] { use crate::bech32::Fe32IterExt; use bitcoin::hashes::HashEngine; diff --git a/lightning-invoice/src/ser.rs b/lightning-invoice/src/ser.rs index 5c93fa84ae0..853accdd3ca 100644 --- a/lightning-invoice/src/ser.rs +++ b/lightning-invoice/src/ser.rs @@ -1,4 +1,3 @@ -use alloc::boxed::Box; use core::fmt; use core::fmt::{Display, Formatter}; use core::{array, iter}; @@ -13,14 +12,28 @@ use super::{ SignedRawBolt11Invoice, TaggedField, }; +macro_rules! define_iterator_enum { + ($name: ident, $($n: ident),*) => { + enum $name<$($n: Iterator,)*> { + $($n($n),)* + } + impl<$($n: Iterator,)*> Iterator for $name<$($n,)*> { + type Item = Fe32; + fn next(&mut self) -> Option { + match self { + $(Self::$n(iter) => iter.next(),)* + } + } + } + } +} + /// Objects that can be encoded to base32 (bech32). /// -/// Private to this crate to avoid polluting the API. +/// Private to this crate (except in fuzzing) to avoid polluting the API. pub trait Base32Iterable { - /// apoelstra: In future we want to replace this Box with an explicit - /// associated type, to avoid the allocation. But we cannot do this until - /// Rust 1.65 and GATs since the iterator may contain a reference to self. - fn fe_iter<'s>(&'s self) -> Box + 's>; + /// Serialize this object, returning an iterator over bech32 field elements. + fn fe_iter<'s>(&'s self) -> impl Iterator + 's; } /// Interface to calculate the length of the base32 representation before actually serializing @@ -32,7 +45,7 @@ pub(crate) trait Base32Len: Base32Iterable { // Base32Iterable & Base32Len implementations are here, because the traits are in this module. 
impl Base32Iterable for [u8; N] { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { self[..].fe_iter() } } @@ -45,8 +58,8 @@ impl Base32Len for [u8; N] { } impl Base32Iterable for [u8] { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -58,8 +71,8 @@ impl Base32Len for [u8] { } impl Base32Iterable for Vec { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.iter().copied().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.iter().copied().bytes_to_fes() } } @@ -71,8 +84,8 @@ impl Base32Len for Vec { } impl Base32Iterable for PaymentSecret { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -88,7 +101,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { /// starting from the rightmost bit, /// and taking the resulting 5-bit values in reverse (left-to-right), /// with the leading 0's skipped. 
- fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { // Fe32 conversion cannot be used, because this packs from right, right-to-left let mut input_iter = self.le_flags().iter(); // Carry bits, 0..7 bits @@ -126,7 +139,7 @@ impl Base32Iterable for Bolt11InvoiceFeatures { output.push(Fe32::try_from(next_out8 & 31u8).expect("<32")) } // Take result in reverse order, and skip leading 0s - Box::new(output.into_iter().rev().skip_while(|e| *e == Fe32::Q)) + output.into_iter().rev().skip_while(|e| *e == Fe32::Q) } } @@ -241,36 +254,35 @@ fn encoded_int_be_base32_size(int: u64) -> usize { } impl Base32Iterable for RawDataPart { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let ts_iter = self.timestamp.fe_iter(); let fields_iter = self.tagged_fields.iter().map(RawTaggedField::fe_iter).flatten(); - Box::new(ts_iter.chain(fields_iter)) + ts_iter.chain(fields_iter) } } impl Base32Iterable for PositiveTimestamp { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let fes = encode_int_be_base32(self.as_unix_timestamp()); debug_assert!(fes.len() <= 7, "Invalid timestamp length"); let to_pad = 7 - fes.len(); - Box::new(core::iter::repeat(Fe32::Q).take(to_pad).chain(fes)) + core::iter::repeat(Fe32::Q).take(to_pad).chain(fes) } } impl Base32Iterable for RawTaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { - // Annoyingly, when we move to explicit types, we will need an - // explicit enum holding the two iterator variants. 
+ fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + define_iterator_enum!(TwoIters, A, B); match *self { - RawTaggedField::UnknownSemantics(ref content) => Box::new(content.iter().copied()), - RawTaggedField::KnownSemantics(ref tagged_field) => tagged_field.fe_iter(), + RawTaggedField::UnknownSemantics(ref content) => TwoIters::A(content.iter().copied()), + RawTaggedField::KnownSemantics(ref tagged_field) => TwoIters::B(tagged_field.fe_iter()), } } } impl Base32Iterable for Sha256 { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0[..].fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0[..].fe_iter() } } @@ -281,8 +293,8 @@ impl Base32Len for Sha256 { } impl Base32Iterable for Description { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.0 .0.as_bytes().fe_iter()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.0 .0.as_bytes().fe_iter() } } @@ -293,8 +305,8 @@ impl Base32Len for Description { } impl Base32Iterable for PayeePubKey { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(self.serialize().into_iter().bytes_to_fes()) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + self.serialize().into_iter().bytes_to_fes() } } @@ -305,8 +317,8 @@ impl Base32Len for PayeePubKey { } impl Base32Iterable for ExpiryTime { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.as_seconds())) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.as_seconds()) } } @@ -317,8 +329,8 @@ impl Base32Len for ExpiryTime { } impl Base32Iterable for MinFinalCltvExpiryDelta { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(encode_int_be_base32(self.0)) + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + encode_int_be_base32(self.0) } } @@ -329,8 +341,8 @@ impl Base32Len for MinFinalCltvExpiryDelta { } impl Base32Iterable for Fallback { - fn fe_iter<'s>(&'s self) -> Box + 's> { - Box::new(match *self { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { + match *self { 
Fallback::SegWitProgram { version: v, program: ref p } => { let v = Fe32::try_from(v.to_num()).expect("valid version"); core::iter::once(v).chain(p[..].fe_iter()) @@ -343,7 +355,7 @@ impl Base32Iterable for Fallback { // 18 'J' core::iter::once(Fe32::J).chain(hash[..].fe_iter()) }, - }) + } } } @@ -371,7 +383,7 @@ type RouteHintHopIter = iter::Chain< >; impl Base32Iterable for PrivateRoute { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { fn serialize_to_iter(hop: &RouteHintHop) -> RouteHintHopIter { let i1 = hop.src_node_id.serialize().into_iter(); let i2 = u64::to_be_bytes(hop.short_channel_id).into_iter(); @@ -381,7 +393,7 @@ impl Base32Iterable for PrivateRoute { i1.chain(i2).chain(i3).chain(i4).chain(i5) } - Box::new(self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes()) + self.0 .0.iter().map(serialize_to_iter).flatten().bytes_to_fes() } } @@ -391,16 +403,11 @@ impl Base32Len for PrivateRoute { } } -// Shorthand type -type TaggedFieldIter = core::iter::Chain, I>; - impl Base32Iterable for TaggedField { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { /// Writes a tagged field: tag, length and data. `tag` should be in `0..32` otherwise the /// function will panic. 
- fn write_tagged_field<'s, P>( - tag: u8, payload: &'s P, - ) -> TaggedFieldIter + 's>> + fn write_tagged_field<'s, P>(tag: u8, payload: &'s P) -> impl Iterator + 's where P: Base32Iterable + Base32Len + ?Sized, { @@ -416,54 +423,49 @@ impl Base32Iterable for TaggedField { .chain(payload.fe_iter()) } - // we will also need a giant enum for this - Box::new(match *self { + define_iterator_enum!(ManyIters, A, B, C, D, E, F, G, H, I, J, K); + match *self { TaggedField::PaymentHash(ref hash) => { - write_tagged_field(constants::TAG_PAYMENT_HASH, hash) + ManyIters::A(write_tagged_field(constants::TAG_PAYMENT_HASH, hash)) }, TaggedField::Description(ref description) => { - write_tagged_field(constants::TAG_DESCRIPTION, description) + ManyIters::B(write_tagged_field(constants::TAG_DESCRIPTION, description)) }, TaggedField::PayeePubKey(ref pub_key) => { - write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key) + ManyIters::C(write_tagged_field(constants::TAG_PAYEE_PUB_KEY, pub_key)) }, TaggedField::DescriptionHash(ref hash) => { - write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash) + ManyIters::D(write_tagged_field(constants::TAG_DESCRIPTION_HASH, hash)) }, TaggedField::ExpiryTime(ref duration) => { - write_tagged_field(constants::TAG_EXPIRY_TIME, duration) + ManyIters::E(write_tagged_field(constants::TAG_EXPIRY_TIME, duration)) }, TaggedField::MinFinalCltvExpiryDelta(ref expiry) => { - write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry) + ManyIters::F(write_tagged_field(constants::TAG_MIN_FINAL_CLTV_EXPIRY_DELTA, expiry)) }, TaggedField::Fallback(ref fallback_address) => { - write_tagged_field(constants::TAG_FALLBACK, fallback_address) + ManyIters::G(write_tagged_field(constants::TAG_FALLBACK, fallback_address)) }, TaggedField::PrivateRoute(ref route_hops) => { - write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops) + ManyIters::H(write_tagged_field(constants::TAG_PRIVATE_ROUTE, route_hops)) }, TaggedField::PaymentSecret(ref 
payment_secret) => { - write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret) + ManyIters::I(write_tagged_field(constants::TAG_PAYMENT_SECRET, payment_secret)) }, TaggedField::PaymentMetadata(ref payment_metadata) => { - write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata) + ManyIters::J(write_tagged_field(constants::TAG_PAYMENT_METADATA, payment_metadata)) }, TaggedField::Features(ref features) => { - write_tagged_field(constants::TAG_FEATURES, features) + ManyIters::K(write_tagged_field(constants::TAG_FEATURES, features)) }, - }) + } } } impl Base32Iterable for Bolt11InvoiceSignature { - fn fe_iter<'s>(&'s self) -> Box + 's> { + fn fe_iter<'s>(&'s self) -> impl Iterator + 's { let (recovery_id, signature) = self.0.serialize_compact(); - Box::new( - signature - .into_iter() - .chain(core::iter::once(recovery_id.to_i32() as u8)) - .bytes_to_fes(), - ) + signature.into_iter().chain(core::iter::once(recovery_id.to_i32() as u8)).bytes_to_fes() } } From 5ef35f20f466f5405114674f8cc7a1d218f96985 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 8 Oct 2025 13:15:38 +0000 Subject: [PATCH 07/22] Parallelize `ChannelMonitor` loading from async `KVStore`s Reading `ChannelMonitor`s on startup is one of the slowest parts of LDK initialization. Now that we have an async `KVStore`, there's no need for that, we can simply parallelize their loading, which we do here. Sadly, because Rust futures are pretty unergonomic, we have to add some `unsafe {}` here, but arguing it's fine is relatively straightforward.
--- lightning/src/util/async_poll.rs | 25 +++++++++++++++---------- lightning/src/util/persist.rs | 17 ++++++++++++----- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 9c2ca4c247f..931d2817e30 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -15,26 +15,31 @@ use core::marker::Unpin; use core::pin::Pin; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -pub(crate) enum ResultFuture>, E: Unpin> { +pub(crate) enum ResultFuture + Unpin, O> { Pending(F), - Ready(Result<(), E>), + Ready(O), } -pub(crate) struct MultiResultFuturePoller> + Unpin, E: Unpin> { - futures_state: Vec>, +pub(crate) struct MultiResultFuturePoller + Unpin, O> { + futures_state: Vec>, } -impl> + Unpin, E: Unpin> MultiResultFuturePoller { - pub fn new(futures_state: Vec>) -> Self { +impl + Unpin, O> MultiResultFuturePoller { + pub fn new(futures_state: Vec>) -> Self { Self { futures_state } } } -impl> + Unpin, E: Unpin> Future for MultiResultFuturePoller { - type Output = Vec>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { +impl + Unpin, O> Future for MultiResultFuturePoller { + type Output = Vec; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut have_pending_futures = false; - let futures_state = &mut self.get_mut().futures_state; + // SAFETY: While we are pinned, we can't get direct access to `futures_state` because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. 
+ let futures_state = unsafe { &mut self.get_unchecked_mut().futures_state }; for state in futures_state.iter_mut() { match state { ResultFuture::Pending(ref mut fut) => match Pin::new(fut).poll(cx) { diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 7feb781a57a..2e338a86e06 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -34,7 +34,9 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; +use crate::util::async_poll::{ + dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, +}; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -875,11 +877,16 @@ where let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; - let mut res = Vec::with_capacity(monitor_list.len()); + let mut futures = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { - let result = - self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?; - if let Some(read_res) = result { + futures.push(ResultFuture::Pending(Box::pin(async move { + self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + if let Some(read_res) = result? 
{ res.push(read_res); } } From 572007a6f91222c846696fd545f6f72da0fe0543 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 9 Oct 2025 00:43:28 +0000 Subject: [PATCH 08/22] Allow `FutureSpawner` to return the result of the spawned future `tokio::spawn` can be used both to spawn a forever-running background task or to spawn a task which gets `poll`ed independently and eventually returns a result which the callsite wants. In LDK, we have only ever needed the first, and thus didn't bother defining a return type for `FutureSpawner::spawn`. However, in the next commit we'll start using `FutureSpawner` in a context where we actually do want the spawned future's result. Thus, here, we add a result output to `FutureSpawner::spawn`, mirroring the `tokio::spawn` API. --- lightning-block-sync/src/gossip.rs | 10 ++- lightning/src/util/native_async.rs | 115 +++++++++++++++++++++++++++-- lightning/src/util/persist.rs | 13 +++- 3 files changed, 125 insertions(+), 13 deletions(-) diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 596098350c7..004914daac9 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -49,8 +49,12 @@ pub trait UtxoSource: BlockSource + 'static { pub struct TokioSpawner; #[cfg(feature = "tokio")] impl FutureSpawner for TokioSpawner { - fn spawn + Send + 'static>(&self, future: T) { - tokio::spawn(future); + type E = tokio::task::JoinError; + type SpawnedFutureResult = tokio::task::JoinHandle; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + tokio::spawn(future) } } @@ -280,7 +284,7 @@ where let gossiper = Arc::clone(&self.gossiper); let block_cache = Arc::clone(&self.block_cache); let pmw = Arc::clone(&self.peer_manager_wake); - self.spawn.spawn(async move { + let _ = self.spawn.spawn(async move { let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await; fut.resolve(gossiper.network_graph(), &*gossiper, res); (pmw)(); diff
--git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index 886146e976d..a9f7e01ceea 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -8,23 +8,44 @@ //! environment. #[cfg(all(test, feature = "std"))] -use crate::sync::Mutex; +use crate::sync::{Arc, Mutex}; use crate::util::async_poll::{MaybeSend, MaybeSync}; +#[cfg(all(test, not(feature = "std")))] +use alloc::rc::Rc; + #[cfg(all(test, not(feature = "std")))] use core::cell::RefCell; +#[cfg(test)] +use core::convert::Infallible; use core::future::Future; #[cfg(test)] use core::pin::Pin; +#[cfg(test)] +use core::task::{Context, Poll}; -/// A generic trait which is able to spawn futures in the background. +/// A generic trait which is able to spawn futures to be polled in the background. +/// +/// When the spawned future completes, the returned [`Self::SpawnedFutureResult`] should resolve +/// with the output of the spawned future. +/// +/// Spawned futures must be polled independently in the background even if the returned +/// [`Self::SpawnedFutureResult`] is dropped without being polled. This matches the semantics of +/// `tokio::spawn`. /// /// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { + /// The error type of [`Self::SpawnedFutureResult`]. This can be used to indicate that the + /// spawned future was cancelled or panicked. + type E; + /// The result of [`Self::spawn`], a future which completes when the spawned future completes. + type SpawnedFutureResult: Future> + Unpin; /// Spawns the given future as a background task. /// /// This method MUST NOT block on the given future immediately. 
- fn spawn + MaybeSend + 'static>(&self, future: T); + fn spawn + MaybeSend + 'static>( + &self, future: T, + ) -> Self::SpawnedFutureResult; } #[cfg(test)] @@ -39,6 +60,69 @@ pub(crate) struct FutureQueue(Mutex>>>); #[cfg(all(test, not(feature = "std")))] pub(crate) struct FutureQueue(RefCell>>>); +#[cfg(all(test, feature = "std"))] +pub struct FutureQueueCompletion(Arc>>); +#[cfg(all(test, not(feature = "std")))] +pub struct FutureQueueCompletion(Rc>>); + +#[cfg(all(test, feature = "std"))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Arc::new(Mutex::new(None))) + } + + fn complete(&self, o: O) { + *self.0.lock().unwrap() = Some(o); + } +} + +#[cfg(all(test, feature = "std"))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, not(feature = "std")))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Rc::new(RefCell::new(None))) + } + + fn complete(&self, o: O) { + *self.0.borrow_mut() = Some(o); + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, feature = "std"))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.lock().unwrap().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.borrow_mut().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + #[cfg(test)] impl FutureQueue { pub(crate) fn new() -> Self { @@ -74,7 +158,6 @@ impl FutureQueue { futures = self.0.borrow_mut(); } futures.retain_mut(|fut| { - use core::task::{Context, Poll}; let waker = crate::util::async_poll::dummy_waker(); match 
fut.as_mut().poll(&mut Context::from_waker(&waker)) { Poll::Ready(()) => false, @@ -86,7 +169,16 @@ impl FutureQueue { #[cfg(test)] impl FutureSpawner for FutureQueue { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -95,6 +187,7 @@ impl FutureSpawner for FutureQueue { { self.0.borrow_mut().push(Box::pin(future)); } + completion } } @@ -102,7 +195,16 @@ impl FutureSpawner for FutureQueue { impl + MaybeSend + MaybeSync + 'static> FutureSpawner for D { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -111,5 +213,6 @@ impl + MaybeSend + MaybeSync + 'static { self.0.borrow_mut().push(Box::pin(future)); } + completion } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 2e338a86e06..5f3a44ce60f 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -16,6 +16,7 @@ use alloc::sync::Arc; use bitcoin::hashes::hex::FromHex; use bitcoin::{BlockHash, Txid}; +use core::convert::Infallible; use core::future::Future; use core::mem; use core::ops::Deref; @@ -491,7 +492,11 @@ where struct PanicingSpawner; impl FutureSpawner for PanicingSpawner { - fn spawn + MaybeSend + 'static>(&self, _: T) { + type E = Infallible; + type SpawnedFutureResult = Box> + Unpin>; + 
fn spawn + MaybeSend + 'static>( + &self, _: T, + ) -> Self::SpawnedFutureResult { unreachable!(); } } @@ -959,7 +964,7 @@ where let future = inner.persist_new_channel(monitor_name, monitor); let channel_id = monitor.channel_id(); let completion = (monitor.channel_id(), monitor.get_latest_update_id()); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => { inner.async_completed_updates.lock().unwrap().push(completion); @@ -991,7 +996,7 @@ where None }; let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); @@ -1009,7 +1014,7 @@ where pub(crate) fn spawn_async_archive_persisted_channel(&self, monitor_name: MonitorName) { let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { inner.archive_persisted_channel(monitor_name).await; }); } From 15abec0700aad29f89cb467ba3ec3eb6e8b90521 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 23:00:36 +0000 Subject: [PATCH 09/22] Add an option to deserialize monitors in parallel in async load `MonitorUpdatingPersister::read_all_channel_monitors_with_updates` was made to do the IO operations in parallel in a previous commit, however in practice this doesn't provide material parallelism for large routing nodes. Because deserializing `ChannelMonitor`s is the bulk of the work (when IO operations are sufficiently fast), we end up blocked in single-threaded work nearly the entire time. Here, we add an alternative option - a new `read_all_channel_monitors_with_updates_parallel` method which uses the `FutureSpawner` to cause the deserialization operations to proceed in parallel. 
--- lightning/src/util/persist.rs | 54 +++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 5f3a44ce60f..266229171cf 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -870,6 +870,10 @@ where /// Reads all stored channel monitors, along with any stored updates for them. /// + /// While the reads themselves are performed in parallel, deserializing the + /// [`ChannelMonitor`]s is not. For large [`ChannelMonitor`]s actively used for forwarding, + /// this may substantially limit the parallelism of this method. + /// /// It is extremely important that your [`KVStore::read`] implementation uses the + /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the + /// documentation for [`MonitorUpdatingPersister`]. @@ -898,6 +902,56 @@ where Ok(res) } + /// Reads all stored channel monitors, along with any stored updates for them, in parallel. + /// + /// Because deserializing large [`ChannelMonitor`]s from forwarding nodes is often CPU-bound, + /// this version of [`Self::read_all_channel_monitors_with_updates`] uses the [`FutureSpawner`] + /// to parallelize deserialization as well as the IO operations. + /// + /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` + /// and other multi-threaded runtime requirements), this method requires that `self` be an + /// `Arc` that can live for `'static` and be sent and accessed across threads. + /// + /// It is extremely important that your [`KVStore::read`] implementation uses the + /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the + /// documentation for [`MonitorUpdatingPersister`].
+ pub async fn read_all_channel_monitors_with_updates_parallel( + self: &Arc, + ) -> Result< + Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > where + K: MaybeSend + MaybeSync + 'static, + L: MaybeSend + MaybeSync + 'static, + ES: MaybeSend + MaybeSync + 'static, + SP: MaybeSend + MaybeSync + 'static, + BI: MaybeSend + MaybeSync + 'static, + FE: MaybeSend + MaybeSync + 'static, + ::EcdsaSigner: MaybeSend, + { + let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let mut futures = Vec::with_capacity(monitor_list.len()); + for monitor_key in monitor_list { + let us = Arc::clone(&self); + futures.push(ResultFuture::Pending(self.0.future_spawner.spawn(async move { + us.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + match result { + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "Future was cancelled")), + Ok(Err(e)) => return Err(e), + Ok(Ok(Some(read_res))) => res.push(read_res), + Ok(Ok(None)) => {}, + } + } + Ok(res) + } + /// Read a single channel monitor, along with any stored updates for it. /// /// It is extremely important that your [`KVStoreSync::read`] implementation uses the From d93c809539d8fa6e91b376b5dbc02789911c18ed Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 8 Oct 2025 14:38:03 +0000 Subject: [PATCH 10/22] Avoid a storage RTT when loading `ChannelMonitor`s without updates When reading `ChannelMonitor`s from a `MonitorUpdatingPersister` on startup, we have to make sure to load any `ChannelMonitorUpdate`s and re-apply them as well. For users of async persistence who don't have any `ChannelMonitorUpdate`s (e.g. 
because they set `maximum_pending_updates` to 0 or, in the future, we avoid persisting updates for small `ChannelMonitor`s), this means two round-trips to the storage backend, one to load the `ChannelMonitor` and one to try to read the next `ChannelMonitorUpdate` only to have it fail. Instead, here, we use `KVStore::list` to fetch the list of stored `ChannelMonitorUpdate`s, which for async `KVStore` users allows us to parallelize the list of update fetching and the `ChannelMonitor` loading itself. Then we know exactly when to stop reading `ChannelMonitorUpdate`s, including reading none if there are none to read. This also avoids relying on `KVStore::read` correctly returning `NotFound` in order to correctly discover when to stop reading `ChannelMonitorUpdate`s. --- lightning/src/util/async_poll.rs | 69 ++++++++++++++++++++++++ lightning/src/util/persist.rs | 90 ++++++++++---------------------- 2 files changed, 98 insertions(+), 61 deletions(-) diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 931d2817e30..57df5b26cb0 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -20,6 +20,75 @@ pub(crate) enum ResultFuture + Unpin, O> { Ready(O), } +pub(crate) struct TwoFutureJoiner< + AO, + BO, + AF: Future + Unpin, + BF: Future + Unpin, +> { + a: Option>, + b: Option>, +} + +impl + Unpin, BF: Future + Unpin> + TwoFutureJoiner +{ + pub fn new(future_a: AF, future_b: BF) -> Self { + Self { a: Some(ResultFuture::Pending(future_a)), b: Some(ResultFuture::Pending(future_b)) } + } +} + +impl + Unpin, BF: Future + Unpin> Future + for TwoFutureJoiner +{ + type Output = (AO, BO); + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(AO, BO)> { + let mut have_pending_futures = false; + // SAFETY: While we are pinned, we can't get direct access to our internal state because we + // aren't `Unpin`. 
However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. + let state = unsafe { &mut self.get_unchecked_mut() }; + macro_rules! poll_future { + ($future: ident) => { + match state.$future { + Some(ResultFuture::Pending(ref mut fut)) => match Pin::new(fut).poll(cx) { + Poll::Ready(res) => { + state.$future = Some(ResultFuture::Ready(res)); + }, + Poll::Pending => { + have_pending_futures = true; + }, + }, + Some(ResultFuture::Ready(_)) => {}, + None => { + debug_assert!(false, "Future polled after Ready"); + return Poll::Pending; + }, + } + }; + } + poll_future!(a); + poll_future!(b); + + if have_pending_futures { + Poll::Pending + } else { + Poll::Ready(( + match state.a.take() { + Some(ResultFuture::Ready(a)) => a, + _ => unreachable!(), + }, + match state.b.take() { + Some(ResultFuture::Ready(b)) => b, + _ => unreachable!(), + }, + )) + } + } +} + pub(crate) struct MultiResultFuturePoller + Unpin, O> { futures_state: Vec>, } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 266229171cf..1b1780fee26 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -36,7 +36,7 @@ use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; use crate::util::async_poll::{ - dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, + dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, }; use crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; @@ -576,15 +576,6 @@ fn poll_sync_future(future: F) -> F::Output { /// list channel monitors themselves and load channels individually using /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`]. 
/// -/// ## EXTREMELY IMPORTANT -/// -/// It is extremely important that your [`KVStoreSync::read`] implementation uses the -/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in -/// that circumstance (not when there is really a permissions error, for example). This is because -/// neither channel monitor reading function lists updates. Instead, either reads the monitor, and -/// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until -/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`]. -/// /// # Pruning stale channel updates /// /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`. @@ -658,10 +649,6 @@ where } /// Reads all stored channel monitors, along with any stored updates for them. - /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub fn read_all_channel_monitors_with_updates( &self, ) -> Result< @@ -673,10 +660,6 @@ where /// Read a single channel monitor, along with any stored updates for it. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. - /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -873,10 +856,6 @@ where /// While the reads themselves are performend in parallel, deserializing the /// [`ChannelMonitor`]s is not. For large [`ChannelMonitor`]s actively used for forwarding, /// this may substantially limit the parallelism of this method. 
- /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub async fn read_all_channel_monitors_with_updates( &self, ) -> Result< @@ -911,10 +890,6 @@ where /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` /// and other multi-threaded runtime requirements), this method requires that `self` be an /// `Arc` that can live for `'static` and be sent and accessed across threads. - /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub async fn read_all_channel_monitors_with_updates_parallel( self: &Arc, ) -> Result< @@ -954,10 +929,6 @@ where /// Read a single channel monitor, along with any stored updates for it. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. - /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -1116,40 +1087,37 @@ where io::Error, > { let monitor_name = MonitorName::from_str(monitor_key)?; - let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; - let (block_hash, monitor) = match read_res { + let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); + let list_future = pin!(self + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); + let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; + let (block_hash, monitor) = match read_res? 
{ Some(res) => res, None => return Ok(None), }; let mut current_update_id = monitor.get_latest_update_id(); - // TODO: Parallelize this loop by speculatively reading a batch of updates - loop { - current_update_id = match current_update_id.checked_add(1) { - Some(next_update_id) => next_update_id, - None => break, - }; - let update_name = UpdateName::from(current_update_id); - let update = match self.read_monitor_update(monitor_key, &update_name).await { - Ok(update) => update, - Err(err) if err.kind() == io::ErrorKind::NotFound => { - // We can't find any more updates, so we are done. - break; - }, - Err(err) => return Err(err), - }; - - monitor - .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) - .map_err(|e| { - log_error!( - self.logger, - "Monitor update failed. monitor: {} update: {} reason: {:?}", - monitor_key, - update_name.as_str(), - e - ); - io::Error::new(io::ErrorKind::Other, "Monitor update failed") - })?; + let updates: Result, _> = + list_res?.into_iter().map(|name| UpdateName::new(name)).collect(); + let mut updates = updates?; + updates.sort_unstable(); + // TODO: Parallelize this loop + for update_name in updates { + if update_name.0 > current_update_id { + let update = self.read_monitor_update(monitor_key, &update_name).await?; + monitor + .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) + .map_err(|e| { + log_error!( + self.logger, + "Monitor update failed. 
monitor: {} update: {} reason: {:?}", + monitor_key, + update_name.as_str(), + e + ); + io::Error::new(io::ErrorKind::Other, "Monitor update failed") + })?; + } } Ok(Some((block_hash, monitor))) } @@ -1524,7 +1492,7 @@ impl core::fmt::Display for MonitorName { /// let monitor_name = "some_monitor_name"; /// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str()); /// ``` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct UpdateName(pub u64, String); impl UpdateName { From 0503f6b20350c85e2173e9915179f42e07d4a61e Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 9 Oct 2025 13:11:07 +0000 Subject: [PATCH 11/22] Parallelize `ChannelMonitorUpdate` loading When reading `ChannelMonitor`s from a `MonitorUpdatingPersister` on startup, we have to make sure to load any `ChannelMonitorUpdate`s and re-apply them as well. Now that we know which `ChannelMonitorUpdate`s to load from `list`ing the entries from the `KVStore` we can parallelize the reads themselves, which we do here. Now, loading all `ChannelMonitor`s from an async `KVStore` requires only three full RTTs - one to list the set of `ChannelMonitor`s, one to both fetch the `ChanelMonitor` and list the set of `ChannelMonitorUpdate`s, and one to fetch all the `ChannelMonitorUpdate`s (with the last one skipped when there are no `ChannelMonitorUpdate`s to read). 
--- lightning/src/util/persist.rs | 40 +++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 1b1780fee26..ca2da7593b5 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1096,28 +1096,32 @@ where Some(res) => res, None => return Ok(None), }; - let mut current_update_id = monitor.get_latest_update_id(); + let current_update_id = monitor.get_latest_update_id(); let updates: Result, _> = list_res?.into_iter().map(|name| UpdateName::new(name)).collect(); let mut updates = updates?; updates.sort_unstable(); - // TODO: Parallelize this loop - for update_name in updates { - if update_name.0 > current_update_id { - let update = self.read_monitor_update(monitor_key, &update_name).await?; - monitor - .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) - .map_err(|e| { - log_error!( - self.logger, - "Monitor update failed. monitor: {} update: {} reason: {:?}", - monitor_key, - update_name.as_str(), - e - ); - io::Error::new(io::ErrorKind::Other, "Monitor update failed") - })?; - } + let updates_to_load = updates.iter().filter(|update| update.0 > current_update_id); + let mut update_futures = Vec::with_capacity(updates_to_load.clone().count()); + for update_name in updates_to_load { + update_futures.push(ResultFuture::Pending(Box::pin(async move { + (update_name, self.read_monitor_update(monitor_key, update_name).await) + }))); + } + for (update_name, update_res) in MultiResultFuturePoller::new(update_futures).await { + let update = update_res?; + monitor + .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) + .map_err(|e| { + log_error!( + self.logger, + "Monitor update failed. 
monitor: {} update: {} reason: {:?}", + monitor_key, + update_name.as_str(), + e + ); + io::Error::new(io::ErrorKind::Other, "Monitor update failed") + })?; } Ok(Some((block_hash, monitor))) } From 47c34fa044c9cf740bc10347d7e2d2822daf5ddf Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 13:48:04 +0000 Subject: [PATCH 12/22] Update BestBlock to store ANTI_REORG_DELAY * 2 recent block hashes On restart, LDK expects the chain to be replayed starting from where it was when objects were last serialized. This is fine in the normal case, but if there was a reorg and the node which we were syncing from either resynced or was changed, the last block that we were synced as of might no longer be available. As a result, it becomes impossible to figure out where the fork point is, and thus to replay the chain. Luckily, changing the block source during a reorg isn't exactly common, but we shouldn't end up with a bricked node. To address this, `lightning-block-sync` allows the user to pass in `Cache` which can be used to cache recent blocks and thus allow for reorg handling in this case. However, serialization for, and a reasonable default implementation of a `Cache` was never built. Instead, here, we start taking a different approach. To avoid developers having to persist yet another object, we move `BestBlock` to storing some number of recent block hashes. This allows us to find the fork point with just the serialized state. In conjunction with 403dc1a48bb71ae794f6883ae0b760aad44cda39 (which allows us to disconnect blocks without having the stored header), this should allow us to replay chain state after a reorg even if we no longer have access to the top few blocks of the old chain tip. While we only really need to store `ANTI_REORG_DELAY` blocks (as we generally assume that any deeper reorg won't happen and thus we don't guarantee we handle it correctly), its nice to store a few more to be able to handle more than a six block reorg. 
While other parts of the codebase may not be entirely robust against such a reorg if the transactions confirmed change out from under us, its entirely possible (and, indeed, common) for reorgs to contain nearly identical transactions. --- lightning/src/chain/mod.rs | 78 ++++++++++++++++++++++++++++++++++++-- lightning/src/util/ser.rs | 27 +++++++++++++ 2 files changed, 101 insertions(+), 4 deletions(-) diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index b4cc6a302ae..8ef796ecb2e 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -18,9 +18,10 @@ use bitcoin::network::Network; use bitcoin::script::{Script, ScriptBuf}; use bitcoin::secp256k1::PublicKey; -use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; +use crate::chain::channelmonitor::{ + ANTI_REORG_DELAY, ChannelMonitor, ChannelMonitorUpdate, MonitorEvent, +}; use crate::chain::transaction::{OutPoint, TransactionData}; -use crate::impl_writeable_tlv_based; use crate::ln::types::ChannelId; use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::HTLCDescriptor; @@ -42,13 +43,20 @@ pub struct BestBlock { pub block_hash: BlockHash, /// The height at which the block was confirmed. pub height: u32, + /// Previous blocks immediately before [`Self::block_hash`], in reverse chronological order. + /// + /// These ensure we can find the fork point of a reorg if our block source no longer has the + /// previous best tip after a restart. + pub previous_blocks: [Option; ANTI_REORG_DELAY as usize * 2], } impl BestBlock { /// Constructs a `BestBlock` that represents the genesis block at height 0 of the given /// network. 
pub fn from_network(network: Network) -> Self { - BestBlock { block_hash: genesis_block(network).header.block_hash(), height: 0 } + let block_hash = genesis_block(network).header.block_hash(); + let previous_blocks = [None; ANTI_REORG_DELAY as usize * 2]; + BestBlock { block_hash, height: 0, previous_blocks } } /// Returns a `BestBlock` as identified by the given block hash and height. @@ -56,13 +64,75 @@ impl BestBlock { /// This is not exported to bindings users directly as the bindings auto-generate an /// equivalent `new`. pub fn new(block_hash: BlockHash, height: u32) -> Self { - BestBlock { block_hash, height } + let previous_blocks = [None; ANTI_REORG_DELAY as usize * 2]; + BestBlock { block_hash, height, previous_blocks } + } + + /// Advances to a new block at height [`Self::height`] + 1. + pub fn advance(&mut self, new_hash: BlockHash) { + // Shift all block hashes to the right (making room for the old tip at index 0) + for i in (1..self.previous_blocks.len()).rev() { + self.previous_blocks[i] = self.previous_blocks[i - 1]; + } + + // The old tip becomes the new index 0 (tip-1) + self.previous_blocks[0] = Some(self.block_hash); + + // Update to the new tip + self.block_hash = new_hash; + self.height += 1; + } + + /// Returns the block hash at the given height, if available in our history. + pub fn get_hash_at_height(&self, height: u32) -> Option { + if height > self.height { + return None; + } + if height == self.height { + return Some(self.block_hash); + } + + // offset = 1 means we want tip-1, which is block_hashes[0] + // offset = 2 means we want tip-2, which is block_hashes[1], etc. + let offset = self.height.saturating_sub(height) as usize; + if offset >= 1 && offset <= self.previous_blocks.len() { + self.previous_blocks[offset - 1] + } else { + None + } + } + + /// Find the most recent common ancestor between two BestBlocks by searching their block hash + /// histories. 
+ /// + /// Returns the common block hash and height, or None if no common block is found in the + /// available histories. + pub fn find_common_ancestor(&self, other: &BestBlock) -> Option<(BlockHash, u32)> { + // First check if either tip matches + if self.block_hash == other.block_hash && self.height == other.height { + return Some((self.block_hash, self.height)); + } + + // Check all heights covered by self's history + let min_height = self.height.saturating_sub(self.previous_blocks.len() as u32); + for check_height in (min_height..=self.height).rev() { + if let Some(self_hash) = self.get_hash_at_height(check_height) { + if let Some(other_hash) = other.get_hash_at_height(check_height) { + if self_hash == other_hash { + return Some((self_hash, check_height)); + } + } + } + } + None } } impl_writeable_tlv_based!(BestBlock, { (0, block_hash, required), + (1, previous_blocks_read, (legacy, [Option; ANTI_REORG_DELAY as usize * 2], |us: &BestBlock| Some(us.previous_blocks))), (2, height, required), + (unused, previous_blocks, (static_value, previous_blocks_read.unwrap_or([None; ANTI_REORG_DELAY as usize * 2]))), }); /// The `Listen` trait is used to notify when blocks have been connected or disconnected from the diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..2bd8a9eb879 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1448,6 +1448,33 @@ impl Readable for BlockHash { } } +impl Writeable for [Option; 12] { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + for hash_opt in self { + match hash_opt { + Some(hash) => hash.write(w)?, + None => ([0u8; 32]).write(w)?, + } + } + Ok(()) + } +} + +impl Readable for [Option; 12] { + fn read(r: &mut R) -> Result { + use bitcoin::hashes::Hash; + + let mut res = [None; 12]; + for hash_opt in res.iter_mut() { + let buf: [u8; 32] = Readable::read(r)?; + if buf != [0; 32] { + *hash_opt = Some(BlockHash::from_slice(&buf[..]).unwrap()); + } + } + Ok(res) + } +} + 
impl Writeable for ChainHash { fn write(&self, w: &mut W) -> Result<(), io::Error> { w.write_all(self.as_bytes()) From 7acba1712e59aab654205ab9e0f4586abcecc305 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 14:08:16 +0000 Subject: [PATCH 13/22] Return `BestBlock` when deserializing chain-synced structs The deserialization of `ChannelMonitor`, `ChannelManager`, and `OutputSweeper` is implemented for a `(BlockHash, ...)` pair rather than on the object itself. This ensures developers are pushed to think about initial chain sync after deserialization and provides the latest chain sync state conviniently at deserialization-time. In the previous commit we started storing additional recent block hashes in `BestBlock` for use during initial sync to ensure we can handle reorgs while offline if the chain source loses the reorged-out blocks. Here, we move the deserialization routines to be on a `(BestBlock, ...)` pair instead of `(BlockHash, ...)`, providing access to those recent block hashes at deserialization-time. --- lightning-block-sync/src/init.rs | 17 ++++---- lightning/src/chain/channelmonitor.rs | 14 +++---- lightning/src/ln/chanmon_update_fail_tests.rs | 4 +- lightning/src/ln/channelmanager.rs | 20 +++++----- lightning/src/ln/functional_test_utils.rs | 8 ++-- lightning/src/ln/functional_tests.rs | 7 ++-- lightning/src/ln/reload_tests.rs | 11 +++--- lightning/src/util/persist.rs | 39 ++++++++++--------- lightning/src/util/test_utils.rs | 9 +++-- 9 files changed, 64 insertions(+), 65 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index a870f8ca88c..61f44c6139e 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -40,11 +40,10 @@ where /// switching to [`SpvClient`]. 
For example: /// /// ``` -/// use bitcoin::hash_types::BlockHash; /// use bitcoin::network::Network; /// /// use lightning::chain; -/// use lightning::chain::Watch; +/// use lightning::chain::{BestBlock, Watch}; /// use lightning::chain::chainmonitor; /// use lightning::chain::chainmonitor::ChainMonitor; /// use lightning::chain::channelmonitor::ChannelMonitor; @@ -89,14 +88,14 @@ where /// logger: &L, /// persister: &P, /// ) { -/// // Read a serialized channel monitor paired with the block hash when it was persisted. +/// // Read a serialized channel monitor paired with the best block when it was persisted. /// let serialized_monitor = "..."; -/// let (monitor_block_hash, mut monitor) = <(BlockHash, ChannelMonitor)>::read( +/// let (monitor_best_block, mut monitor) = <(BestBlock, ChannelMonitor)>::read( /// &mut Cursor::new(&serialized_monitor), (entropy_source, signer_provider)).unwrap(); /// -/// // Read the channel manager paired with the block hash when it was persisted. +/// // Read the channel manager paired with the best block when it was persisted. 
/// let serialized_manager = "..."; -/// let (manager_block_hash, mut manager) = { +/// let (manager_best_block, mut manager) = { /// let read_args = ChannelManagerReadArgs::new( /// entropy_source, /// node_signer, @@ -110,7 +109,7 @@ where /// config, /// vec![&mut monitor], /// ); -/// <(BlockHash, ChannelManager<&ChainMonitor, &T, &ES, &NS, &SP, &F, &R, &MR, &L>)>::read( +/// <(BestBlock, ChannelManager<&ChainMonitor, &T, &ES, &NS, &SP, &F, &R, &MR, &L>)>::read( /// &mut Cursor::new(&serialized_manager), read_args).unwrap() /// }; /// @@ -118,8 +117,8 @@ where /// let mut cache = UnboundedCache::new(); /// let mut monitor_listener = (monitor, &*tx_broadcaster, &*fee_estimator, &*logger); /// let listeners = vec![ -/// (monitor_block_hash, &monitor_listener as &dyn chain::Listen), -/// (manager_block_hash, &manager as &dyn chain::Listen), +/// (monitor_best_block.block_hash, &monitor_listener as &dyn chain::Listen), +/// (manager_best_block.block_hash, &manager as &dyn chain::Listen), /// ]; /// let chain_tip = init::synchronize_listeners( /// block_source, Network::Bitcoin, &mut cache, listeners).await.unwrap(); diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 1d035b68650..e2ea5855576 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1059,7 +1059,7 @@ impl Readable for IrrevocablyResolvedHTLC { /// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date /// information and are actively monitoring the chain. /// -/// Like the [`ChannelManager`], deserialization is implemented for `(BlockHash, ChannelMonitor)`, +/// Like the [`ChannelManager`], deserialization is implemented for `(BestBlock, ChannelMonitor)`, /// providing you with the last block hash which was connected before shutting down. You must begin /// syncing the chain from that point, disconnecting and connecting blocks as required to get to /// the best chain on startup. 
Note that all [`ChannelMonitor`]s passed to a [`ChainMonitor`] must @@ -1067,7 +1067,7 @@ impl Readable for IrrevocablyResolvedHTLC { /// initialization. /// /// For those loading potentially-ancient [`ChannelMonitor`]s, deserialization is also implemented -/// for `Option<(BlockHash, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`] +/// for `Option<(BestBlock, ChannelMonitor)>`. LDK can no longer deserialize a [`ChannelMonitor`] /// that was first created in LDK prior to 0.0.110 and last updated prior to LDK 0.0.119. In such /// cases, the `Option<(..)>` deserialization option may return `Ok(None)` rather than failing to /// deserialize, allowing you to differentiate between the two cases. @@ -6420,7 +6420,7 @@ where const MAX_ALLOC_SIZE: usize = 64 * 1024; impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> - for (BlockHash, ChannelMonitor) + for (BestBlock, ChannelMonitor) { fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { match >::read(reader, args) { @@ -6432,7 +6432,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP } impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP)> - for Option<(BlockHash, ChannelMonitor)> + for Option<(BestBlock, ChannelMonitor)> { #[rustfmt::skip] fn read(reader: &mut R, args: (&'a ES, &'b SP)) -> Result { @@ -6857,14 +6857,14 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP To continue, run a v0.1 release, send/route a payment over the channel or close it."); } } - Ok(Some((best_block.block_hash, monitor))) + Ok(Some((best_block, monitor))) } } #[cfg(test)] mod tests { use bitcoin::amount::Amount; - use bitcoin::hash_types::{BlockHash, Txid}; + use bitcoin::hash_types::Txid; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; use bitcoin::hex::FromHex; @@ -6964,7 +6964,7 @@ mod tests { 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(&new_header, &[(0, broadcast_tx)], conf_height); - let (_, pre_update_monitor) = <(BlockHash, ChannelMonitor<_>)>::read( + let (_, pre_update_monitor) = <(BestBlock, ChannelMonitor<_>)>::read( &mut io::Cursor::new(&get_monitor!(nodes[1], channel.2).encode()), (&nodes[1].keys_manager.backing, &nodes[1].keys_manager.backing)).unwrap(); diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 1a9af4f2071..87eec7c48aa 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -16,7 +16,7 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::chainmonitor::ChainMonitor; use crate::chain::channelmonitor::{ChannelMonitor, MonitorEvent, ANTI_REORG_DELAY}; use crate::chain::transaction::OutPoint; -use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; @@ -93,7 +93,7 @@ fn test_monitor_and_persister_update_fail() { let chain_mon = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); - let (_, new_monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, new_monitor) = <(BestBlock, ChannelMonitor)>::read( &mut &monitor.encode()[..], (nodes[0].keys_manager, nodes[0].keys_manager), ) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 644920557d2..03ce7affe20 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1941,7 +1941,6 @@ where /// detailed in the [`ChannelManagerReadArgs`] documentation. 
/// /// ``` -/// use bitcoin::BlockHash; /// use bitcoin::network::Network; /// use lightning::chain::BestBlock; /// # use lightning::chain::channelmonitor::ChannelMonitor; @@ -1990,8 +1989,8 @@ where /// entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, /// router, message_router, logger, config, channel_monitors.iter().collect(), /// ); -/// let (block_hash, channel_manager) = -/// <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?; +/// let (best_block, channel_manager) = +/// <(BestBlock, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?; /// /// // Update the ChannelManager and ChannelMonitors with the latest chain data /// // ... @@ -2557,7 +2556,7 @@ where /// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds /// will be lost (modulo on-chain transaction fees). /// -/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which +/// Note that the deserializer is only implemented for `(`[`BestBlock`]`, `[`ChannelManager`]`)`, which /// tells you the last block hash which was connected. You should get the best block tip before using the manager. /// See [`chain::Listen`] and [`chain::Confirm`] for more details. 
/// @@ -2624,7 +2623,6 @@ where /// [`peer_disconnected`]: msgs::BaseMessageHandler::peer_disconnected /// [`funding_created`]: msgs::FundingCreated /// [`funding_transaction_generated`]: Self::funding_transaction_generated -/// [`BlockHash`]: bitcoin::hash_types::BlockHash /// [`update_channel`]: chain::Watch::update_channel /// [`ChannelUpdate`]: msgs::ChannelUpdate /// [`read`]: ReadableArgs::read @@ -16695,7 +16693,7 @@ impl< MR: Deref, L: Deref + Clone, > ReadableArgs> - for (BlockHash, Arc>) + for (BestBlock, Arc>) where M::Target: chain::Watch<::EcdsaSigner>, T::Target: BroadcasterInterface, @@ -16710,9 +16708,9 @@ where fn read( reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>, ) -> Result { - let (blockhash, chan_manager) = - <(BlockHash, ChannelManager)>::read(reader, args)?; - Ok((blockhash, Arc::new(chan_manager))) + let (best_block, chan_manager) = + <(BestBlock, ChannelManager)>::read(reader, args)?; + Ok((best_block, Arc::new(chan_manager))) } } @@ -16728,7 +16726,7 @@ impl< MR: Deref, L: Deref + Clone, > ReadableArgs> - for (BlockHash, ChannelManager) + for (BestBlock, ChannelManager) where M::Target: chain::Watch<::EcdsaSigner>, T::Target: BroadcasterInterface, @@ -18402,7 +18400,7 @@ where //TODO: Broadcast channel update for closed channels, but only after we've made a //connection or two. 
- Ok((best_block_hash.clone(), channel_manager)) + Ok((best_block, channel_manager)) } } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 271d458bcc8..57e8b5aa1cd 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -847,7 +847,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let mon = self.chain_monitor.chain_monitor.get_monitor(channel_id).unwrap(); mon.write(&mut w).unwrap(); let (_, deserialized_monitor) = - <(BlockHash, ChannelMonitor)>::read( + <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -875,7 +875,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let mut w = test_utils::TestVecWriter(Vec::new()); self.node.write(&mut w).unwrap(); <( - BlockHash, + BestBlock, ChannelManager< &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, @@ -1332,7 +1332,7 @@ pub fn _reload_node<'a, 'b, 'c>( let mut monitors_read = Vec::with_capacity(monitors_encoded.len()); for encoded in monitors_encoded { let mut monitor_read = &encoded[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read( &mut monitor_read, (node.keys_manager, node.keys_manager), ) @@ -1347,7 +1347,7 @@ pub fn _reload_node<'a, 'b, 'c>( for monitor in monitors_read.iter() { assert!(channel_monitors.insert(monitor.channel_id(), monitor).is_none()); } - <(BlockHash, TestChannelManager<'b, 'c>)>::read( + <(BestBlock, TestChannelManager<'b, 'c>)>::read( &mut node_read, ChannelManagerReadArgs { config, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index c161a9664c0..dd57fd5ecd4 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -19,6 +19,7 @@ use crate::chain::channelmonitor::{ LATENCY_GRACE_PERIOD_BLOCKS, }; use crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use 
crate::chain::{ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; use crate::events::{ ClosureReason, Event, HTLCHandlingFailureType, PathFailure, PaymentFailureReason, @@ -7239,7 +7240,7 @@ pub fn test_update_err_monitor_lockdown() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) @@ -7345,7 +7346,7 @@ pub fn test_concurrent_monitor_claim() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) @@ -7395,7 +7396,7 @@ pub fn test_concurrent_monitor_claim() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan_1.2).unwrap(); let new_monitor = - <(BlockHash, channelmonitor::ChannelMonitor)>::read( + <(BestBlock, channelmonitor::ChannelMonitor)>::read( &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager), ) diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index 8c9e552fa04..9fb5ce8394c 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -11,7 +11,7 @@ //! Functional tests which test for correct behavior across node restarts. 
-use crate::chain::{ChannelMonitorUpdateStatus, Watch}; +use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Watch}; use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateStep}; use crate::routing::router::{PaymentParameters, RouteParameters}; @@ -29,7 +29,6 @@ use crate::util::ser::{Writeable, ReadableArgs}; use crate::util::config::UserConfig; use bitcoin::hashes::Hash; -use bitcoin::hash_types::BlockHash; use types::payment::{PaymentHash, PaymentPreimage}; use crate::prelude::*; @@ -410,7 +409,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_stale_monitors = Vec::new(); for serialized in node_0_stale_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); assert!(read.is_empty()); node_0_stale_monitors.push(monitor); } @@ -418,14 +417,14 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_monitors = Vec::new(); for serialized in node_0_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); + let (_, monitor) = <(BestBlock, ChannelMonitor)>::read(&mut read, (keys_manager, keys_manager)).unwrap(); assert!(read.is_empty()); node_0_monitors.push(monitor); } let mut nodes_0_read = &nodes_0_serialized[..]; if let Err(msgs::DecodeError::DangerousValue) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs 
{ + <(BestBlock, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { config: UserConfig::default(), entropy_source: keys_manager, node_signer: keys_manager, @@ -443,7 +442,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut nodes_0_read = &nodes_0_serialized[..]; let (_, nodes_0_deserialized_tmp) = - <(BlockHash, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(BestBlock, ChannelManager<&test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestRouter, &test_utils::TestMessageRouter, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs { config: UserConfig::default(), entropy_source: keys_manager, node_signer: keys_manager, diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index ca2da7593b5..5f032436725 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -14,7 +14,7 @@ use alloc::sync::Arc; use bitcoin::hashes::hex::FromHex; -use bitcoin::{BlockHash, Txid}; +use bitcoin::Txid; use core::convert::Infallible; use core::future::Future; @@ -32,6 +32,7 @@ use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use crate::chain::chainmonitor::Persist; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate}; use 
crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; @@ -447,7 +448,7 @@ impl Persist( kv_store: K, entropy_source: ES, signer_provider: SP, -) -> Result::EcdsaSigner>)>, io::Error> +) -> Result::EcdsaSigner>)>, io::Error> where K::Target: KVStoreSync, ES::Target: EntropySource + Sized, @@ -459,7 +460,7 @@ where CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, )? { - match ::EcdsaSigner>)>>::read( + match ::EcdsaSigner>)>>::read( &mut io::Cursor::new(kv_store.read( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE, @@ -467,7 +468,7 @@ where )?), (&*entropy_source, &*signer_provider), ) { - Ok(Some((block_hash, channel_monitor))) => { + Ok(Some((best_block, channel_monitor))) => { let monitor_name = MonitorName::from_str(&stored_key)?; if channel_monitor.persistence_key() != monitor_name { return Err(io::Error::new( @@ -476,7 +477,7 @@ where )); } - res.push((block_hash, channel_monitor)); + res.push((best_block, channel_monitor)); }, Ok(None) => {}, Err(_) => { @@ -652,7 +653,7 @@ where pub fn read_all_channel_monitors_with_updates( &self, ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { poll_sync_future(self.0.read_all_channel_monitors_with_updates()) @@ -675,7 +676,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. 
pub fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { poll_sync_future(self.0.read_channel_monitor_with_updates(monitor_key)) } @@ -859,7 +860,7 @@ where pub async fn read_all_channel_monitors_with_updates( &self, ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; @@ -893,7 +894,7 @@ where pub async fn read_all_channel_monitors_with_updates_parallel( self: &Arc, ) -> Result< - Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Vec<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > where K: MaybeSend + MaybeSync + 'static, @@ -944,7 +945,7 @@ where /// function to accomplish this. Take care to limit the number of parallel readers. pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { self.0.read_channel_monitor_with_updates(monitor_key).await } @@ -1064,7 +1065,7 @@ where { pub async fn read_channel_monitor_with_updates( &self, monitor_key: &str, - ) -> Result<(BlockHash, ChannelMonitor<::EcdsaSigner>), io::Error> + ) -> Result<(BestBlock, ChannelMonitor<::EcdsaSigner>), io::Error> { match self.maybe_read_channel_monitor_with_updates(monitor_key).await? 
{ Some(res) => Ok(res), @@ -1083,7 +1084,7 @@ where async fn maybe_read_channel_monitor_with_updates( &self, monitor_key: &str, ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Option<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let monitor_name = MonitorName::from_str(monitor_key)?; @@ -1092,7 +1093,7 @@ where .kv_store .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; - let (block_hash, monitor) = match read_res? { + let (best_block, monitor) = match read_res? { Some(res) => res, None => return Ok(None), }; @@ -1123,14 +1124,14 @@ where io::Error::new(io::ErrorKind::Other, "Monitor update failed") })?; } - Ok(Some((block_hash, monitor))) + Ok(Some((best_block, monitor))) } /// Read a channel monitor. async fn maybe_read_monitor( &self, monitor_name: &MonitorName, monitor_key: &str, ) -> Result< - Option<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + Option<(BestBlock, ChannelMonitor<::EcdsaSigner>)>, io::Error, > { let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; @@ -1141,12 +1142,12 @@ where if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) { monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64); } - match ::EcdsaSigner>)>>::read( + match ::EcdsaSigner>)>>::read( &mut monitor_cursor, (&*self.entropy_source, &*self.signer_provider), ) { Ok(None) => Ok(None), - Ok(Some((blockhash, channel_monitor))) => { + Ok(Some((best_block, channel_monitor))) => { if channel_monitor.persistence_key() != *monitor_name { log_error!( self.logger, @@ -1158,7 +1159,7 @@ where "ChannelMonitor was stored under the wrong key", )) } else { - Ok(Some((blockhash, channel_monitor))) + Ok(Some((best_block, channel_monitor))) } }, Err(e) => { @@ -1337,7 +1338,7 @@ where async fn archive_persisted_channel(&self, monitor_name: MonitorName) { let monitor_key = 
monitor_name.to_string(); let monitor = match self.read_channel_monitor_with_updates(&monitor_key).await { - Ok((_block_hash, monitor)) => monitor, + Ok((_best_block, monitor)) => monitor, Err(_) => return, }; let primary = ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index b4db17bee20..9df5b0ea966 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -20,6 +20,7 @@ use crate::chain::channelmonitor::{ ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent, }; use crate::chain::transaction::OutPoint; +use crate::chain::BestBlock; use crate::chain::WatchedOutput; use crate::events::bump_transaction::sync::WalletSourceSync; use crate::events::bump_transaction::Utxo; @@ -66,7 +67,7 @@ use bitcoin::amount::Amount; use bitcoin::block::Block; use bitcoin::constants::genesis_block; use bitcoin::constants::ChainHash; -use bitcoin::hash_types::{BlockHash, Txid}; +use bitcoin::hash_types::Txid; use bitcoin::hashes::Hash; use bitcoin::network::Network; use bitcoin::script::{Builder, Script, ScriptBuf}; @@ -533,7 +534,7 @@ impl<'a> TestChainMonitor<'a> { // underlying `ChainMonitor`. let mut w = TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -565,7 +566,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { // monitor to a serialized copy and get he same one back. 
let mut w = TestVecWriter(Vec::new()); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) @@ -621,7 +622,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { let monitor = self.chain_monitor.get_monitor(channel_id).unwrap(); w.0.clear(); monitor.write(&mut w).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( + let new_monitor = <(BestBlock, ChannelMonitor)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager), ) From dae94f914672767d41cc8b9c3b23949e9ec531bf Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 16:01:55 +0000 Subject: [PATCH 14/22] Replace `Cache::block_disconnected` with `blocks_disconnected` In 403dc1a48bb71ae794f6883ae0b760aad44cda39 we converted the `Listen` disconnect semantics to only pass the fork point, rather than each block being disconnected. We did not, however, update the semantics of `lightning-block-sync`'s `Cache` to reduce patch size. Here we go ahead and do so, dropping `ChainDifference::disconnected_blocks` as well, as it's no longer needed.
--- lightning-block-sync/src/init.rs | 8 +++---- lightning-block-sync/src/lib.rs | 37 +++++++++++++------------------- 2 files changed, 19 insertions(+), 26 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index 61f44c6139e..07575c6f523 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -175,7 +175,9 @@ where let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; let difference = chain_notifier.find_difference(best_header, &old_header, &mut chain_poller).await?; - chain_notifier.disconnect_blocks(difference.disconnected_blocks); + if difference.common_ancestor != old_header { + chain_notifier.disconnect_blocks(difference.common_ancestor); + } (difference.common_ancestor, difference.connected_blocks) }; @@ -215,9 +217,7 @@ impl<'a, C: Cache> Cache for ReadOnlyCache<'a, C> { unreachable!() } - fn block_disconnected(&mut self, _block_hash: &BlockHash) -> Option { - None - } + fn blocks_disconnected(&mut self, _fork_point: &ValidatedBlockHeader) {} } /// Wrapper for supporting dynamically sized chain listeners. diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 02593047658..91de5beaca3 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -202,9 +202,11 @@ pub trait Cache { /// disconnected later if needed. fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader); - /// Called when a block has been disconnected from the best chain. Once disconnected, a block's - /// header is no longer needed and thus can be removed. - fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option; + /// Called when blocks have been disconnected from the best chain. Only the fork point + /// (best common ancestor) is provided. + /// + /// Once disconnected, a block's header is no longer needed and thus can be removed.
+ fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader); } /// Unbounded cache of block headers keyed by block hash. @@ -219,8 +221,8 @@ impl Cache for UnboundedCache { self.insert(block_hash, block_header); } - fn block_disconnected(&mut self, block_hash: &BlockHash) -> Option { - self.remove(block_hash) + fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { + self.retain(|_, block_info| block_info.height < fork_point.height); } } @@ -315,9 +317,6 @@ struct ChainDifference { /// If there are any disconnected blocks, this is where the chain forked. common_ancestor: ValidatedBlockHeader, - /// Blocks that were disconnected from the chain since the last poll. - disconnected_blocks: Vec, - /// Blocks that were connected to the chain since the last poll. connected_blocks: Vec, } @@ -341,7 +340,9 @@ where .find_difference(new_header, old_header, chain_poller) .await .map_err(|e| (e, None))?; - self.disconnect_blocks(difference.disconnected_blocks); + if difference.common_ancestor != *old_header { + self.disconnect_blocks(difference.common_ancestor); + } self.connect_blocks(difference.common_ancestor, difference.connected_blocks, chain_poller) .await } @@ -354,7 +355,6 @@ where &self, current_header: ValidatedBlockHeader, prev_header: &ValidatedBlockHeader, chain_poller: &mut P, ) -> BlockSourceResult { - let mut disconnected_blocks = Vec::new(); let mut connected_blocks = Vec::new(); let mut current = current_header; let mut previous = *prev_header; @@ -369,7 +369,6 @@ where let current_height = current.height; let previous_height = previous.height; if current_height <= previous_height { - disconnected_blocks.push(previous); previous = self.look_up_previous_header(chain_poller, &previous).await?; } if current_height >= previous_height { @@ -379,7 +378,7 @@ where } let common_ancestor = current; - Ok(ChainDifference { common_ancestor, disconnected_blocks, connected_blocks }) + Ok(ChainDifference { common_ancestor, connected_blocks }) } 
/// Returns the previous header for the given header, either by looking it up in the cache or @@ -394,16 +393,10 @@ where } /// Notifies the chain listeners of disconnected blocks. - fn disconnect_blocks(&mut self, disconnected_blocks: Vec) { - for header in disconnected_blocks.iter() { - if let Some(cached_header) = self.header_cache.block_disconnected(&header.block_hash) { - assert_eq!(cached_header, *header); - } - } - if let Some(block) = disconnected_blocks.last() { - let fork_point = BestBlock::new(block.header.prev_blockhash, block.height - 1); - self.chain_listener.blocks_disconnected(fork_point); - } + fn disconnect_blocks(&mut self, fork_point: ValidatedBlockHeader) { + self.header_cache.blocks_disconnected(&fork_point); + let best_block = BestBlock::new(fork_point.block_hash, fork_point.height); + self.chain_listener.blocks_disconnected(best_block); } /// Notifies the chain listeners of connected blocks. From 51324af966ac93fac661044b7fd8c8415a4b518d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 15:49:11 +0000 Subject: [PATCH 15/22] Pass a `BestBlock` to `init::synchronize_listeners` On restart, LDK expects the chain to be replayed starting from where it was when objects were last serialized. This is fine in the normal case, but if there was a reorg and the node which we were syncing from either resynced or was changed, the last block that we were synced as of might no longer be available. As a result, it becomes impossible to figure out where the fork point is, and thus to replay the chain. Luckily, changing the block source during a reorg isn't exactly common, but we shouldn't end up with a bricked node. To address this, `lightning-block-sync` allows the user to pass in `Cache` which can be used to cache recent blocks and thus allow for reorg handling in this case. However, serialization for, and a reasonable default implementation of a `Cache` was never built. Instead, here, we start taking a different approach. 
To avoid developers having to persist yet another object, we move `BestBlock` to storing some number of recent block hashes. This allows us to find the fork point with just the serialized state. In a previous commit, we moved deserialization of various structs to return the `BestBlock` rather than a `BlockHash`. Here we move to actually using it, taking a `BestBlock` in place of `BlockHash` to `init::synchronize_listeners` and walking the `previous_blocks` list to find the fork point rather than relying on the `Cache`. --- lightning-block-sync/src/init.rs | 48 ++++++++++---------------- lightning-block-sync/src/lib.rs | 44 +++++++++++++++++++++-- lightning-block-sync/src/poll.rs | 13 +++++++ lightning-block-sync/src/test_utils.rs | 17 +++++++++ 4 files changed, 90 insertions(+), 32 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index 07575c6f523..a5a1a0d01f7 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -117,8 +117,8 @@ where /// let mut cache = UnboundedCache::new(); /// let mut monitor_listener = (monitor, &*tx_broadcaster, &*fee_estimator, &*logger); /// let listeners = vec![ -/// (monitor_best_block.block_hash, &monitor_listener as &dyn chain::Listen), -/// (manager_best_block.block_hash, &manager as &dyn chain::Listen), +/// (monitor_best_block, &monitor_listener as &dyn chain::Listen), +/// (manager_best_block, &manager as &dyn chain::Listen), /// ]; /// let chain_tip = init::synchronize_listeners( /// block_source, Network::Bitcoin, &mut cache, listeners).await.unwrap(); @@ -143,39 +143,27 @@ pub async fn synchronize_listeners< L: chain::Listen + ?Sized, >( block_source: B, network: Network, header_cache: &mut C, - mut chain_listeners: Vec<(BlockHash, &L)>, + mut chain_listeners: Vec<(BestBlock, &L)>, ) -> BlockSourceResult where B::Target: BlockSource, { let best_header = validate_best_block_header(&*block_source).await?; - // Fetch the header for the block hash paired 
with each listener. - let mut chain_listeners_with_old_headers = Vec::new(); - for (old_block_hash, chain_listener) in chain_listeners.drain(..) { - let old_header = match header_cache.look_up(&old_block_hash) { - Some(header) => *header, - None => { - block_source.get_header(&old_block_hash, None).await?.validate(old_block_hash)? - }, - }; - chain_listeners_with_old_headers.push((old_header, chain_listener)) - } - // Find differences and disconnect blocks for each listener individually. let mut chain_poller = ChainPoller::new(block_source, network); let mut chain_listeners_at_height = Vec::new(); let mut most_common_ancestor = None; let mut most_connected_blocks = Vec::new(); - for (old_header, chain_listener) in chain_listeners_with_old_headers.drain(..) { + for (old_best_block, chain_listener) in chain_listeners.drain(..) { // Disconnect any stale blocks, but keep them in the cache for the next iteration. let header_cache = &mut ReadOnlyCache(header_cache); let (common_ancestor, connected_blocks) = { let chain_listener = &DynamicChainListener(chain_listener); let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; let difference = - chain_notifier.find_difference(best_header, &old_header, &mut chain_poller).await?; - if difference.common_ancestor != old_header { + chain_notifier.find_difference_from_best_block(best_header, old_best_block, &mut chain_poller).await?; + if difference.common_ancestor.block_hash != old_best_block.block_hash { chain_notifier.disconnect_blocks(difference.common_ancestor); } (difference.common_ancestor, difference.connected_blocks) @@ -281,9 +269,9 @@ mod tests { let listener_3 = MockChainListener::new().expect_block_connected(*chain.at_height(4)); let listeners = vec![ - (chain.at_height(1).block_hash, &listener_1 as &dyn chain::Listen), - (chain.at_height(2).block_hash, &listener_2 as &dyn chain::Listen), - (chain.at_height(3).block_hash, &listener_3 as &dyn chain::Listen), + (chain.best_block_at_height(1), 
&listener_1 as &dyn chain::Listen), + (chain.best_block_at_height(2), &listener_2 as &dyn chain::Listen), + (chain.best_block_at_height(3), &listener_3 as &dyn chain::Listen), ]; let mut cache = chain.header_cache(0..=4); match synchronize_listeners(&chain, Network::Bitcoin, &mut cache, listeners).await { @@ -313,9 +301,9 @@ mod tests { .expect_block_connected(*main_chain.at_height(4)); let listeners = vec![ - (fork_chain_1.tip().block_hash, &listener_1 as &dyn chain::Listen), - (fork_chain_2.tip().block_hash, &listener_2 as &dyn chain::Listen), - (fork_chain_3.tip().block_hash, &listener_3 as &dyn chain::Listen), + (fork_chain_1.best_block(), &listener_1 as &dyn chain::Listen), + (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), + (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; let mut cache = fork_chain_1.header_cache(2..=4); cache.extend(fork_chain_2.header_cache(3..=4)); @@ -350,9 +338,9 @@ mod tests { .expect_block_connected(*main_chain.at_height(4)); let listeners = vec![ - (fork_chain_1.tip().block_hash, &listener_1 as &dyn chain::Listen), - (fork_chain_2.tip().block_hash, &listener_2 as &dyn chain::Listen), - (fork_chain_3.tip().block_hash, &listener_3 as &dyn chain::Listen), + (fork_chain_1.best_block(), &listener_1 as &dyn chain::Listen), + (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), + (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; let mut cache = fork_chain_1.header_cache(2..=4); cache.extend(fork_chain_2.header_cache(3..=4)); @@ -368,18 +356,18 @@ mod tests { let main_chain = Blockchain::default().with_height(2); let fork_chain = main_chain.fork_at_height(1); let new_tip = main_chain.tip(); - let old_tip = fork_chain.tip(); + let old_best_block = fork_chain.best_block(); let listener = MockChainListener::new() .expect_blocks_disconnected(*fork_chain.at_height(1)) .expect_block_connected(*new_tip); - let listeners = vec![(old_tip.block_hash, &listener as &dyn chain::Listen)]; + let 
listeners = vec![(old_best_block, &listener as &dyn chain::Listen)]; let mut cache = fork_chain.header_cache(2..=2); match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { Ok(_) => { assert!(cache.contains_key(&new_tip.block_hash)); - assert!(cache.contains_key(&old_tip.block_hash)); + assert!(cache.contains_key(&old_best_block.block_hash)); }, Err(e) => panic!("Unexpected error: {:?}", e), } diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 91de5beaca3..e3acedae030 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -337,7 +337,7 @@ where chain_poller: &mut P, ) -> Result<(), (BlockSourceError, Option)> { let difference = self - .find_difference(new_header, old_header, chain_poller) + .find_difference_from_header(new_header, old_header, chain_poller) .await .map_err(|e| (e, None))?; if difference.common_ancestor != *old_header { @@ -347,11 +347,51 @@ where .await } + /// Returns the changes needed to produce the chain with `current_header` as its tip from the + /// chain with `prev_best_block` as its tip. + /// + /// First resolves `prev_best_block` to a `ValidatedBlockHeader` using the `previous_blocks` + /// field as fallback if needed, then finds the common ancestor. + async fn find_difference_from_best_block( + &self, current_header: ValidatedBlockHeader, prev_best_block: BestBlock, + chain_poller: &mut P, + ) -> BlockSourceResult { + // Try to resolve the header for the previous best block. First try the block_hash, + // then fall back to previous_blocks if that fails. 
+ let cur_tip = core::iter::once((0, &prev_best_block.block_hash)); + let prev_tips = prev_best_block.previous_blocks.iter().enumerate().filter_map(|(idx, hash_opt)| { + if let Some(block_hash) = hash_opt { + Some((idx as u32 + 1, block_hash)) + } else { + None + } + }); + let mut found_header = None; + for (height_diff, block_hash) in cur_tip.chain(prev_tips) { + if let Some(header) = self.header_cache.look_up(block_hash) { + found_header = Some(*header); + break; + } + let height = prev_best_block.height.checked_sub(height_diff).ok_or( + BlockSourceError::persistent("BestBlock had more previous_blocks than its height") + )?; + if let Ok(header) = chain_poller.get_header(block_hash, Some(height)).await { + found_header = Some(header); + break; + } + } + let found_header = found_header.ok_or_else(|| { + BlockSourceError::persistent("could not resolve any block from BestBlock") + })?; + + self.find_difference_from_header(current_header, &found_header, chain_poller).await + } + /// Returns the changes needed to produce the chain with `current_header` as its tip from the /// chain with `prev_header` as its tip. /// /// Walks backwards from `current_header` and `prev_header`, finding the common ancestor. - async fn find_difference( + async fn find_difference_from_header( &self, current_header: ValidatedBlockHeader, prev_header: &ValidatedBlockHeader, chain_poller: &mut P, ) -> BlockSourceResult { diff --git a/lightning-block-sync/src/poll.rs b/lightning-block-sync/src/poll.rs index 13e0403c3b6..fd8c546c56f 100644 --- a/lightning-block-sync/src/poll.rs +++ b/lightning-block-sync/src/poll.rs @@ -31,6 +31,11 @@ pub trait Poll { fn fetch_block<'a>( &'a self, header: &'a ValidatedBlockHeader, ) -> impl Future> + Send + 'a; + + /// Returns the header for a given hash and optional height hint. 
+ fn get_header<'a>( + &'a self, block_hash: &'a BlockHash, height_hint: Option, + ) -> impl Future> + Send + 'a; } /// A chain tip relative to another chain tip in terms of block hash and chainwork. @@ -258,6 +263,14 @@ impl + Sized + Send + Sync, T: BlockSource + ?Sized> Poll ) -> impl Future> + Send + 'a { async move { self.block_source.get_block(&header.block_hash).await?.validate(header.block_hash) } } + + fn get_header<'a>( + &'a self, block_hash: &'a BlockHash, height_hint: Option, + ) -> impl Future> + Send + 'a { + Box::pin(async move { + self.block_source.get_header(block_hash, height_hint).await?.validate(*block_hash) + }) + } } #[cfg(test)] diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index 40788e4d08c..3d7870afb1e 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -104,6 +104,18 @@ impl Blockchain { block_header.validate(block_hash).unwrap() } + pub fn best_block_at_height(&self, height: usize) -> BestBlock { + let mut previous_blocks = [None; 12]; + for (i, height) in (0..height).rev().take(12).enumerate() { + previous_blocks[i] = Some(self.blocks[height].block_hash()); + } + BestBlock { + height: height as u32, + block_hash: self.blocks[height].block_hash(), + previous_blocks, + } + } + fn at_height_unvalidated(&self, height: usize) -> BlockHeaderData { assert!(!self.blocks.is_empty()); assert!(height < self.blocks.len()); @@ -123,6 +135,11 @@ impl Blockchain { self.at_height(self.blocks.len() - 1) } + pub fn best_block(&self) -> BestBlock { + assert!(!self.blocks.is_empty()); + self.best_block_at_height(self.blocks.len() - 1) + } + pub fn disconnect_tip(&mut self) -> Option { self.blocks.pop() } From 32479d3dd750c804bbef0772e2c734085fcef464 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 16:51:16 +0000 Subject: [PATCH 16/22] Make the `Cache` trait priv, just use `UnboundedCache` publicly In the previous commit, we moved to relying on 
`BestBlock::previous_blocks` to find the fork point in `lightning-block-sync`'s `init::synchronize_listeners`. Here we now drop the `Cache` parameter as we no longer rely on it. Because we now have no reason to want a persistent `Cache`, we remove the trait from the public interface. However, to keep disconnections reliable we return the `UnboundedCache` we built up during initial sync from `init::synchronize_listeners` which we expect developers to pass to `SpvClient::new`. --- lightning-block-sync/src/init.rs | 87 +++++++------------------------- lightning-block-sync/src/lib.rs | 61 ++++++++++++---------- 2 files changed, 54 insertions(+), 94 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index a5a1a0d01f7..4b96e648602 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -2,7 +2,7 @@ //! from disk. use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader}; -use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier}; +use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier, UnboundedCache}; use bitcoin::block::Header; use bitcoin::hash_types::BlockHash; @@ -32,9 +32,9 @@ where /// Performs a one-time sync of chain listeners using a single *trusted* block source, bringing each /// listener's view of the chain from its paired block hash to `block_source`'s best chain tip. /// -/// Upon success, the returned header can be used to initialize [`SpvClient`]. In the case of -/// failure, each listener may be left at a different block hash than the one it was originally -/// paired with. +/// Upon success, the returned header and header cache can be used to initialize [`SpvClient`]. In +/// the case of failure, each listener may be left at a different block hash than the one it was +/// originally paired with. /// /// Useful during startup to bring the [`ChannelManager`] and each [`ChannelMonitor`] in sync before /// switching to [`SpvClient`]. 
For example: @@ -114,14 +114,13 @@ where /// }; /// /// // Synchronize any channel monitors and the channel manager to be on the best block. -/// let mut cache = UnboundedCache::new(); /// let mut monitor_listener = (monitor, &*tx_broadcaster, &*fee_estimator, &*logger); /// let listeners = vec![ /// (monitor_best_block, &monitor_listener as &dyn chain::Listen), /// (manager_best_block, &manager as &dyn chain::Listen), /// ]; -/// let chain_tip = init::synchronize_listeners( -/// block_source, Network::Bitcoin, &mut cache, listeners).await.unwrap(); +/// let (chain_cache, chain_tip) = init::synchronize_listeners( +/// block_source, Network::Bitcoin, listeners).await.unwrap(); /// /// // Allow the chain monitor to watch any channels. /// let monitor = monitor_listener.0; @@ -130,7 +129,7 @@ where /// // Create an SPV client to notify the chain monitor and channel manager of block events. /// let chain_poller = poll::ChainPoller::new(block_source, Network::Bitcoin); /// let mut chain_listener = (chain_monitor, &manager); -/// let spv_client = SpvClient::new(chain_tip, chain_poller, &mut cache, &chain_listener); +/// let spv_client = SpvClient::new(chain_tip, chain_poller, chain_cache, &chain_listener); /// } /// ``` /// @@ -139,12 +138,11 @@ where /// [`ChannelMonitor`]: lightning::chain::channelmonitor::ChannelMonitor pub async fn synchronize_listeners< B: Deref + Sized + Send + Sync, - C: Cache, L: chain::Listen + ?Sized, >( - block_source: B, network: Network, header_cache: &mut C, + block_source: B, network: Network, mut chain_listeners: Vec<(BestBlock, &L)>, -) -> BlockSourceResult +) -> BlockSourceResult<(UnboundedCache, ValidatedBlockHeader)> where B::Target: BlockSource, { @@ -155,12 +153,12 @@ where let mut chain_listeners_at_height = Vec::new(); let mut most_common_ancestor = None; let mut most_connected_blocks = Vec::new(); + let mut header_cache = UnboundedCache::new(); for (old_best_block, chain_listener) in chain_listeners.drain(..) 
{ // Disconnect any stale blocks, but keep them in the cache for the next iteration. - let header_cache = &mut ReadOnlyCache(header_cache); let (common_ancestor, connected_blocks) = { let chain_listener = &DynamicChainListener(chain_listener); - let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; + let mut chain_notifier = ChainNotifier { header_cache: &mut header_cache, chain_listener }; let difference = chain_notifier.find_difference_from_best_block(best_header, old_best_block, &mut chain_poller).await?; if difference.common_ancestor.block_hash != old_best_block.block_hash { @@ -180,32 +178,14 @@ where // Connect new blocks for all listeners at once to avoid re-fetching blocks. if let Some(common_ancestor) = most_common_ancestor { let chain_listener = &ChainListenerSet(chain_listeners_at_height); - let mut chain_notifier = ChainNotifier { header_cache, chain_listener }; + let mut chain_notifier = ChainNotifier { header_cache: &mut header_cache, chain_listener }; chain_notifier .connect_blocks(common_ancestor, most_connected_blocks, &mut chain_poller) .await .map_err(|(e, _)| e)?; } - Ok(best_header) -} - -/// A wrapper to make a cache read-only. -/// -/// Used to prevent losing headers that may be needed to disconnect blocks common to more than one -/// listener. -struct ReadOnlyCache<'a, C: Cache>(&'a mut C); - -impl<'a, C: Cache> Cache for ReadOnlyCache<'a, C> { - fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.0.look_up(block_hash) - } - - fn block_connected(&mut self, _block_hash: BlockHash, _block_header: ValidatedBlockHeader) { - unreachable!() - } - - fn blocks_disconnected(&mut self, _fork_point: &ValidatedBlockHeader) {} + Ok((header_cache, best_header)) } /// Wrapper for supporting dynamically sized chain listeners. 
@@ -273,9 +253,8 @@ mod tests { (chain.best_block_at_height(2), &listener_2 as &dyn chain::Listen), (chain.best_block_at_height(3), &listener_3 as &dyn chain::Listen), ]; - let mut cache = chain.header_cache(0..=4); - match synchronize_listeners(&chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, chain.tip()), + match synchronize_listeners(&chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } @@ -305,11 +284,8 @@ mod tests { (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; - let mut cache = fork_chain_1.header_cache(2..=4); - cache.extend(fork_chain_2.header_cache(3..=4)); - cache.extend(fork_chain_3.header_cache(4..=4)); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, main_chain.tip()), + match synchronize_listeners(&main_chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, main_chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } @@ -342,33 +318,8 @@ mod tests { (fork_chain_2.best_block(), &listener_2 as &dyn chain::Listen), (fork_chain_3.best_block(), &listener_3 as &dyn chain::Listen), ]; - let mut cache = fork_chain_1.header_cache(2..=4); - cache.extend(fork_chain_2.header_cache(3..=4)); - cache.extend(fork_chain_3.header_cache(4..=4)); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(header) => assert_eq!(header, main_chain.tip()), - Err(e) => panic!("Unexpected error: {:?}", e), - } - } - - #[tokio::test] - async fn cache_connected_and_keep_disconnected_blocks() { - let main_chain = Blockchain::default().with_height(2); - let fork_chain = main_chain.fork_at_height(1); - let new_tip = main_chain.tip(); - let old_best_block = fork_chain.best_block(); - - let listener = 
MockChainListener::new() - .expect_blocks_disconnected(*fork_chain.at_height(1)) - .expect_block_connected(*new_tip); - - let listeners = vec![(old_best_block, &listener as &dyn chain::Listen)]; - let mut cache = fork_chain.header_cache(2..=2); - match synchronize_listeners(&main_chain, Network::Bitcoin, &mut cache, listeners).await { - Ok(_) => { - assert!(cache.contains_key(&new_tip.block_hash)); - assert!(cache.contains_key(&old_best_block.block_hash)); - }, + match synchronize_listeners(&main_chain, Network::Bitcoin, listeners).await { + Ok((_, header)) => assert_eq!(header, main_chain.tip()), Err(e) => panic!("Unexpected error: {:?}", e), } } diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index e3acedae030..36501b3f6a3 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -170,18 +170,13 @@ pub enum BlockData { /// sources for the best chain tip. During this process it detects any chain forks, determines which /// constitutes the best chain, and updates the listener accordingly with any blocks that were /// connected or disconnected since the last poll. -/// -/// Block headers for the best chain are maintained in the parameterized cache, allowing for a -/// custom cache eviction policy. This offers flexibility to those sensitive to resource usage. -/// Hence, there is a trade-off between a lower memory footprint and potentially increased network -/// I/O as headers are re-fetched during fork detection. -pub struct SpvClient<'a, P: Poll, C: Cache, L: Deref> +pub struct SpvClient where L::Target: chain::Listen, { chain_tip: ValidatedBlockHeader, chain_poller: P, - chain_notifier: ChainNotifier<'a, C, L>, + chain_notifier: ChainNotifier, } /// The `Cache` trait defines behavior for managing a block header cache, where block headers are @@ -194,7 +189,7 @@ where /// Implementations may define how long to retain headers such that it's unlikely they will ever be /// needed to disconnect a block. 
In cases where block sources provide access to headers on stale /// forks reliably, caches may be entirely unnecessary. -pub trait Cache { +pub(crate) trait Cache { /// Retrieves the block header keyed by the given block hash. fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader>; @@ -226,7 +221,21 @@ impl Cache for UnboundedCache { } } -impl<'a, P: Poll, C: Cache, L: Deref> SpvClient<'a, P, C, L> +impl Cache for &mut UnboundedCache { + fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { + self.get(block_hash) + } + + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.insert(block_hash, block_header); + } + + fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { + self.retain(|_, block_info| block_info.height < fork_point.height); + } +} + +impl SpvClient where L::Target: chain::Listen, { @@ -241,7 +250,7 @@ where /// /// [`poll_best_tip`]: SpvClient::poll_best_tip pub fn new( - chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: &'a mut C, + chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: UnboundedCache, chain_listener: L, ) -> Self { let chain_notifier = ChainNotifier { header_cache, chain_listener }; @@ -295,15 +304,15 @@ where /// Notifies [listeners] of blocks that have been connected or disconnected from the chain. /// /// [listeners]: lightning::chain::Listen -pub struct ChainNotifier<'a, C: Cache, L: Deref> +pub(crate) struct ChainNotifier where L::Target: chain::Listen, { /// Cache for looking up headers before fetching from a block source. - header_cache: &'a mut C, + pub(crate) header_cache: C, /// Listener that will be notified of connected or disconnected blocks. 
- chain_listener: L, + pub(crate) chain_listener: L, } /// Changes made to the chain between subsequent polls that transformed it from having one chain tip @@ -321,7 +330,7 @@ struct ChainDifference { connected_blocks: Vec, } -impl<'a, C: Cache, L: Deref> ChainNotifier<'a, C, L> +impl ChainNotifier where L::Target: chain::Listen, { @@ -480,9 +489,9 @@ mod spv_client_tests { let best_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(best_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => { assert_eq!(e.kind(), BlockSourceErrorKind::Persistent); @@ -499,9 +508,9 @@ mod spv_client_tests { let common_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(common_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(common_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -519,9 +528,9 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -539,9 +548,9 @@ mod spv_client_tests { let old_tip = 
chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -559,9 +568,9 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(old_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { @@ -580,9 +589,9 @@ mod spv_client_tests { let worse_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let mut cache = UnboundedCache::new(); + let cache = UnboundedCache::new(); let mut listener = NullChainListener {}; - let mut client = SpvClient::new(best_tip, poller, &mut cache, &mut listener); + let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { Err(e) => panic!("Unexpected error: {:?}", e), Ok((chain_tip, blocks_connected)) => { From 6c59011456226940176439a84d222de2ebf6577c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 12 Oct 2025 20:12:58 +0000 Subject: [PATCH 17/22] Make `UnboundedCache` bounded In the previous commit we moved to hard-coding `UnboundedCache` in the `lightning-block-sync` interface. 
This is great, except that it's an unbounded cache that can use arbitrary amounts of memory (though never really all that much - it's just headers that come in while we're running). Here we simply limit the size, and while we're at it give it a more generic `HeaderCache` name. --- lightning-block-sync/src/init.rs | 7 ++-- lightning-block-sync/src/lib.rs | 49 ++++++++++++++++---------- lightning-block-sync/src/test_utils.rs | 9 ++--- 3 files changed, 39 insertions(+), 26 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index 4b96e648602..6df296f6a84 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -2,10 +2,9 @@ //! from disk. use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader}; -use crate::{BlockSource, BlockSourceResult, Cache, ChainNotifier, UnboundedCache}; +use crate::{BlockSource, BlockSourceResult, ChainNotifier, HeaderCache}; use bitcoin::block::Header; -use bitcoin::hash_types::BlockHash; use bitcoin::network::Network; use lightning::chain; @@ -142,7 +141,7 @@ pub async fn synchronize_listeners< >( block_source: B, network: Network, mut chain_listeners: Vec<(BestBlock, &L)>, -) -> BlockSourceResult<(UnboundedCache, ValidatedBlockHeader)> +) -> BlockSourceResult<(HeaderCache, ValidatedBlockHeader)> where B::Target: BlockSource, { @@ -153,7 +152,7 @@ where let mut chain_listeners_at_height = Vec::new(); let mut most_common_ancestor = None; let mut most_connected_blocks = Vec::new(); - let mut header_cache = UnboundedCache::new(); + let mut header_cache = HeaderCache::new(); for (old_best_block, chain_listener) in chain_listeners.drain(..) { // Disconnect any stale blocks, but keep them in the cache for the next iteration.
let (common_ancestor, connected_blocks) = { diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index 36501b3f6a3..c71e8ac792d 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -176,7 +176,7 @@ where { chain_tip: ValidatedBlockHeader, chain_poller: P, - chain_notifier: ChainNotifier, + chain_notifier: ChainNotifier, } /// The `Cache` trait defines behavior for managing a block header cache, where block headers are @@ -204,34 +204,47 @@ pub(crate) trait Cache { fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader); } -/// Unbounded cache of block headers keyed by block hash. -pub type UnboundedCache = std::collections::HashMap; +/// Bounded cache of block headers keyed by block hash. +/// +/// Retains roughly the last week of block headers (those within `6 * 24 * 7` blocks of the +/// newest connected header) based on height. +pub struct HeaderCache(std::collections::HashMap); -impl Cache for UnboundedCache { +impl HeaderCache { + /// Creates a new empty header cache. + pub fn new() -> Self { + Self(std::collections::HashMap::new()) + } +} + +impl Cache for HeaderCache { fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.get(block_hash) + self.0.get(block_hash) } fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { - self.insert(block_hash, block_header); + self.0.insert(block_hash, block_header); + + // Remove headers older than a week.
+ let cutoff_height = block_header.height.saturating_sub(6 * 24 * 7); + self.0.retain(|_, header| header.height >= cutoff_height); } fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { - self.retain(|_, block_info| block_info.height < fork_point.height); + self.0.retain(|_, block_info| block_info.height < fork_point.height); } } -impl Cache for &mut UnboundedCache { +impl Cache for &mut HeaderCache { fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader> { - self.get(block_hash) + self.0.get(block_hash) } fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { - self.insert(block_hash, block_header); + (*self).block_connected(block_hash, block_header); } fn blocks_disconnected(&mut self, fork_point: &ValidatedBlockHeader) { - self.retain(|_, block_info| block_info.height < fork_point.height); + self.0.retain(|_, block_info| block_info.height < fork_point.height); } } @@ -250,7 +263,7 @@ where /// /// [`poll_best_tip`]: SpvClient::poll_best_tip pub fn new( - chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: UnboundedCache, + chain_tip: ValidatedBlockHeader, chain_poller: P, header_cache: HeaderCache, chain_listener: L, ) -> Self { let chain_notifier = ChainNotifier { header_cache, chain_listener }; @@ -489,7 +502,7 @@ mod spv_client_tests { let best_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { @@ -508,7 +521,7 @@ mod spv_client_tests { let common_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(common_tip, poller, cache, &mut listener); 
match client.poll_best_tip().await { @@ -528,7 +541,7 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { @@ -548,7 +561,7 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { @@ -568,7 +581,7 @@ mod spv_client_tests { let old_tip = chain.at_height(1); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(old_tip, poller, cache, &mut listener); match client.poll_best_tip().await { @@ -589,7 +602,7 @@ mod spv_client_tests { let worse_tip = chain.tip(); let poller = poll::ChainPoller::new(&mut chain, Network::Testnet); - let cache = UnboundedCache::new(); + let cache = HeaderCache::new(); let mut listener = NullChainListener {}; let mut client = SpvClient::new(best_tip, poller, cache, &mut listener); match client.poll_best_tip().await { diff --git a/lightning-block-sync/src/test_utils.rs b/lightning-block-sync/src/test_utils.rs index 3d7870afb1e..89cb3e81d60 100644 --- a/lightning-block-sync/src/test_utils.rs +++ b/lightning-block-sync/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::poll::{Validate, ValidatedBlockHeader}; use crate::{ - BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, UnboundedCache, + BlockData, BlockHeaderData, BlockSource, BlockSourceError, BlockSourceResult, Cache, + HeaderCache, }; use 
bitcoin::block::{Block, Header, Version}; @@ -144,12 +145,12 @@ impl Blockchain { self.blocks.pop() } - pub fn header_cache(&self, heights: std::ops::RangeInclusive) -> UnboundedCache { - let mut cache = UnboundedCache::new(); + pub fn header_cache(&self, heights: std::ops::RangeInclusive) -> HeaderCache { + let mut cache = HeaderCache::new(); for i in heights { let value = self.at_height(i); let key = value.header.block_hash(); - assert!(cache.insert(key, value).is_none()); + cache.block_connected(key, value); } cache } From da065c18ed6119499b35249e46cc8ee39280534c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 8 Dec 2025 01:10:30 +0000 Subject: [PATCH 18/22] Consolidate all the pub async utils to `native_async` --- lightning-background-processor/src/lib.rs | 4 +-- lightning/src/chain/chainmonitor.rs | 3 +- lightning/src/events/bump_transaction/mod.rs | 2 +- lightning/src/events/bump_transaction/sync.rs | 3 +- lightning/src/sign/mod.rs | 2 +- lightning/src/util/async_poll.rs | 28 ----------------- lightning/src/util/mod.rs | 2 +- lightning/src/util/native_async.rs | 31 ++++++++++++++++++- lightning/src/util/persist.rs | 4 +-- lightning/src/util/test_utils.rs | 2 +- 10 files changed, 41 insertions(+), 40 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index bc0d42ac191..d9e1f2ce251 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -53,8 +53,6 @@ use lightning::routing::utxo::UtxoLookup; use lightning::sign::{ ChangeDestinationSource, ChangeDestinationSourceSync, EntropySource, OutputSpender, }; -#[cfg(not(c_bindings))] -use lightning::util::async_poll::MaybeSend; use lightning::util::logger::Logger; use lightning::util::persist::{ KVStore, KVStoreSync, KVStoreSyncWrapper, CHANNEL_MANAGER_PERSISTENCE_KEY, @@ -63,6 +61,8 @@ use lightning::util::persist::{ NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE, SCORER_PERSISTENCE_KEY,
SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; +#[cfg(not(c_bindings))] +use lightning::util::native_async::MaybeSend; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; #[cfg(feature = "std")] use lightning::util::wakers::Sleeper; diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index d8d6c90921f..32f36fb3a78 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -51,10 +51,9 @@ use crate::sign::ecdsa::EcdsaChannelSigner; use crate::sign::{EntropySource, PeerStorageKey, SignerProvider}; use crate::sync::{Mutex, MutexGuard, RwLock, RwLockReadGuard}; use crate::types::features::{InitFeatures, NodeFeatures}; -use crate::util::async_poll::{MaybeSend, MaybeSync}; use crate::util::errors::APIError; use crate::util::logger::{Logger, WithContext}; -use crate::util::native_async::FutureSpawner; +use crate::util::native_async::{FutureSpawner, MaybeSend, MaybeSync}; use crate::util::persist::{KVStore, MonitorName, MonitorUpdatingPersisterAsync}; #[cfg(peer_storage)] use crate::util::ser::{VecWriter, Writeable}; diff --git a/lightning/src/events/bump_transaction/mod.rs b/lightning/src/events/bump_transaction/mod.rs index e141d9b8abc..9c35194751f 100644 --- a/lightning/src/events/bump_transaction/mod.rs +++ b/lightning/src/events/bump_transaction/mod.rs @@ -37,7 +37,7 @@ use crate::sign::{ ChannelDerivationParameters, HTLCDescriptor, SignerProvider, P2WPKH_WITNESS_WEIGHT, }; use crate::sync::Mutex; -use crate::util::async_poll::{MaybeSend, MaybeSync}; +use crate::util::native_async::{MaybeSend, MaybeSync}; use crate::util::logger::Logger; use bitcoin::amount::Amount; diff --git a/lightning/src/events/bump_transaction/sync.rs b/lightning/src/events/bump_transaction/sync.rs index 1328c2c1b3a..03ffa479097 100644 --- a/lightning/src/events/bump_transaction/sync.rs +++ b/lightning/src/events/bump_transaction/sync.rs @@ -18,8 +18,9 @@ use 
crate::chain::chaininterface::BroadcasterInterface; use crate::chain::ClaimId; use crate::prelude::*; use crate::sign::SignerProvider; -use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; +use crate::util::async_poll::dummy_waker; use crate::util::logger::Logger; +use crate::util::native_async::{MaybeSend, MaybeSync}; use bitcoin::{Psbt, ScriptBuf, Transaction, TxOut}; diff --git a/lightning/src/sign/mod.rs b/lightning/src/sign/mod.rs index 6d0d5bf405a..d562ff13406 100644 --- a/lightning/src/sign/mod.rs +++ b/lightning/src/sign/mod.rs @@ -58,7 +58,7 @@ use crate::ln::script::ShutdownScript; use crate::offers::invoice::UnsignedBolt12Invoice; use crate::types::features::ChannelTypeFeatures; use crate::types::payment::PaymentPreimage; -use crate::util::async_poll::MaybeSend; +use crate::util::native_async::MaybeSend; use crate::util::ser::{ReadableArgs, Writeable}; use crate::util::transaction_utils; diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 57df5b26cb0..23ca1aad603 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -164,31 +164,3 @@ const DUMMY_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( pub(crate) fn dummy_waker() -> Waker { unsafe { Waker::from_raw(RawWaker::new(core::ptr::null(), &DUMMY_WAKER_VTABLE)) } } - -/// Marker trait to optionally implement `Sync` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -#[cfg(feature = "std")] -pub use core::marker::Sync as MaybeSync; - -#[cfg(not(feature = "std"))] -/// Marker trait to optionally implement `Sync` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub trait MaybeSync {} -#[cfg(not(feature = "std"))] -impl MaybeSync for T where T: ?Sized {} - -/// Marker trait to optionally implement `Send` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. 
-#[cfg(feature = "std")] -pub use core::marker::Send as MaybeSend; - -#[cfg(not(feature = "std"))] -/// Marker trait to optionally implement `Send` under std. -/// -/// This is not exported to bindings users as async is only supported in Rust. -pub trait MaybeSend {} -#[cfg(not(feature = "std"))] -impl MaybeSend for T where T: ?Sized {} diff --git a/lightning/src/util/mod.rs b/lightning/src/util/mod.rs index dcbea904b51..51e608d185f 100644 --- a/lightning/src/util/mod.rs +++ b/lightning/src/util/mod.rs @@ -20,7 +20,7 @@ pub mod mut_global; pub mod anchor_channel_reserves; -pub mod async_poll; +pub(crate) mod async_poll; #[cfg(fuzzing)] pub mod base32; #[cfg(not(fuzzing))] diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index a9f7e01ceea..34bdb056329 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -9,8 +9,9 @@ #[cfg(all(test, feature = "std"))] use crate::sync::{Arc, Mutex}; -use crate::util::async_poll::{MaybeSend, MaybeSync}; +#[cfg(test)] +use alloc::boxed::Box; #[cfg(all(test, not(feature = "std")))] use alloc::rc::Rc; @@ -53,6 +54,34 @@ trait MaybeSendableFuture: Future + MaybeSend + 'static {} #[cfg(test)] impl + MaybeSend + 'static> MaybeSendableFuture for F {} +/// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +#[cfg(feature = "std")] +pub use core::marker::Sync as MaybeSync; + +#[cfg(not(feature = "std"))] +/// Marker trait to optionally implement `Sync` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +pub trait MaybeSync {} +#[cfg(not(feature = "std"))] +impl MaybeSync for T where T: ?Sized {} + +/// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. 
+#[cfg(feature = "std")] +pub use core::marker::Send as MaybeSend; + +#[cfg(not(feature = "std"))] +/// Marker trait to optionally implement `Send` under std. +/// +/// This is not exported to bindings users as async is only supported in Rust. +pub trait MaybeSend {} +#[cfg(not(feature = "std"))] +impl MaybeSend for T where T: ?Sized {} + /// A simple [`FutureSpawner`] which holds [`Future`]s until they are manually polled via /// [`Self::poll_futures`]. #[cfg(all(test, feature = "std"))] diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index 5f032436725..807cf2bf9fd 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -37,10 +37,10 @@ use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; use crate::util::async_poll::{ - dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, + dummy_waker, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, }; use crate::util::logger::Logger; -use crate::util::native_async::FutureSpawner; +use crate::util::native_async::{FutureSpawner, MaybeSend, MaybeSync}; use crate::util::ser::{Readable, ReadableArgs, Writeable}; use crate::util::wakers::Notifier; diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 9df5b0ea966..8e66b8b0dfb 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -51,7 +51,6 @@ use crate::sign::{self, ReceiveAuthKey}; use crate::sign::{ChannelSigner, PeerStorageKey}; use crate::sync::RwLock; use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures}; -use crate::util::async_poll::MaybeSend; use crate::util::config::UserConfig; use crate::util::dyn_signer::{ DynKeysInterface, DynKeysInterfaceTrait, DynPhantomKeysInterface, DynSigner, @@ -59,6 +58,7 @@ use crate::util::dyn_signer::{ use crate::util::logger::{Logger, Record}; #[cfg(feature = "std")] use 
crate::util::mut_global::MutGlobal; +use crate::util::native_async::MaybeSend; use crate::util::persist::{KVStore, KVStoreSync, MonitorName}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner}; From 6300116e46e6b9d16e334aad95f39eff505af470 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sat, 18 Oct 2025 01:36:01 +0000 Subject: [PATCH 19/22] Add `async_poll.rs` to `lightning-block-sync` In the next commit we'll fetch blocks during initial connection in parallel, which requires a multi-future poller. Here we add a symlink to the existing `lightning` `async_poll.rs` file, making it available in `lightning-block-sync` --- lightning-block-sync/src/async_poll.rs | 1 + lightning-block-sync/src/lib.rs | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) create mode 120000 lightning-block-sync/src/async_poll.rs diff --git a/lightning-block-sync/src/async_poll.rs b/lightning-block-sync/src/async_poll.rs new file mode 120000 index 00000000000..eb85cdac697 --- /dev/null +++ b/lightning-block-sync/src/async_poll.rs @@ -0,0 +1 @@ +../../lightning/src/util/async_poll.rs \ No newline at end of file diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index c71e8ac792d..add9354a772 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -16,9 +16,11 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] #![deny(missing_docs)] -#![deny(unsafe_code)] #![cfg_attr(docsrs, feature(doc_cfg))] +extern crate alloc; +extern crate core; + #[cfg(any(feature = "rest-client", feature = "rpc-client"))] pub mod http; @@ -42,6 +44,9 @@ mod test_utils; #[cfg(any(feature = "rest-client", feature = "rpc-client"))] mod utils; +#[allow(unused)] +mod async_poll; + use crate::poll::{ChainTip, Poll, ValidatedBlockHeader}; use bitcoin::block::{Block, Header}; From 15f00c1a2130830d6578dbf415c86e379b9e48c2 Mon Sep 17 
00:00:00 2001 From: Matt Corallo Date: Sun, 7 Dec 2025 23:30:57 +0000 Subject: [PATCH 20/22] Fetch blocks from source in parallel during initial sync In `init::synchronize_listeners` we may end up spending a decent chunk of our time just fetching block data. Here we parallelize that step across up to 36 blocks at a time. On my node with bitcoind on localhost, the impact of this is somewhat muted by block deserialization being the bulk of the work, however a networked bitcoind would likely change that. Even still, fetching a batch of 36 blocks in parallel happens on my node in ~615 ms vs ~815ms in serial. --- lightning-block-sync/src/init.rs | 85 +++++++++++++++++--------------- 1 file changed, 46 insertions(+), 39 deletions(-) diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs index 6df296f6a84..52eea92e244 100644 --- a/lightning-block-sync/src/init.rs +++ b/lightning-block-sync/src/init.rs @@ -1,8 +1,9 @@ //! Utilities to assist in the initial sync required to initialize or reload Rust-Lightning objects //! from disk. -use crate::poll::{ChainPoller, Validate, ValidatedBlockHeader}; -use crate::{BlockSource, BlockSourceResult, ChainNotifier, HeaderCache}; +use crate::async_poll::{MultiResultFuturePoller, ResultFuture}; +use crate::poll::{ChainPoller, Poll, Validate, ValidatedBlockHeader}; +use crate::{BlockData, BlockSource, BlockSourceResult, ChainNotifier, HeaderCache}; use bitcoin::block::Header; use bitcoin::network::Network; @@ -150,7 +151,6 @@ where // Find differences and disconnect blocks for each listener individually. let mut chain_poller = ChainPoller::new(block_source, network); let mut chain_listeners_at_height = Vec::new(); - let mut most_common_ancestor = None; let mut most_connected_blocks = Vec::new(); let mut header_cache = HeaderCache::new(); for (old_best_block, chain_listener) in chain_listeners.drain(..) 
{ @@ -169,19 +169,53 @@ where // Keep track of the most common ancestor and all blocks connected across all listeners. chain_listeners_at_height.push((common_ancestor.height, chain_listener)); if connected_blocks.len() > most_connected_blocks.len() { - most_common_ancestor = Some(common_ancestor); most_connected_blocks = connected_blocks; } } - // Connect new blocks for all listeners at once to avoid re-fetching blocks. - if let Some(common_ancestor) = most_common_ancestor { - let chain_listener = &ChainListenerSet(chain_listeners_at_height); - let mut chain_notifier = ChainNotifier { header_cache: &mut header_cache, chain_listener }; - chain_notifier - .connect_blocks(common_ancestor, most_connected_blocks, &mut chain_poller) - .await - .map_err(|(e, _)| e)?; + while !most_connected_blocks.is_empty() { + #[cfg(not(test))] + const MAX_BLOCKS_AT_ONCE: usize = 6 * 6; // Six hours of blocks, 144MiB encoded + #[cfg(test)] + const MAX_BLOCKS_AT_ONCE: usize = 2; + + let mut fetch_block_futures = + Vec::with_capacity(core::cmp::min(MAX_BLOCKS_AT_ONCE, most_connected_blocks.len())); + for header in most_connected_blocks.iter().rev().take(MAX_BLOCKS_AT_ONCE) { + let fetch_future = chain_poller.fetch_block(header); + fetch_block_futures.push(ResultFuture::Pending(Box::pin(async move { + (header, fetch_future.await) + }))); + } + let results = MultiResultFuturePoller::new(fetch_block_futures).await.into_iter(); + + let mut fetched_blocks = [const { None }; MAX_BLOCKS_AT_ONCE]; + for ((header, block_res), result) in results.into_iter().zip(fetched_blocks.iter_mut()) { + *result = Some((header.height, block_res?)); + } + debug_assert!(fetched_blocks.iter().take(most_connected_blocks.len()).all(|r| r.is_some())); + debug_assert!(fetched_blocks.is_sorted_by_key(|r| r.as_ref().map(|(height, _)| *height).unwrap_or(u32::MAX))); + + for (listener_height, listener) in chain_listeners_at_height.iter() { + // Connect blocks for this listener. 
+ for result in fetched_blocks.iter() { + if let Some((height, block_data)) = result { + if *height > *listener_height { + match &**block_data { + BlockData::FullBlock(block) => { + listener.block_connected(&block, *height); + }, + BlockData::HeaderOnly(header_data) => { + listener.filtered_block_connected(&header_data, &[], *height); + }, + } + } + } + } + } + + most_connected_blocks + .truncate(most_connected_blocks.len().saturating_sub(MAX_BLOCKS_AT_ONCE)); } Ok((header_cache, best_header)) @@ -202,33 +236,6 @@ impl<'a, L: chain::Listen + ?Sized> chain::Listen for DynamicChainListener<'a, L } } -/// A set of dynamically sized chain listeners, each paired with a starting block height. -struct ChainListenerSet<'a, L: chain::Listen + ?Sized>(Vec<(u32, &'a L)>); - -impl<'a, L: chain::Listen + ?Sized> chain::Listen for ChainListenerSet<'a, L> { - fn block_connected(&self, block: &bitcoin::Block, height: u32) { - for (starting_height, chain_listener) in self.0.iter() { - if height > *starting_height { - chain_listener.block_connected(block, height); - } - } - } - - fn filtered_block_connected( - &self, header: &Header, txdata: &chain::transaction::TransactionData, height: u32, - ) { - for (starting_height, chain_listener) in self.0.iter() { - if height > *starting_height { - chain_listener.filtered_block_connected(header, txdata, height); - } - } - } - - fn blocks_disconnected(&self, _fork_point: BestBlock) { - unreachable!() - } -} - #[cfg(test)] mod tests { use super::*; From dcc895f268dc4b981fcf93412c09b92df718d0dc Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 8 Dec 2025 12:18:01 +0000 Subject: [PATCH 21/22] Silence "elided lifetime has a name" warnings in no-std locking --- lightning/src/sync/nostd_sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lightning/src/sync/nostd_sync.rs b/lightning/src/sync/nostd_sync.rs index 12070741918..18055d1ebe4 100644 --- a/lightning/src/sync/nostd_sync.rs +++ b/lightning/src/sync/nostd_sync.rs 
@@ -61,7 +61,7 @@ impl<'a, T: 'a> LockTestExt<'a> for Mutex<T> { } type ExclLock = MutexGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<T> { + fn unsafe_well_ordered_double_lock_self(&'a self) -> MutexGuard<'a, T> { self.lock().unwrap() } } @@ -132,7 +132,7 @@ impl<'a, T: 'a> LockTestExt<'a> for RwLock<T> { } type ExclLock = RwLockWriteGuard<'a, T>; #[inline] - fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<T> { + fn unsafe_well_ordered_double_lock_self(&'a self) -> RwLockWriteGuard<'a, T> { self.write().unwrap() } } From 9fc766e6205b975daeada2b0c264cb8a09736917 Mon Sep 17 00:00:00 2001 From: Matt Corallo <git@bluematt.me> Date: Mon, 8 Dec 2025 14:15:47 +0000 Subject: [PATCH 22/22] Use the header cache across listeners during initial disconnect In `lightning-block-sync::init::synchronize_listeners`, we may have many listeners we want to do a chain diff on. When doing so, we should make sure we utilize our header cache, rather than querying our chain source for every header we need for each listener. Here we do so, inserting into the cache as we do chain diffs. On my node with a bitcoind on localhost, this brings the calculate-differences step of `init::synchronize_listeners` from ~500ms to under 150ms. --- lightning-block-sync/src/lib.rs | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/lightning-block-sync/src/lib.rs b/lightning-block-sync/src/lib.rs index add9354a772..ab91d3f396f 100644 --- a/lightning-block-sync/src/lib.rs +++ b/lightning-block-sync/src/lib.rs @@ -198,6 +198,10 @@ pub(crate) trait Cache { /// Retrieves the block header keyed by the given block hash. fn look_up(&self, block_hash: &BlockHash) -> Option<&ValidatedBlockHeader>; + /// Inserts the given block header during a find_difference operation, implying it might not be
+ fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader); + /// Called when a block has been connected to the best chain to ensure it is available to be /// disconnected later if needed. fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader); @@ -226,6 +230,15 @@ impl Cache for HeaderCache { self.0.get(block_hash) } + fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + self.0.insert(block_hash, block_header); + + // Remove headers older than our newest header minus a week. + let best_height = self.0.iter().map(|(_, header)| header.height).max().unwrap_or(0); + let cutoff_height = best_height.saturating_sub(6 * 24 * 7); + self.0.retain(|_, header| header.height >= cutoff_height); + } + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { self.0.insert(block_hash, block_header); @@ -244,6 +257,10 @@ impl Cache for &mut HeaderCache { self.0.get(block_hash) } + fn insert_during_diff(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + (*self).insert_during_diff(block_hash, block_header); + } + fn block_connected(&mut self, block_hash: BlockHash, block_header: ValidatedBlockHeader) { + (*self).block_connected(block_hash, block_header); + } @@ -380,7 +397,7 @@ where /// First resolves `prev_best_block` to a `ValidatedBlockHeader` using the `previous_blocks` /// field as fallback if needed, then finds the common ancestor. async fn find_difference_from_best_block( - &self, current_header: ValidatedBlockHeader, prev_best_block: BestBlock, + &mut self, current_header: ValidatedBlockHeader, prev_best_block: BestBlock, chain_poller: &mut P, ) -> BlockSourceResult<ChainDifference> { // Try to resolve the header for the previous best block.
First try the block_hash, @@ -404,6 +421,7 @@ where )?; if let Ok(header) = chain_poller.get_header(block_hash, Some(height)).await { found_header = Some(header); + self.header_cache.insert_during_diff(*block_hash, header); break; } }