diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2658ff454e9..6ae6d83ddd3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -320,3 +320,19 @@ jobs: run: cargo fmt --check - name: Run rustfmt checks on lightning-tests run: cd lightning-tests && cargo fmt --check + tor-connect: + runs-on: ubuntu-latest + env: + TOOLCHAIN: 1.75.0 + steps: + - name: Checkout source code + uses: actions/checkout@v4 + - name: Install tor + run: | + sudo apt install -y tor + - name: Install Rust ${{ env.TOOLCHAIN }} toolchain + run: | + curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain ${{ env.TOOLCHAIN }} + - name: Test tor connections using lightning-net-tokio + run: | + TOR_PROXY="127.0.0.1:9050" RUSTFLAGS="--cfg=tor" cargo test --verbose --color always -p lightning-net-tokio diff --git a/.github/workflows/semver.yml b/.github/workflows/semver.yml index 03017e19320..03a8e46e8a7 100644 --- a/.github/workflows/semver.yml +++ b/.github/workflows/semver.yml @@ -13,10 +13,6 @@ jobs: steps: - name: Checkout source code uses: actions/checkout@v4 - - name: Install Rust stable toolchain - run: | - curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --profile=minimal --default-toolchain stable - rustup override set stable - name: Check SemVer with default features uses: obi1kenobi/cargo-semver-checks-action@v2 with: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bc431a3110..ad25fb10558 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -176,6 +176,28 @@ Fuzzing is heavily encouraged: you will find all related material under `fuzz/` Mutation testing is work-in-progress; any contribution there would be warmly welcomed. +### Environment Variables + +* `LDK_TEST_CONNECT_STYLE` - Override the random block connect style used in tests for deterministic runs. 
Valid values: + * `BEST_BLOCK_FIRST` + * `BEST_BLOCK_FIRST_SKIPPING_BLOCKS` + * `BEST_BLOCK_FIRST_REORGS_ONLY_TIP` + * `TRANSACTIONS_FIRST` + * `TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS` + * `HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS` + * `TRANSACTIONS_FIRST_REORGS_ONLY_TIP` + * `FULL_BLOCK_VIA_LISTEN` + * `FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN` + +* `LDK_TEST_DETERMINISTIC_HASHES` - When set to `1`, uses deterministic hash map iteration order in tests. This ensures consistent test output across runs, useful for comparing logs before and after changes. + +* `LDK_TEST_REBUILD_MGR_FROM_MONITORS` - If set to `1`, on test node reload the `ChannelManager`'s + HTLC set will be reconstructed from `Channel{Monitor}` persisted data. If `0`, test nodes will be + reloaded from persisted `ChannelManager` data using legacy code paths. This ensures consistent + test output across runs, useful for comparing logs before and after changes, since otherwise the + selection of which codepaths to be used on reload will be chosen randomly. + C/C++ Bindings -------------- diff --git a/Cargo.toml b/Cargo.toml index a0895fe1641..1eb7b572d8b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,4 +67,5 @@ check-cfg = [ "cfg(require_route_graph_test)", "cfg(simple_close)", "cfg(peer_storage)", + "cfg(tor)", ] diff --git a/fuzz/README.md b/fuzz/README.md index 4b6e0d12457..cfdab4940bc 100644 --- a/fuzz/README.md +++ b/fuzz/README.md @@ -68,6 +68,19 @@ cargo +nightly fuzz run --features "libfuzzer_fuzz" msg_ping_target Note: If you encounter a `SIGKILL` during run/build check for OOM in kernel logs and consider increasing RAM size for VM. +##### Fast builds for development + +The default build uses LTO and single codegen unit, which is slow. 
For faster iteration during +development, use the `-D` (dev) flag: + +```shell +cargo +nightly fuzz run --features "libfuzzer_fuzz" -D msg_ping_target +``` + +The `-D` flag builds in development mode with faster compilation (still has optimizations via +`opt-level = 1`). The first build will be slow as it rebuilds the standard library with +sanitizer instrumentation, but subsequent builds will be fast. + If you wish to just generate fuzzing binary executables for `libFuzzer` and not run them: ```shell cargo +nightly fuzz build --features "libfuzzer_fuzz" msg_ping_target diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index aca232471d6..f87af5c6ff5 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -155,9 +155,15 @@ impl MessageRouter for FuzzRouter { } } -pub struct TestBroadcaster {} +pub struct TestBroadcaster { + txn_broadcasted: RefCell>, +} impl BroadcasterInterface for TestBroadcaster { - fn broadcast_transactions(&self, _txs: &[&Transaction]) {} + fn broadcast_transactions(&self, txs: &[&Transaction]) { + for tx in txs { + self.txn_broadcasted.borrow_mut().push((*tx).clone()); + } + } } pub struct VecWriter(pub Vec); @@ -299,8 +305,10 @@ impl chain::Watch for TestChainMonitor { persisted_monitor: ser.0, pending_monitors: Vec::new(), }, - Ok(chain::ChannelMonitorUpdateStatus::InProgress) => { - panic!("The test currently doesn't test initial-persistence via the async pipeline") + Ok(chain::ChannelMonitorUpdateStatus::InProgress) => LatestMonitorState { + persisted_monitor_id: monitor_id, + persisted_monitor: Vec::new(), + pending_monitors: vec![(monitor_id, ser.0)], }, Ok(chain::ChannelMonitorUpdateStatus::UnrecoverableError) => panic!(), Err(()) => panic!(), @@ -332,7 +340,7 @@ impl chain::Watch for TestChainMonitor { deserialized_monitor .update_monitor( update, - &&TestBroadcaster {}, + &&TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }, &&FuzzEstimator { ret_val: 
atomic::AtomicU32::new(253) }, &self.logger, ) @@ -536,44 +544,20 @@ type ChanMan<'a> = ChannelManager< >; #[inline] -fn get_payment_secret_hash( - dest: &ChanMan, payment_id: &mut u8, -) -> Option<(PaymentSecret, PaymentHash)> { - let mut payment_hash; - for _ in 0..256 { - payment_hash = PaymentHash(Sha256::hash(&[*payment_id; 1]).to_byte_array()); - if let Ok(payment_secret) = - dest.create_inbound_payment_for_hash(payment_hash, None, 3600, None) - { - return Some((payment_secret, payment_hash)); - } - *payment_id = payment_id.wrapping_add(1); - } - None -} - -#[inline] -fn send_noret( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, -) { - send_payment(source, dest, dest_chan_id, amt, payment_id, payment_idx); +fn get_payment_secret_hash(dest: &ChanMan, payment_ctr: &mut u64) -> (PaymentSecret, PaymentHash) { + *payment_ctr += 1; + let payment_hash = PaymentHash(Sha256::hash(&[*payment_ctr as u8]).to_byte_array()); + let payment_secret = dest + .create_inbound_payment_for_hash(payment_hash, None, 3600, None) + .expect("create_inbound_payment_for_hash failed"); + (payment_secret, payment_hash) } #[inline] fn send_payment( - source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_id: &mut u8, - payment_idx: &mut u64, + source: &ChanMan, dest: &ChanMan, dest_chan_id: u64, amt: u64, payment_secret: PaymentSecret, + payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { - (secret, hash) - } else { - return true; - }; - let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() @@ -600,7 +584,6 @@ fn send_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let 
payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -615,41 +598,15 @@ fn send_payment( } } -#[inline] -fn send_hop_noret( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, -) { - send_hop_payment( - source, - middle, - middle_chan_id, - dest, - dest_chan_id, - amt, - payment_id, - payment_idx, - ); -} - #[inline] fn send_hop_payment( - source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, dest: &ChanMan, dest_chan_id: u64, - amt: u64, payment_id: &mut u8, payment_idx: &mut u64, + source: &ChanMan, middle: &ChanMan, middle_scid: u64, dest: &ChanMan, dest_scid: u64, amt: u64, + payment_secret: PaymentSecret, payment_hash: PaymentHash, payment_id: PaymentId, ) -> bool { - let (payment_secret, payment_hash) = - if let Some((secret, hash)) = get_payment_secret_hash(dest, payment_id) { - (secret, hash) - } else { - return true; - }; - let mut payment_id = [0; 32]; - payment_id[0..8].copy_from_slice(&payment_idx.to_ne_bytes()); - *payment_idx += 1; let (min_value_sendable, max_value_sendable) = source .list_usable_channels() .iter() - .find(|chan| chan.short_channel_id == Some(middle_chan_id)) + .find(|chan| chan.short_channel_id == Some(middle_scid)) .map(|chan| (chan.next_outbound_htlc_minimum_msat, chan.next_outbound_htlc_limit_msat)) .unwrap_or((0, 0)); let first_hop_fee = 50_000; @@ -663,7 +620,7 @@ fn send_hop_payment( RouteHop { pubkey: middle.get_our_node_id(), node_features: middle.node_features(), - short_channel_id: middle_chan_id, + short_channel_id: middle_scid, channel_features: middle.channel_features(), fee_msat: first_hop_fee, cltv_expiry_delta: 100, @@ -672,7 +629,7 @@ fn send_hop_payment( RouteHop { pubkey: dest.get_our_node_id(), node_features: dest.node_features(), - short_channel_id: dest_chan_id, + short_channel_id: dest_scid, channel_features: 
dest.channel_features(), fee_msat: amt, cltv_expiry_delta: 200, @@ -684,7 +641,6 @@ fn send_hop_payment( route_params: Some(route_params.clone()), }; let onion = RecipientOnionFields::secret_only(payment_secret); - let payment_id = PaymentId(payment_id); let res = source.send_payment_with_route(route, payment_hash, onion, payment_id); match res { Err(err) => { @@ -703,9 +659,29 @@ fn send_hop_payment( #[inline] pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let out = SearchingOutput::new(underlying_out); - let broadcast = Arc::new(TestBroadcaster {}); + let broadcast = Arc::new(TestBroadcaster { txn_broadcasted: RefCell::new(Vec::new()) }); let router = FuzzRouter {}; + // Read initial monitor styles from fuzz input (1 byte: 2 bits per node) + let initial_mon_styles = if !data.is_empty() { data[0] } else { 0 }; + let mon_style = [ + RefCell::new(if initial_mon_styles & 0b01 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b10 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + RefCell::new(if initial_mon_styles & 0b100 != 0 { + ChannelMonitorUpdateStatus::InProgress + } else { + ChannelMonitorUpdateStatus::Completed + }), + ]; + macro_rules! 
make_node { ($node_id: expr, $fee_estimator: expr) => {{ let logger: Arc = @@ -725,7 +701,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister { - update_ret: Mutex::new(ChannelMonitorUpdateStatus::Completed), + update_ret: Mutex::new(mon_style[$node_id as usize].borrow().clone()), }), Arc::clone(&keys_manager), )); @@ -762,9 +738,6 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }}; } - let default_mon_style = RefCell::new(ChannelMonitorUpdateStatus::Completed); - let mon_style = [default_mon_style.clone(), default_mon_style.clone(), default_mon_style]; - let reload_node = |ser: &Vec, node_id: u8, old_monitors: &TestChainMonitor, @@ -860,8 +833,21 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }; let mut channel_txn = Vec::new(); + macro_rules! complete_all_pending_monitor_updates { + ($monitor: expr) => {{ + for (channel_id, state) in $monitor.latest_monitors.lock().unwrap().iter_mut() { + for (id, data) in state.pending_monitors.drain(..) { + $monitor.chain_monitor.channel_monitor_updated(*channel_id, id).unwrap(); + if id >= state.persisted_monitor_id { + state.persisted_monitor_id = id; + state.persisted_monitor = data; + } + } + } + }}; + } macro_rules! 
make_channel { - ($source: expr, $dest: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ + ($source: expr, $dest: expr, $source_monitor: expr, $dest_monitor: expr, $dest_keys_manager: expr, $chan_id: expr) => {{ let init_dest = Init { features: $dest.init_features(), networks: None, @@ -965,12 +951,14 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; $dest.handle_funding_created($source.get_our_node_id(), &funding_created); + // Complete any pending monitor updates for dest after watch_channel + complete_all_pending_monitor_updates!($dest_monitor); - let funding_signed = { + let (funding_signed, channel_id) = { let events = $dest.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); if let MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] { - msg.clone() + (msg.clone(), msg.channel_id.clone()) } else { panic!("Wrong event type"); } @@ -984,19 +972,22 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } $source.handle_funding_signed($dest.get_our_node_id(), &funding_signed); + // Complete any pending monitor updates for source after watch_channel + complete_all_pending_monitor_updates!($source_monitor); + let events = $source.get_and_clear_pending_events(); assert_eq!(events.len(), 1); - let channel_id = if let events::Event::ChannelPending { + if let events::Event::ChannelPending { ref counterparty_node_id, - ref channel_id, + channel_id: ref event_channel_id, .. 
} = events[0] { assert_eq!(counterparty_node_id, &$dest.get_our_node_id()); - channel_id.clone() + assert_eq!(*event_channel_id, channel_id); } else { panic!("Wrong event type"); - }; + } channel_id }}; @@ -1087,8 +1078,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut nodes = [node_a, node_b, node_c]; - let chan_1_id = make_channel!(nodes[0], nodes[1], keys_manager_b, 0); - let chan_2_id = make_channel!(nodes[1], nodes[2], keys_manager_c, 1); + let chan_1_id = make_channel!(nodes[0], nodes[1], monitor_a, monitor_b, keys_manager_b, 0); + let chan_2_id = make_channel!(nodes[1], nodes[2], monitor_b, monitor_c, keys_manager_c, 1); + + // Wipe the transactions-broadcasted set to make sure we don't broadcast any transactions + // during normal operation in `test_return`. + broadcast.txn_broadcasted.borrow_mut().clear(); for node in nodes.iter() { confirm_txn!(node); @@ -1101,8 +1096,7 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap(); let chan_b_id = nodes[2].list_usable_channels()[0].channel_id; - let mut p_id: u8 = 0; - let mut p_idx: u64 = 0; + let mut p_ctr: u64 = 0; let mut chan_a_disconnected = false; let mut chan_b_disconnected = false; @@ -1115,16 +1109,24 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut node_b_ser = nodes[1].encode(); let mut node_c_ser = nodes[2].encode(); + let pending_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + let resolved_payments = RefCell::new([Vec::new(), Vec::new(), Vec::new()]); + macro_rules! test_return { () => {{ assert_eq!(nodes[0].list_channels().len(), 1); assert_eq!(nodes[1].list_channels().len(), 2); assert_eq!(nodes[2].list_channels().len(), 1); + + // At no point should we have broadcasted any transactions after the initial channel + // opens. 
+ assert!(broadcast.txn_broadcasted.borrow().is_empty()); + return; }}; } - let mut read_pos = 0; + let mut read_pos = 1; // First byte was consumed for initial mon_style macro_rules! get_slice { ($len: expr) => {{ let slice_len = $len as usize; @@ -1509,6 +1511,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { let mut claim_set = new_hash_map(); let mut events = nodes[$node].get_and_clear_pending_events(); let had_events = !events.is_empty(); + let mut pending_payments = pending_payments.borrow_mut(); + let mut resolved_payments = resolved_payments.borrow_mut(); for event in events.drain(..) { match event { events::Event::PaymentClaimable { payment_hash, .. } => { @@ -1520,11 +1524,32 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } } }, - events::Event::PaymentSent { .. } => {}, + events::Event::PaymentSent { payment_id, .. } => { + let sent_id = payment_id.unwrap(); + let idx_opt = + pending_payments[$node].iter().position(|id| *id == sent_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(sent_id); + } else { + assert!(resolved_payments[$node].contains(&sent_id)); + } + }, + events::Event::PaymentFailed { payment_id, .. } => { + let idx_opt = + pending_payments[$node].iter().position(|id| *id == payment_id); + if let Some(idx) = idx_opt { + pending_payments[$node].remove(idx); + resolved_payments[$node].push(payment_id); + } else if !resolved_payments[$node].contains(&payment_id) { + // Payment failed immediately on send, so it was never added to + // pending_payments. Add it to resolved_payments to track it. + resolved_payments[$node].push(payment_id); + } + }, events::Event::PaymentClaimed { .. } => {}, events::Event::PaymentPathSuccessful { .. } => {}, events::Event::PaymentPathFailed { .. } => {}, - events::Event::PaymentFailed { .. } => {}, events::Event::ProbeSuccessful { .. } | events::Event::ProbeFailed { .. 
} => { // Even though we don't explicitly send probes, because probes are @@ -1613,6 +1638,52 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { } }; + let send = + |source_idx: usize, dest_idx: usize, dest_chan_id, amt, payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_payment(source, dest, dest_chan_id, amt, secret, hash, id); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + succeeded + }; + let send_noret = |source_idx, dest_idx, dest_chan_id, amt, payment_ctr: &mut u64| { + send(source_idx, dest_idx, dest_chan_id, amt, payment_ctr); + }; + + let send_hop_noret = |source_idx: usize, + middle_idx: usize, + middle_scid: u64, + dest_idx: usize, + dest_scid: u64, + amt: u64, + payment_ctr: &mut u64| { + let source = &nodes[source_idx]; + let middle = &nodes[middle_idx]; + let dest = &nodes[dest_idx]; + let (secret, hash) = get_payment_secret_hash(dest, payment_ctr); + let mut id = PaymentId([0; 32]); + id.0[0..8].copy_from_slice(&payment_ctr.to_ne_bytes()); + let succeeded = send_hop_payment( + source, + middle, + middle_scid, + dest, + dest_scid, + amt, + secret, + hash, + id, + ); + if succeeded { + pending_payments.borrow_mut()[source_idx].push(id); + } + }; + let v = get_slice!(1)[0]; out.locked_write(format!("READ A BYTE! 
HANDLING INPUT {:x}...........\n", v).as_bytes()); match v { @@ -1725,93 +1796,61 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0x27 => process_ev_noret!(2, false), // 1/10th the channel size: - 0x30 => send_noret(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x31 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx), - 0x32 => send_noret(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx), - 0x33 => send_noret(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx), - 0x34 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx, - ), - 0x35 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx, - ), - - 0x38 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x39 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx), - 0x3a => send_noret(&nodes[1], &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx), - 0x3b => send_noret(&nodes[2], &nodes[1], chan_b, 1_000_000, &mut p_id, &mut p_idx), - 0x3c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000_000, &mut p_id, &mut p_idx, - ), - 0x3d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000_000, &mut p_id, &mut p_idx, - ), - - 0x40 => send_noret(&nodes[0], &nodes[1], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x41 => send_noret(&nodes[1], &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx), - 0x42 => send_noret(&nodes[1], &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x43 => send_noret(&nodes[2], &nodes[1], chan_b, 100_000, &mut p_id, &mut p_idx), - 0x44 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100_000, &mut p_id, &mut p_idx, - ), - 0x45 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100_000, &mut p_id, &mut p_idx, - ), - - 0x48 => send_noret(&nodes[0], &nodes[1], chan_a, 
10_000, &mut p_id, &mut p_idx), - 0x49 => send_noret(&nodes[1], &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx), - 0x4a => send_noret(&nodes[1], &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4b => send_noret(&nodes[2], &nodes[1], chan_b, 10_000, &mut p_id, &mut p_idx), - 0x4c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 10_000, &mut p_id, &mut p_idx, - ), - 0x4d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10_000, &mut p_id, &mut p_idx, - ), - - 0x50 => send_noret(&nodes[0], &nodes[1], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x51 => send_noret(&nodes[1], &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx), - 0x52 => send_noret(&nodes[1], &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x53 => send_noret(&nodes[2], &nodes[1], chan_b, 1_000, &mut p_id, &mut p_idx), - 0x54 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1_000, &mut p_id, &mut p_idx, - ), - 0x55 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1_000, &mut p_id, &mut p_idx, - ), - - 0x58 => send_noret(&nodes[0], &nodes[1], chan_a, 100, &mut p_id, &mut p_idx), - 0x59 => send_noret(&nodes[1], &nodes[0], chan_a, 100, &mut p_id, &mut p_idx), - 0x5a => send_noret(&nodes[1], &nodes[2], chan_b, 100, &mut p_id, &mut p_idx), - 0x5b => send_noret(&nodes[2], &nodes[1], chan_b, 100, &mut p_id, &mut p_idx), - 0x5c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 100, &mut p_id, &mut p_idx, - ), - 0x5d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 100, &mut p_id, &mut p_idx, - ), - - 0x60 => send_noret(&nodes[0], &nodes[1], chan_a, 10, &mut p_id, &mut p_idx), - 0x61 => send_noret(&nodes[1], &nodes[0], chan_a, 10, &mut p_id, &mut p_idx), - 0x62 => send_noret(&nodes[1], &nodes[2], chan_b, 10, &mut p_id, &mut p_idx), - 0x63 => send_noret(&nodes[2], &nodes[1], chan_b, 10, &mut p_id, &mut p_idx), - 0x64 => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 
10, &mut p_id, &mut p_idx, - ), - 0x65 => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 10, &mut p_id, &mut p_idx, - ), - - 0x68 => send_noret(&nodes[0], &nodes[1], chan_a, 1, &mut p_id, &mut p_idx), - 0x69 => send_noret(&nodes[1], &nodes[0], chan_a, 1, &mut p_id, &mut p_idx), - 0x6a => send_noret(&nodes[1], &nodes[2], chan_b, 1, &mut p_id, &mut p_idx), - 0x6b => send_noret(&nodes[2], &nodes[1], chan_b, 1, &mut p_id, &mut p_idx), - 0x6c => send_hop_noret( - &nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut p_id, &mut p_idx, - ), - 0x6d => send_hop_noret( - &nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut p_id, &mut p_idx, - ), + 0x30 => send_noret(0, 1, chan_a, 10_000_000, &mut p_ctr), + 0x31 => send_noret(1, 0, chan_a, 10_000_000, &mut p_ctr), + 0x32 => send_noret(1, 2, chan_b, 10_000_000, &mut p_ctr), + 0x33 => send_noret(2, 1, chan_b, 10_000_000, &mut p_ctr), + 0x34 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000_000, &mut p_ctr), + 0x35 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000_000, &mut p_ctr), + + 0x38 => send_noret(0, 1, chan_a, 1_000_000, &mut p_ctr), + 0x39 => send_noret(1, 0, chan_a, 1_000_000, &mut p_ctr), + 0x3a => send_noret(1, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3b => send_noret(2, 1, chan_b, 1_000_000, &mut p_ctr), + 0x3c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000_000, &mut p_ctr), + 0x3d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000_000, &mut p_ctr), + + 0x40 => send_noret(0, 1, chan_a, 100_000, &mut p_ctr), + 0x41 => send_noret(1, 0, chan_a, 100_000, &mut p_ctr), + 0x42 => send_noret(1, 2, chan_b, 100_000, &mut p_ctr), + 0x43 => send_noret(2, 1, chan_b, 100_000, &mut p_ctr), + 0x44 => send_hop_noret(0, 1, chan_a, 2, chan_b, 100_000, &mut p_ctr), + 0x45 => send_hop_noret(2, 1, chan_b, 0, chan_a, 100_000, &mut p_ctr), + + 0x48 => send_noret(0, 1, chan_a, 10_000, &mut p_ctr), + 0x49 => send_noret(1, 0, chan_a, 10_000, &mut p_ctr), + 0x4a => send_noret(1, 2, chan_b, 10_000, &mut p_ctr), + 0x4b 
=> send_noret(2, 1, chan_b, 10_000, &mut p_ctr), + 0x4c => send_hop_noret(0, 1, chan_a, 2, chan_b, 10_000, &mut p_ctr), + 0x4d => send_hop_noret(2, 1, chan_b, 0, chan_a, 10_000, &mut p_ctr), + + 0x50 => send_noret(0, 1, chan_a, 1_000, &mut p_ctr), + 0x51 => send_noret(1, 0, chan_a, 1_000, &mut p_ctr), + 0x52 => send_noret(1, 2, chan_b, 1_000, &mut p_ctr), + 0x53 => send_noret(2, 1, chan_b, 1_000, &mut p_ctr), + 0x54 => send_hop_noret(0, 1, chan_a, 2, chan_b, 1_000, &mut p_ctr), + 0x55 => send_hop_noret(2, 1, chan_b, 0, chan_a, 1_000, &mut p_ctr), + + 0x58 => send_noret(0, 1, chan_a, 100, &mut p_ctr), + 0x59 => send_noret(1, 0, chan_a, 100, &mut p_ctr), + 0x5a => send_noret(1, 2, chan_b, 100, &mut p_ctr), + 0x5b => send_noret(2, 1, chan_b, 100, &mut p_ctr), + 0x5c => send_hop_noret(0, 1, chan_a, 2, chan_b, 100, &mut p_ctr), + 0x5d => send_hop_noret(2, 1, chan_b, 0, chan_a, 100, &mut p_ctr), + + 0x60 => send_noret(0, 1, chan_a, 10, &mut p_ctr), + 0x61 => send_noret(1, 0, chan_a, 10, &mut p_ctr), + 0x62 => send_noret(1, 2, chan_b, 10, &mut p_ctr), + 0x63 => send_noret(2, 1, chan_b, 10, &mut p_ctr), + 0x64 => send_hop_noret(0, 1, chan_a, 2, chan_b, 10, &mut p_ctr), + 0x65 => send_hop_noret(2, 1, chan_b, 0, chan_a, 10, &mut p_ctr), + + 0x68 => send_noret(0, 1, chan_a, 1, &mut p_ctr), + 0x69 => send_noret(1, 0, chan_a, 1, &mut p_ctr), + 0x6a => send_noret(1, 2, chan_b, 1, &mut p_ctr), + 0x6b => send_noret(2, 1, chan_b, 1, &mut p_ctr), + 0x6c => send_hop_noret(0, 1, chan_a, 2, chan_b, 1, &mut p_ctr), + 0x6d => send_hop_noret(2, 1, chan_b, 0, chan_a, 1, &mut p_ctr), 0x80 => { let mut max_feerate = last_htlc_clear_fee_a; @@ -1860,11 +1899,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { 0xa0 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + 
SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( &chan_a_id, @@ -1882,11 +1918,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa1 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_a_id, @@ -1904,11 +1937,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa2 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( &chan_b_id, @@ -1926,11 +1956,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { }, 0xa3 => { let input = FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 1).unwrap(); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(10_000), - inputs: vec![input], - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(10_000), vec![input], None); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( &chan_b_id, @@ -1958,12 +1985,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) 
.unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[0].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_a.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[0].splice_channel( @@ -1989,12 +2014,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[1].splice_channel( @@ -2020,12 +2043,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[1].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_b.ret_val.load(atomic::Ordering::Acquire); if let Err(e) 
= nodes[1].splice_channel( @@ -2051,12 +2072,10 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { .map(|chan| chan.outbound_capacity_msat) .unwrap(); if outbound_capacity_msat >= 20_000_000 { - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), - script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS), + script_pubkey: coinbase_tx.output[2].script_pubkey.clone(), + }]); let funding_feerate_sat_per_kw = fee_est_c.ret_val.load(atomic::Ordering::Acquire); if let Err(e) = nodes[2].splice_channel( @@ -2243,16 +2262,12 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) { // Finally, make sure that at least one end of each channel can make a substantial payment assert!( - send_payment(&nodes[0], &nodes[1], chan_a, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[1], &nodes[0], chan_a, 10_000_000, &mut p_id, &mut p_idx - ) + send(0, 1, chan_a, 10_000_000, &mut p_ctr) + || send(1, 0, chan_a, 10_000_000, &mut p_ctr) ); assert!( - send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut p_id, &mut p_idx) - || send_payment( - &nodes[2], &nodes[1], chan_b, 10_000_000, &mut p_id, &mut p_idx - ) + send(1, 2, chan_b, 10_000_000, &mut p_ctr) + || send(2, 1, chan_b, 10_000_000, &mut p_ctr) ); last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); diff --git a/fuzz/src/router.rs b/fuzz/src/router.rs index af29a0221a9..2e5b15fc7f4 100644 --- a/fuzz/src/router.rs +++ b/fuzz/src/router.rs @@ -31,6 +31,7 @@ use lightning::types::features::{BlindedHopFeatures, Bolt12InvoiceFeatures}; use lightning::util::config::UserConfig; use lightning::util::hash_tables::*; use lightning::util::ser::LengthReadable; +use lightning::util::wakers::Notifier; use bitcoin::hashes::Hash; use 
bitcoin::network::Network; @@ -88,12 +89,11 @@ impl InputData { } } -struct FuzzChainSource<'a, 'b, Out: test_logger::Output> { +struct FuzzChainSource { input: Arc, - net_graph: &'a NetworkGraph<&'b test_logger::TestLogger>, } -impl UtxoLookup for FuzzChainSource<'_, '_, Out> { - fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult { +impl UtxoLookup for FuzzChainSource { + fn get_utxo(&self, _chain_hash: &ChainHash, _scid: u64, notifier: Arc) -> UtxoResult { let input_slice = self.input.get_slice(2); if input_slice.is_none() { return UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)); @@ -107,17 +107,17 @@ impl UtxoLookup for FuzzChainSource<'_, '_, Out> { &[0, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)), &[1, _] => UtxoResult::Sync(Err(UtxoLookupError::UnknownTx)), &[2, _] => { - let future = UtxoFuture::new(); - future.resolve_without_forwarding(self.net_graph, Ok(txo_res)); + let future = UtxoFuture::new(notifier); + future.resolve(Ok(txo_res)); UtxoResult::Async(future.clone()) }, &[3, _] => { - let future = UtxoFuture::new(); - future.resolve_without_forwarding(self.net_graph, Err(UtxoLookupError::UnknownTx)); + let future = UtxoFuture::new(notifier); + future.resolve(Err(UtxoLookupError::UnknownTx)); UtxoResult::Async(future.clone()) }, &[4, _] => { - UtxoResult::Async(UtxoFuture::new()) // the future will never resolve + UtxoResult::Async(UtxoFuture::new(notifier)) // the future will never resolve }, &[..] 
=> UtxoResult::Sync(Ok(txo_res)), } @@ -197,7 +197,7 @@ pub fn do_test(data: &[u8], out: Out) { let our_pubkey = get_pubkey!(); let net_graph = NetworkGraph::new(Network::Bitcoin, &logger); - let chain_source = FuzzChainSource { input: Arc::clone(&input), net_graph: &net_graph }; + let chain_source = FuzzChainSource { input: Arc::clone(&input) }; let mut node_pks = new_hash_map(); let mut scid = 42; @@ -335,9 +335,7 @@ pub fn do_test(data: &[u8], out: Out) { node_pks.insert(get_pubkey_from_node_id!(msg.node_id_1), ()); node_pks.insert(get_pubkey_from_node_id!(msg.node_id_2), ()); let _ = net_graph - .update_channel_from_unsigned_announcement::<&FuzzChainSource<'_, '_, Out>>( - &msg, &None, - ); + .update_channel_from_unsigned_announcement::<&FuzzChainSource>(&msg, &None); }, 2 => { let msg = diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index aae738ab1c1..c38d6dfe080 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -64,6 +64,7 @@ use lightning::util::persist::{ SCORER_PERSISTENCE_PRIMARY_NAMESPACE, SCORER_PERSISTENCE_SECONDARY_NAMESPACE, }; use lightning::util::sweep::{OutputSweeper, OutputSweeperSync}; +use lightning::util::wakers::Future; #[cfg(feature = "std")] use lightning::util::wakers::Sleeper; use lightning_rapid_gossip_sync::RapidGossipSync; @@ -171,6 +172,16 @@ const SWEEPER_TIMER: Duration = Duration::from_secs(30); #[cfg(test)] const SWEEPER_TIMER: Duration = Duration::from_secs(1); +#[cfg(not(test))] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(15); +#[cfg(test)] +const FIRST_ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::ZERO; + +#[cfg(not(test))] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(60 * 10); +#[cfg(test)] +const ARCHIVE_STALE_MONITORS_TIMER: Duration = Duration::from_secs(1); + /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement const 
fn min_duration(a: Duration, b: Duration) -> Duration { if a.as_nanos() < b.as_nanos() { @@ -235,6 +246,14 @@ where GossipSync::None => None, } } + + fn validation_completion_future(&self) -> Option { + match self { + GossipSync::P2P(gossip_sync) => Some(gossip_sync.validation_completion_future()), + GossipSync::Rapid(_) => None, + GossipSync::None => None, + } + } } /// This is not exported to bindings users as the bindings concretize everything and have constructors for us @@ -520,12 +539,14 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, + F: Future + Unpin, > { pub a: A, pub b: B, pub c: C, pub d: D, pub e: E, + pub f: F, } pub(crate) enum SelectorOutput { @@ -534,6 +555,7 @@ pub(crate) mod futures_util { C, D, E, + F, } impl< @@ -542,7 +564,8 @@ pub(crate) mod futures_util { C: Future + Unpin, D: Future + Unpin, E: Future + Unpin, - > Future for Selector + F: Future + Unpin, + > Future for Selector { type Output = SelectorOutput; fn poll( @@ -580,6 +603,12 @@ pub(crate) mod futures_util { }, Poll::Pending => {}, } + match Pin::new(&mut self.f).poll(ctx) { + Poll::Ready(()) => { + return Poll::Ready(SelectorOutput::F); + }, + Poll::Pending => {}, + } Poll::Pending } } @@ -606,6 +635,12 @@ pub(crate) mod futures_util { } } + impl + Unpin> From> for OptionalSelector { + fn from(optional_future: Option) -> Self { + Self { optional_future } + } + } + // If we want to poll a future without an async context to figure out if it has completed or // not without awaiting, we need a Waker, which needs a vtable...we fill it with dummy values // but sadly there's a good bit of boilerplate here. 
@@ -1018,8 +1053,10 @@ where let mut last_scorer_persist_call = sleeper(SCORER_PERSIST_TIMER); let mut last_rebroadcast_call = sleeper(REBROADCAST_TIMER); let mut last_sweeper_call = sleeper(SWEEPER_TIMER); + let mut last_archive_call = sleeper(FIRST_ARCHIVE_STALE_MONITORS_TIMER); let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut have_archived = false; let mut last_forwards_processing_call = sleeper(batch_delay.get()); @@ -1058,18 +1095,13 @@ where if mobile_interruptable_platform { await_start = Some(sleeper(Duration::from_secs(1))); } - let om_fut = if let Some(om) = onion_messenger.as_ref() { - let fut = om.get_om().get_update_future(); - OptionalSelector { optional_future: Some(fut) } - } else { - OptionalSelector { optional_future: None } - }; - let lm_fut = if let Some(lm) = liquidity_manager.as_ref() { - let fut = lm.get_lm().get_pending_msgs_or_needs_persist_future(); - OptionalSelector { optional_future: Some(fut) } - } else { - OptionalSelector { optional_future: None } - }; + let om_fut: OptionalSelector<_> = + onion_messenger.as_ref().map(|om| om.get_om().get_update_future()).into(); + let lm_fut: OptionalSelector<_> = liquidity_manager + .as_ref() + .map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future()) + .into(); + let gv_fut: OptionalSelector<_> = gossip_sync.validation_completion_future().into(); let needs_processing = channel_manager.get_cm().needs_pending_htlc_processing(); let sleep_delay = match (needs_processing, mobile_interruptable_platform) { (true, true) => batch_delay.get().min(Duration::from_millis(100)), @@ -1083,9 +1115,14 @@ where c: chain_monitor.get_update_future(), d: om_fut, e: lm_fut, + f: gv_fut, }; match fut.await { - SelectorOutput::B | SelectorOutput::C | SelectorOutput::D | SelectorOutput::E => {}, + SelectorOutput::B + | SelectorOutput::C + | SelectorOutput::D + | SelectorOutput::E + | SelectorOutput::F => {}, SelectorOutput::A(exit) => { if exit { break; @@ -1147,11 +1184,31 @@ where 
log_trace!(logger, "Done persisting ChannelManager."); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. + let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = { + match check_and_reset_sleeper(&mut last_archive_call, || sleeper(archive_timer)) { + Some(false) => true, + Some(true) => break, + None => false, + } + }; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.archive_fully_resolved_channel_monitors(); + have_archived = true; + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if gossip_sync.prunable_network_graph().is_some() { NETWORK_PRUNE_TIMER } else { @@ -1601,8 +1658,10 @@ impl BackgroundProcessor { let mut last_scorer_persist_call = Instant::now(); let mut last_rebroadcast_call = Instant::now(); let mut last_sweeper_call = Instant::now(); + let mut last_archive_call = Instant::now(); let mut have_pruned = false; let mut have_decayed_scorer = false; + let mut have_archived = false; let mut cur_batch_delay = batch_delay.get(); let mut 
last_forwards_processing_call = Instant::now(); @@ -1635,28 +1694,18 @@ impl BackgroundProcessor { log_trace!(logger, "Terminating background processor."); break; } - let sleeper = match (onion_messenger.as_ref(), liquidity_manager.as_ref()) { - (Some(om), Some(lm)) => Sleeper::from_four_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - &lm.get_lm().get_pending_msgs_or_needs_persist_future(), - ), - (Some(om), None) => Sleeper::from_three_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &om.get_om().get_update_future(), - ), - (None, Some(lm)) => Sleeper::from_three_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - &lm.get_lm().get_pending_msgs_or_needs_persist_future(), - ), - (None, None) => Sleeper::from_two_futures( - &channel_manager.get_cm().get_event_or_persistence_needed_future(), - &chain_monitor.get_update_future(), - ), - }; + let om_fut = onion_messenger.as_ref().map(|om| om.get_om().get_update_future()); + let lm_fut = liquidity_manager + .as_ref() + .map(|lm| lm.get_lm().get_pending_msgs_or_needs_persist_future()); + let gv_fut = gossip_sync.validation_completion_future(); + let always_futures = [ + channel_manager.get_cm().get_event_or_persistence_needed_future(), + chain_monitor.get_update_future(), + ]; + let futures = always_futures.into_iter().chain(om_fut).chain(lm_fut).chain(gv_fut); + let sleeper = Sleeper::from_futures(futures); + let batch_delay = if channel_manager.get_cm().needs_pending_htlc_processing() { batch_delay.get() } else { @@ -1691,11 +1740,26 @@ impl BackgroundProcessor { }); } - // Note that we want to run a graph prune once not long after startup before - // falling back to our usual hourly prunes. This avoids short-lived clients never - // pruning their network graph. 
We run once 60 seconds after startup before - // continuing our normal cadence. For RGS, since 60 seconds is likely too long, - // we prune after an initial sync completes. + // Note that we want to archive stale ChannelMonitors and run a network graph prune once + // not long after startup before falling back to their usual infrequent runs. This avoids + // short-lived clients never archiving stale ChannelMonitors or pruning their network + // graph. For network graph pruning, in the case of RGS sync, we run a prune immediately + // after initial sync completes, otherwise we do so on a timer which should be long enough + // to give us a chance to get most of the network graph from our peers. + let archive_timer = if have_archived { + ARCHIVE_STALE_MONITORS_TIMER + } else { + FIRST_ARCHIVE_STALE_MONITORS_TIMER + }; + let archive_timer_elapsed = last_archive_call.elapsed() > archive_timer; + if archive_timer_elapsed { + log_trace!(logger, "Archiving stale ChannelMonitors."); + chain_monitor.archive_fully_resolved_channel_monitors(); + have_archived = true; + last_archive_call = Instant::now(); + log_trace!(logger, "Archived stale ChannelMonitors."); + } + let prune_timer = if have_pruned { NETWORK_PRUNE_TIMER } else { FIRST_NETWORK_PRUNE_TIMER }; let prune_timer_elapsed = last_prune_call.elapsed() > prune_timer; @@ -3698,4 +3762,127 @@ mod tests { exit_sender.send(()).unwrap(); t1.await.unwrap().unwrap(); } + + #[test] + fn test_monitor_archive() { + let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive"); + // Open a channel, but don't confirm it so that it prunes immediately on FC. 
+ open_channel!(nodes[0], nodes[1], 100000); + + let data_dir = nodes[1].kv_store.get_data_dir(); + let persister = Arc::new(Persister::new(data_dir)); + let event_handler = |_: _| Ok(()); + let bp = BackgroundProcessor::start( + persister, + event_handler, + Arc::clone(&nodes[1].chain_monitor), + Arc::clone(&nodes[1].node), + Some(Arc::clone(&nodes[1].messenger)), + nodes[1].p2p_gossip_sync(), + Arc::clone(&nodes[1].peer_manager), + Some(Arc::clone(&nodes[1].liquidity_manager)), + Some(Arc::clone(&nodes[1].sweeper)), + Arc::clone(&nodes[1].logger), + Some(Arc::clone(&nodes[1].scorer)), + ); + + let dir = format!("{}_persister_1/monitors", &persist_dir); + let mut mons = std::fs::read_dir(&dir).unwrap(); + let mut mon = mons.next().unwrap().unwrap(); + if mon.path().to_str().unwrap().ends_with(".tmp") { + mon = mons.next().unwrap().unwrap(); + assert_eq!(mon.path().extension(), None); + } + assert!(mons.next().is_none()); + + // Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after + it's force-closed (at least on node B, which didn't put their money into it). + nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); + loop { + let mut mons = std::fs::read_dir(&dir).unwrap(); + if let Some(new_mon) = mons.next() { + let mut new_mon = new_mon.unwrap(); + if new_mon.path().to_str().unwrap().ends_with(".tmp") { + new_mon = mons.next().unwrap().unwrap(); + assert_eq!(new_mon.path().extension(), None); + } + assert_eq!(new_mon.path(), mon.path()); + assert!(mons.next().is_none()); + } else { + break; + } + } + + bp.stop().unwrap(); + } + + #[tokio::test] + #[cfg(not(c_bindings))] + async fn test_monitor_archive_async() { + let (persist_dir, nodes) = create_nodes(2, "test_monitor_archive_async"); + // Open a channel, but don't confirm it so that it prunes immediately on FC. 
+ open_channel!(nodes[0], nodes[1], 100000); + + let kv_store = KVStoreSyncWrapper(Arc::clone(&nodes[0].kv_store)); + let sweeper_async: &'static OutputSweeper<_, _, _, _, _, _, _> = unsafe { + &*(nodes[0].sweeper.sweeper_async() as *const OutputSweeper<_, _, _, _, _, _, _>) + as &'static OutputSweeper<_, _, _, _, _, _, _> + }; + let (exit_sender, exit_receiver) = tokio::sync::watch::channel(()); + let bp_future = tokio::spawn(super::process_events_async( + kv_store, + move |_: Event| async move { Ok(()) }, + Arc::clone(&nodes[1].chain_monitor), + Arc::clone(&nodes[1].node), + crate::NO_ONION_MESSENGER, + nodes[1].no_gossip_sync(), + Arc::clone(&nodes[1].peer_manager), + crate::NO_LIQUIDITY_MANAGER, + Some(sweeper_async), + Arc::clone(&nodes[1].logger), + Some(Arc::clone(&nodes[1].scorer)), + move |dur: Duration| { + let mut exit_receiver = exit_receiver.clone(); + Box::pin(async move { + tokio::select! { + _ = tokio::time::sleep(dur) => false, + _ = exit_receiver.changed() => true, + } + }) + }, + false, + || Some(Duration::ZERO), + )); + + let dir = format!("{}_persister_1/monitors", &persist_dir); + let mut mons = std::fs::read_dir(&dir).unwrap(); + let mut mon = mons.next().unwrap().unwrap(); + if mon.path().to_str().unwrap().ends_with(".tmp") { + mon = mons.next().unwrap().unwrap(); + assert_eq!(mon.path().extension(), None); + } + assert!(mons.next().is_none()); + + // Because the channel wasn't funded, we'll archive the ChannelMonitor immediately after + it's force-closed (at least on node B, which didn't put their money into it). 
+ nodes[1].node.force_close_all_channels_broadcasting_latest_txn("".to_owned()); + loop { + let mut mons = std::fs::read_dir(&dir).unwrap(); + if let Some(new_mon) = mons.next() { + let mut new_mon = new_mon.unwrap(); + if new_mon.path().to_str().unwrap().ends_with(".tmp") { + new_mon = mons.next().unwrap().unwrap(); + assert_eq!(new_mon.path().extension(), None); + } + assert_eq!(new_mon.path(), mon.path()); + assert!(mons.next().is_none()); + } else { + break; + } + tokio::task::yield_now().await; + } + + exit_sender.send(()).unwrap(); + bp_future.await.unwrap().unwrap(); + } } diff --git a/lightning-block-sync/src/gossip.rs b/lightning-block-sync/src/gossip.rs index 596098350c7..263fa4027ff 100644 --- a/lightning-block-sync/src/gossip.rs +++ b/lightning-block-sync/src/gossip.rs @@ -9,11 +9,9 @@ use bitcoin::constants::ChainHash; use bitcoin::hash_types::BlockHash; use bitcoin::transaction::{OutPoint, TxOut}; -use lightning::ln::peer_handler::APeerManager; -use lightning::routing::gossip::{NetworkGraph, P2PGossipSync}; use lightning::routing::utxo::{UtxoFuture, UtxoLookup, UtxoLookupError, UtxoResult}; -use lightning::util::logger::Logger; use lightning::util::native_async::FutureSpawner; +use lightning::util::wakers::Notifier; use std::collections::VecDeque; use std::future::Future; @@ -49,8 +47,12 @@ pub trait UtxoSource: BlockSource + 'static { pub struct TokioSpawner; #[cfg(feature = "tokio")] impl FutureSpawner for TokioSpawner { - fn spawn + Send + 'static>(&self, future: T) { - tokio::spawn(future); + type E = tokio::task::JoinError; + type SpawnedFutureResult = tokio::task::JoinHandle; + fn spawn + Send + 'static>( + &self, future: F, + ) -> Self::SpawnedFutureResult { + tokio::spawn(future) } } @@ -127,46 +129,28 @@ impl< /// value of 1024 should more than suffice), and ensure you have sufficient file descriptors /// available on both Bitcoin Core and your LDK application for each request to hold its own /// connection. 
-pub struct GossipVerifier< - S: FutureSpawner, - Blocks: Deref + Send + Sync + 'static + Clone, - L: Deref + Send + Sync + 'static, -> where +pub struct GossipVerifier +where Blocks::Target: UtxoSource, - L::Target: Logger, { source: Blocks, - peer_manager_wake: Arc, - gossiper: Arc>, Arc, L>>, spawn: S, block_cache: Arc>>, } const BLOCK_CACHE_SIZE: usize = 5; -impl - GossipVerifier +impl GossipVerifier where Blocks::Target: UtxoSource, - L::Target: Logger, { - /// Constructs a new [`GossipVerifier`]. + /// Constructs a new [`GossipVerifier`] for use in a [`P2PGossipSync`]. /// - /// This is expected to be given to a [`P2PGossipSync`] (initially constructed with `None` for - /// the UTXO lookup) via [`P2PGossipSync::add_utxo_lookup`]. - pub fn new( - source: Blocks, spawn: S, gossiper: Arc>, Arc, L>>, - peer_manager: APM, - ) -> Self - where - APM::Target: APeerManager, - { - let peer_manager_wake = Arc::new(move || peer_manager.as_ref().process_events()); + /// [`P2PGossipSync`]: lightning::routing::gossip::P2PGossipSync + pub fn new(source: Blocks, spawn: S) -> Self { Self { source, spawn, - gossiper, - peer_manager_wake, block_cache: Arc::new(Mutex::new(VecDeque::with_capacity(BLOCK_CACHE_SIZE))), } } @@ -255,11 +239,9 @@ where } } -impl Deref - for GossipVerifier +impl Deref for GossipVerifier where Blocks::Target: UtxoSource, - L::Target: Logger, { type Target = Self; fn deref(&self) -> &Self { @@ -267,23 +249,18 @@ where } } -impl UtxoLookup - for GossipVerifier +impl UtxoLookup for GossipVerifier where Blocks::Target: UtxoSource, - L::Target: Logger, { - fn get_utxo(&self, _chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult { - let res = UtxoFuture::new(); + fn get_utxo(&self, _chain_hash: &ChainHash, scid: u64, notifier: Arc) -> UtxoResult { + let res = UtxoFuture::new(notifier); let fut = res.clone(); let source = self.source.clone(); - let gossiper = Arc::clone(&self.gossiper); let block_cache = Arc::clone(&self.block_cache); - let pmw = 
Arc::clone(&self.peer_manager_wake); - self.spawn.spawn(async move { - let res = Self::retrieve_utxo(source, block_cache, short_channel_id).await; - fut.resolve(gossiper.network_graph(), &*gossiper, res); - (pmw)(); + let _not_polled = self.spawn.spawn(async move { + let res = Self::retrieve_utxo(source, block_cache, scid).await; + fut.resolve(res); }); UtxoResult::Async(res) } diff --git a/lightning-dns-resolver/src/lib.rs b/lightning-dns-resolver/src/lib.rs index 125d4316d12..d9af330328e 100644 --- a/lightning-dns-resolver/src/lib.rs +++ b/lightning-dns-resolver/src/lib.rs @@ -175,6 +175,7 @@ mod test { use lightning::onion_message::messenger::{ AOnionMessenger, Destination, MessageRouter, OnionMessagePath, OnionMessenger, }; + use lightning::routing::router::DEFAULT_PAYMENT_DUMMY_HOPS; use lightning::sign::{KeysManager, NodeSigner, ReceiveAuthKey, Recipient}; use lightning::types::features::InitFeatures; use lightning::types::payment::PaymentHash; @@ -236,6 +237,7 @@ mod test { recipient, local_node_receive_key, context, + false, &keys, secp_ctx, )]) @@ -345,6 +347,7 @@ mod test { payer_id, receive_key, query_context, + false, &*payer_keys, &secp_ctx, ); @@ -419,6 +422,12 @@ mod test { let updates = get_htlc_update_msgs(&nodes[0], &payee_id); nodes[1].node.handle_update_add_htlc(payer_id, &updates.update_add_htlcs[0]); do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + assert!(nodes[1].node.needs_pending_htlc_processing()); + nodes[1].node.process_pending_htlc_forwards(); + } + expect_and_process_pending_htlcs(&nodes[1], false); let claimable_events = nodes[1].node.get_and_clear_pending_events(); diff --git a/lightning-invoice/src/lib.rs b/lightning-invoice/src/lib.rs index d96d730fac0..a83130ab799 100644 --- a/lightning-invoice/src/lib.rs +++ b/lightning-invoice/src/lib.rs @@ -54,7 +54,7 @@ use core::time::Duration; use serde::{de::Error, Deserialize, Deserializer, 
Serialize, Serializer}; #[doc(no_inline)] -pub use lightning_types::payment::PaymentSecret; +pub use lightning_types::payment::{PaymentHash, PaymentSecret}; #[doc(no_inline)] pub use lightning_types::routing::{RouteHint, RouteHintHop, RoutingFees}; use lightning_types::string::UntrustedString; @@ -1460,8 +1460,9 @@ impl Bolt11Invoice { } /// Returns the hash to which we will receive the preimage on completion of the payment - pub fn payment_hash(&self) -> &sha256::Hash { - &self.signed_invoice.payment_hash().expect("checked by constructor").0 + pub fn payment_hash(&self) -> PaymentHash { + let hash = self.signed_invoice.payment_hash().expect("checked by constructor").0; + PaymentHash(hash.to_byte_array()) } /// Return the description or a hash of it for longer ones @@ -2339,7 +2340,7 @@ mod test { sha256::Hash::from_slice(&[3; 32][..]).unwrap() )) ); - assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&[21; 32][..]).unwrap()); + assert_eq!(invoice.payment_hash(), PaymentHash([21; 32])); assert_eq!(invoice.payment_secret(), &PaymentSecret([42; 32])); let mut expected_features = Bolt11InvoiceFeatures::empty(); diff --git a/lightning-liquidity/src/lsps5/event.rs b/lightning-liquidity/src/lsps5/event.rs index c12273808ef..30e3aea5687 100644 --- a/lightning-liquidity/src/lsps5/event.rs +++ b/lightning-liquidity/src/lsps5/event.rs @@ -30,9 +30,9 @@ pub enum LSPS5ServiceEvent { /// via their registered webhook. /// /// The LSP should send an HTTP POST to the [`url`], using the - /// JSON-serialized [`notification`] as the body and including the `headers`. - /// If the HTTP request fails, the LSP may implement a retry policy according to its - /// implementation preferences. + /// JSON-serialized [`notification`] (via [`WebhookNotification::to_request_body`]) as the body + /// and including the `headers`. If the HTTP request fails, the LSP may implement a retry + /// policy according to its implementation preferences. 
/// /// The notification is signed using the LSP's node ID to ensure authenticity /// when received by the client. The client verifies this signature using diff --git a/lightning-liquidity/src/lsps5/msgs.rs b/lightning-liquidity/src/lsps5/msgs.rs index e457c299bfe..363a3255f92 100644 --- a/lightning-liquidity/src/lsps5/msgs.rs +++ b/lightning-liquidity/src/lsps5/msgs.rs @@ -565,6 +565,12 @@ impl WebhookNotification { pub fn onion_message_incoming() -> Self { Self { method: WebhookNotificationMethod::LSPS5OnionMessageIncoming } } + + /// Encodes this notification into JSON which can be sent as the body of an HTTP request to + /// deliver the notification. + pub fn to_request_body(&self) -> String { + serde_json::to_string(self).unwrap() + } } impl Serialize for WebhookNotification { diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs index e4ace27b715..2e469d149b0 100644 --- a/lightning-liquidity/tests/lsps2_integration_tests.rs +++ b/lightning-liquidity/tests/lsps2_integration_tests.rs @@ -1211,7 +1211,7 @@ fn client_trusts_lsp_end_to_end_test() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1684,7 +1684,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1714,7 +1714,7 @@ fn late_payment_forwarded_and_safe_after_force_close_does_not_broadcast() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1875,7 +1875,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { .node .pay_for_bolt11_invoice( &invoice, - 
PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), @@ -1905,7 +1905,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { *requested_next_hop_scid, *intercept_id, *expected_outbound_amount_msat, - PaymentHash(invoice.payment_hash().to_byte_array()), + invoice.payment_hash(), ) .unwrap(); }, @@ -1984,7 +1984,7 @@ fn htlc_timeout_before_client_claim_results_in_handling_failed() { match &client_events[0] { Event::HTLCHandlingFailed { failure_type, .. } => match failure_type { lightning::events::HTLCHandlingFailureType::Receive { payment_hash } => { - assert_eq!(*payment_hash, PaymentHash(invoice.payment_hash().to_byte_array())); + assert_eq!(*payment_hash, invoice.payment_hash()); }, _ => panic!("Unexpected failure_type: {:?}", failure_type), }, @@ -2212,7 +2212,7 @@ fn client_trusts_lsp_partial_fee_does_not_trigger_broadcast() { .node .pay_for_bolt11_invoice( &invoice, - PaymentId(invoice.payment_hash().to_byte_array()), + PaymentId(invoice.payment_hash().0), None, Default::default(), Retry::Attempts(3), diff --git a/lightning-net-tokio/Cargo.toml b/lightning-net-tokio/Cargo.toml index 6c45f40e3c8..af4845b7397 100644 --- a/lightning-net-tokio/Cargo.toml +++ b/lightning-net-tokio/Cargo.toml @@ -19,7 +19,7 @@ rustdoc-args = ["--cfg", "docsrs"] [dependencies] bitcoin = "0.32.2" lightning = { version = "0.3.0", path = "../lightning" } -tokio = { version = "1.35", features = [ "rt", "sync", "net", "time" ] } +tokio = { version = "1.35", features = [ "rt", "sync", "net", "time", "io-util" ] } [dev-dependencies] tokio = { version = "1.35", features = [ "macros", "rt", "rt-multi-thread", "sync", "net", "time" ] } diff --git a/lightning-net-tokio/src/lib.rs b/lightning-net-tokio/src/lib.rs index 75886ebfb5f..27d309f2c18 100644 --- a/lightning-net-tokio/src/lib.rs +++ b/lightning-net-tokio/src/lib.rs @@ -37,6 +37,7 @@ use lightning::ln::msgs::SocketAddress; use 
lightning::ln::peer_handler; use lightning::ln::peer_handler::APeerManager; use lightning::ln::peer_handler::SocketDescriptor as LnSocketTrait; +use lightning::sign::EntropySource; use std::future::Future; use std::hash::Hash; @@ -51,6 +52,9 @@ use std::time::Duration; static ID_COUNTER: AtomicU64 = AtomicU64::new(0); +const CONNECT_OUTBOUND_TIMEOUT: u64 = 10; +const TOR_CONNECT_OUTBOUND_TIMEOUT: u64 = 30; + // We only need to select over multiple futures in one place, and taking on the full `tokio/macros` // dependency tree in order to do so (which has broken our MSRV before) is excessive. Instead, we // define a trivial two- and three- select macro with the specific types we need and just use that. @@ -462,13 +466,169 @@ where PM::Target: APeerManager, { let connect_fut = async { TcpStream::connect(&addr).await.map(|s| s.into_std().unwrap()) }; - if let Ok(Ok(stream)) = time::timeout(Duration::from_secs(10), connect_fut).await { + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { + Some(setup_outbound(peer_manager, their_node_id, stream)) + } else { + None + } +} + +/// Routes [`connect_outbound`] through Tor. Implements stream isolation for each connection +/// using a stream isolation parameter sourced from [`EntropySource::get_secure_random_bytes`]. +/// +/// Returns a future (as the fn is async) that yields another future, see [`connect_outbound`] for +/// details on this return value. 
+pub async fn tor_connect_outbound( + peer_manager: PM, their_node_id: PublicKey, addr: SocketAddress, tor_proxy_addr: SocketAddr, + entropy_source: ES, +) -> Option> +where + PM::Target: APeerManager, + ES::Target: EntropySource, +{ + let connect_fut = async { + tor_connect(addr, tor_proxy_addr, entropy_source).await.map(|s| s.into_std().unwrap()) + }; + if let Ok(Ok(stream)) = + time::timeout(Duration::from_secs(TOR_CONNECT_OUTBOUND_TIMEOUT), connect_fut).await + { Some(setup_outbound(peer_manager, their_node_id, stream)) } else { None } } +async fn tor_connect( + addr: SocketAddress, tor_proxy_addr: SocketAddr, entropy_source: ES, +) -> Result +where + ES::Target: EntropySource, +{ + use std::io::Write; + use tokio::io::AsyncReadExt; + + const IPV4_ADDR_LEN: usize = 4; + const IPV6_ADDR_LEN: usize = 16; + const HOSTNAME_MAX_LEN: usize = u8::MAX as usize; + + // Constants defined in RFC 1928 and RFC 1929 + const VERSION: u8 = 5; + const NMETHODS: u8 = 1; + const USERNAME_PASSWORD_AUTH: u8 = 2; + const METHOD_SELECT_REPLY_LEN: usize = 2; + const USERNAME_PASSWORD_VERSION: u8 = 1; + const USERNAME_PASSWORD_REPLY_LEN: usize = 2; + const CMD_CONNECT: u8 = 1; + const RSV: u8 = 0; + const ATYP_IPV4: u8 = 1; + const ATYP_DOMAINNAME: u8 = 3; + const ATYP_IPV6: u8 = 4; + const SUCCESS: u8 = 0; + + // Tor extensions, see https://spec.torproject.org/socks-extensions.html for further details + const USERNAME: &[u8] = b"0"; + const USERNAME_LEN: usize = USERNAME.len(); + const PASSWORD_ENTROPY_LEN: usize = 32; + // We encode the password as a hex string on the wire. RFC 1929 allows arbitrary byte sequences but we choose to be conservative. 
+ const PASSWORD_LEN: usize = PASSWORD_ENTROPY_LEN * 2; + + const USERNAME_PASSWORD_REQUEST_LEN: usize = + 1 /* VER */ + 1 /* ULEN */ + USERNAME_LEN + 1 /* PLEN */ + PASSWORD_LEN; + const SOCKS5_REQUEST_MAX_LEN: usize = 1 /* VER */ + 1 /* CMD */ + 1 /* RSV */ + 1 /* ATYP */ + + 1 /* HOSTNAME len */ + HOSTNAME_MAX_LEN /* HOSTNAME */ + 2 /* PORT */; + const SOCKS5_REPLY_HEADER_LEN: usize = 1 /* VER */ + 1 /* REP */ + 1 /* RSV */ + 1 /* ATYP */; + + let method_selection_request = [VERSION, NMETHODS, USERNAME_PASSWORD_AUTH]; + let mut tcp_stream = TcpStream::connect(&tor_proxy_addr).await.map_err(|_| ())?; + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &method_selection_request) + .await + .map_err(|_| ())?; + + let mut method_selection_reply = [0u8; METHOD_SELECT_REPLY_LEN]; + tcp_stream.read_exact(&mut method_selection_reply).await.map_err(|_| ())?; + if method_selection_reply != [VERSION, USERNAME_PASSWORD_AUTH] { + return Err(()); + } + + let password: [u8; PASSWORD_ENTROPY_LEN] = entropy_source.get_secure_random_bytes(); + let mut username_password_request = [0u8; USERNAME_PASSWORD_REQUEST_LEN]; + let mut stream = &mut username_password_request[..]; + stream.write_all(&[USERNAME_PASSWORD_VERSION, USERNAME_LEN as u8]).unwrap(); + stream.write_all(USERNAME).unwrap(); + stream.write_all(&[PASSWORD_LEN as u8]).unwrap(); + // Encode the password as a hex string even if RFC 1929 allows arbitrary sequences + for byte in password { + write!(stream, "{:02x}", byte).unwrap(); + } + debug_assert!(stream.is_empty()); + tokio::io::AsyncWriteExt::write_all(&mut tcp_stream, &username_password_request) + .await + .map_err(|_| ())?; + + let mut username_password_reply = [0u8; USERNAME_PASSWORD_REPLY_LEN]; + tcp_stream.read_exact(&mut username_password_reply).await.map_err(|_| ())?; + if username_password_reply != [USERNAME_PASSWORD_VERSION, SUCCESS] { + return Err(()); + } + + let mut socks5_request = [0u8; SOCKS5_REQUEST_MAX_LEN]; + let mut stream = &mut 
socks5_request[..]; + stream.write_all(&[VERSION, CMD_CONNECT, RSV]).unwrap(); + match addr { + SocketAddress::TcpIpV4 { addr, port } => { + stream.write_all(&[ATYP_IPV4]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::TcpIpV6 { addr, port } => { + stream.write_all(&[ATYP_IPV6]).unwrap(); + stream.write_all(&addr).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + ref onion_v3 @ SocketAddress::OnionV3 { port, .. } => { + let onion_v3_url = onion_v3.to_string(); + let hostname = onion_v3_url.split_once(':').ok_or(())?.0.as_bytes(); + stream.write_all(&[ATYP_DOMAINNAME, hostname.len() as u8]).unwrap(); + stream.write_all(hostname).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::Hostname { hostname, port } => { + stream.write_all(&[ATYP_DOMAINNAME, hostname.len()]).unwrap(); + stream.write_all(hostname.as_bytes()).unwrap(); + stream.write_all(&port.to_be_bytes()).unwrap(); + }, + SocketAddress::OnionV2 { .. } => return Err(()), + }; + let bytes_remaining = stream.len(); + tokio::io::AsyncWriteExt::write_all( + &mut tcp_stream, + &socks5_request[..socks5_request.len() - bytes_remaining], + ) + .await + .map_err(|_| ())?; + + let mut socks5_reply_header = [0u8; SOCKS5_REPLY_HEADER_LEN]; + tcp_stream.read_exact(&mut socks5_reply_header).await.map_err(|_| ())?; + if socks5_reply_header[..3] != [VERSION, SUCCESS, RSV] { + return Err(()); + } + match socks5_reply_header[3] { + ATYP_IPV4 => tcp_stream.read_exact(&mut [0u8; IPV4_ADDR_LEN]).await.map_err(|_| ())?, + ATYP_DOMAINNAME => { + let hostname_len = tcp_stream.read_u8().await.map_err(|_| ())? as usize; + let mut hostname_buffer = [0u8; HOSTNAME_MAX_LEN]; + tcp_stream.read_exact(&mut hostname_buffer[..hostname_len]).await.map_err(|_| ())? 
+ }, + ATYP_IPV6 => tcp_stream.read_exact(&mut [0u8; IPV6_ADDR_LEN]).await.map_err(|_| ())?, + _ => return Err(()), + }; + tcp_stream.read_u16().await.map_err(|_| ())?; + + Ok(tcp_stream) +} + const SOCK_WAKER_VTABLE: task::RawWakerVTable = task::RawWakerVTable::new( clone_socket_waker, wake_socket_waker, @@ -941,4 +1101,61 @@ mod tests { async fn unthreaded_race_disconnect_accept() { race_disconnect_accept().await; } + + #[cfg(tor)] + #[tokio::test] + async fn test_tor_connect() { + use super::tor_connect; + use lightning::sign::EntropySource; + use std::net::SocketAddr; + + // Set TOR_PROXY=127.0.0.1:9050 + let tor_proxy_addr: SocketAddr = std::env!("TOR_PROXY").parse().unwrap(); + + struct TestEntropySource; + + impl EntropySource for TestEntropySource { + fn get_secure_random_bytes(&self) -> [u8; 32] { + [0xffu8; 32] + } + } + + let entropy_source = TestEntropySource; + + // Success cases + + for addr_str in [ + // google.com + "142.250.189.196:80", + // google.com + "[2607:f8b0:4005:813::2004]:80", + // torproject.org + "torproject.org:80", + // torproject.org + "2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80", + ] { + let addr: SocketAddress = addr_str.parse().unwrap(); + let tcp_stream = tor_connect(addr, tor_proxy_addr, &entropy_source).await.unwrap(); + assert_eq!( + tcp_stream.try_read(&mut [0u8; 1]).unwrap_err().kind(), + std::io::ErrorKind::WouldBlock + ); + } + + // Failure cases + + for addr_str in [ + // google.com, with some invalid port + "142.250.189.196:1234", + // google.com, with some invalid port + "[2607:f8b0:4005:813::2004]:1234", + // torproject.org, with some invalid port + "torproject.org:1234", + // torproject.org, with a typo + "3gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion:80", + ] { + let addr: SocketAddress = addr_str.parse().unwrap(); + assert!(tor_connect(addr, tor_proxy_addr, &entropy_source).await.is_err()); + } + } } diff --git a/lightning-tests/src/upgrade_downgrade_tests.rs 
b/lightning-tests/src/upgrade_downgrade_tests.rs index 19c50e870de..8df670321be 100644 --- a/lightning-tests/src/upgrade_downgrade_tests.rs +++ b/lightning-tests/src/upgrade_downgrade_tests.rs @@ -451,12 +451,10 @@ fn do_test_0_1_htlc_forward_after_splice(fail_htlc: bool) { reconnect_b_c_args.send_announcement_sigs = (true, true); reconnect_nodes(reconnect_b_c_args); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx = splice_channel(&nodes[0], &nodes[1], ChannelId(chan_id_bytes_a), contribution); for node in nodes.iter() { mine_transaction(node, &splice_tx); diff --git a/lightning-transaction-sync/src/electrum.rs b/lightning-transaction-sync/src/electrum.rs index 47489df69bb..1162b9c00c9 100644 --- a/lightning-transaction-sync/src/electrum.rs +++ b/lightning-transaction-sync/src/electrum.rs @@ -336,10 +336,21 @@ where script_history.iter().filter(|h| h.tx_hash == **txid); if let Some(history) = filtered_history.next() { let prob_conf_height = history.height as u32; + if prob_conf_height <= 0 { + // Skip if it's an unconfirmed entry. + continue; + } let confirmed_tx = self.get_confirmed_tx(tx, prob_conf_height)?; confirmed_txs.push(confirmed_tx); } - debug_assert!(filtered_history.next().is_none()); + if filtered_history.next().is_some() { + log_error!( + self.logger, + "Failed due to server returning multiple history entries for Tx {}.", + txid + ); + return Err(InternalError::Failed); + } } for (watched_output, script_history) in @@ -347,6 +358,7 @@ where { for possible_output_spend in script_history { if possible_output_spend.height <= 0 { + // Skip if it's an unconfirmed entry. 
continue; } diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index ed55ca5dc9b..84a42ff1be2 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -54,21 +54,38 @@ impl Readable for BlindedMessagePath { impl BlindedMessagePath { /// Create a one-hop blinded path for a message. + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. pub fn one_hop( recipient_node_id: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, entropy_source: ES, secp_ctx: &Secp256k1, + context: MessageContext, compact_padding: bool, entropy_source: ES, + secp_ctx: &Secp256k1, ) -> Self where ES::Target: EntropySource, { - Self::new(&[], recipient_node_id, local_node_receive_key, context, entropy_source, secp_ctx) + Self::new( + &[], + recipient_node_id, + local_node_receive_key, + context, + compact_padding, + entropy_source, + secp_ctx, + ) } /// Create a path for an onion message, to be forwarded along `node_pks`. + /// + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. 
pub fn new( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, - local_node_receive_key: ReceiveAuthKey, context: MessageContext, entropy_source: ES, - secp_ctx: &Secp256k1, + local_node_receive_key: ReceiveAuthKey, context: MessageContext, compact_padding: bool, + entropy_source: ES, secp_ctx: &Secp256k1, ) -> Self where ES::Target: EntropySource, @@ -79,6 +96,7 @@ impl BlindedMessagePath { 0, local_node_receive_key, context, + compact_padding, entropy_source, secp_ctx, ) @@ -86,12 +104,15 @@ impl BlindedMessagePath { /// Same as [`BlindedMessagePath::new`], but allows specifying a number of dummy hops. /// - /// Note: - /// At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. + /// `compact_padding` selects between space-inefficient padding which better hides contents and + /// a space-constrained padding which does very little to hide the contents, especially for the + /// last hop. It should only be set when the blinded path needs to be as compact as possible. + /// + /// Note: At most [`MAX_DUMMY_HOPS_COUNT`] dummy hops can be added to the blinded path. pub fn new_with_dummy_hops( intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, local_node_receive_key: ReceiveAuthKey, context: MessageContext, - entropy_source: ES, secp_ctx: &Secp256k1, + compact_padding: bool, entropy_source: ES, secp_ctx: &Secp256k1, ) -> Self where ES::Target: EntropySource, @@ -114,6 +135,7 @@ impl BlindedMessagePath { context, &blinding_secret, local_node_receive_key, + compact_padding, ), }) } @@ -416,28 +438,45 @@ pub enum OffersContext { /// Useful to timeout async recipients that are no longer supported as clients. path_absolute_expiry: Duration, }, - /// Context used by a [`BlindedMessagePath`] within a [`Refund`] or as a reply path for an - /// [`InvoiceRequest`]. + /// Context used by a [`BlindedMessagePath`] within a [`Refund`]. 
/// /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an /// [`InvoiceError`]. /// /// [`Refund`]: crate::offers::refund::Refund - /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError - OutboundPayment { - /// Payment ID used when creating a [`Refund`] or [`InvoiceRequest`]. + OutboundPaymentForRefund { + /// Payment ID used when creating a [`Refund`]. /// /// [`Refund`]: crate::offers::refund::Refund - /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest payment_id: PaymentId, - /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] or - /// [`InvoiceRequest`] and for deriving their signing keys. + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid [`Refund`] and + /// for deriving its signing keys. /// /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`Refund`]: crate::offers::refund::Refund + nonce: Nonce, + }, + /// Context used by a [`BlindedMessagePath`] as a reply path for an [`InvoiceRequest`]. + /// + /// This variant is intended to be received when handling a [`Bolt12Invoice`] or an + /// [`InvoiceError`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice + /// [`InvoiceError`]: crate::offers::invoice_error::InvoiceError + OutboundPaymentForOffer { + /// Payment ID used when creating an [`InvoiceRequest`]. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + payment_id: PaymentId, + + /// A nonce used for authenticating that a [`Bolt12Invoice`] is for a valid + /// [`InvoiceRequest`] and for deriving its signing keys. 
+ /// + /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest nonce: Nonce, }, @@ -619,7 +658,7 @@ impl_writeable_tlv_based_enum!(OffersContext, (0, InvoiceRequest) => { (0, nonce, required), }, - (1, OutboundPayment) => { + (1, OutboundPaymentForRefund) => { (0, payment_id, required), (1, nonce, required), }, @@ -631,6 +670,10 @@ impl_writeable_tlv_based_enum!(OffersContext, (2, invoice_slot, required), (4, path_absolute_expiry, required), }, + (4, OutboundPaymentForOffer) => { + (0, payment_id, required), + (1, nonce, required), + }, ); impl_writeable_tlv_based_enum!(AsyncPaymentsContext, @@ -693,7 +736,7 @@ pub const MAX_DUMMY_HOPS_COUNT: usize = 10; pub(super) fn blinded_hops( secp_ctx: &Secp256k1, intermediate_nodes: &[MessageForwardNode], recipient_node_id: PublicKey, dummy_hop_count: usize, context: MessageContext, - session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, compact_padding: bool, ) -> Vec { let dummy_count = cmp::min(dummy_hop_count, MAX_DUMMY_HOPS_COUNT); let pks = intermediate_nodes @@ -703,9 +746,8 @@ pub(super) fn blinded_hops( core::iter::repeat((recipient_node_id, Some(local_node_receive_key))).take(dummy_count), ) .chain(core::iter::once((recipient_node_id, Some(local_node_receive_key)))); - let is_compact = intermediate_nodes.iter().any(|node| node.short_channel_id.is_some()); - let tlvs = pks + let intermediate_tlvs = pks .clone() .skip(1) // The first node's TLVs contains the next node's pubkey .zip(intermediate_nodes.iter().map(|node| node.short_channel_id)) @@ -716,18 +758,43 @@ pub(super) fn blinded_hops( .map(|next_hop| { ControlTlvs::Forward(ForwardTlvs { next_hop, next_blinding_override: None }) }) - .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)) - .chain(core::iter::once(ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }))); - - if is_compact { - let path = 
pks.zip(tlvs); - utils::construct_blinded_hops(secp_ctx, path, session_priv) + .chain((0..dummy_count).map(|_| ControlTlvs::Dummy)); + + let max_intermediate_len = + intermediate_tlvs.clone().map(|tlvs| tlvs.serialized_length()).max().unwrap_or(0); + let have_intermediate_one_byte_smaller = + intermediate_tlvs.clone().any(|tlvs| tlvs.serialized_length() == max_intermediate_len - 1); + + let round_off = if compact_padding { + // We can only pad by a minimum of two bytes (we can only go from no-TLV to a type + length + // byte). Thus, if there are any intermediate hops that need to be padded by exactly one + // byte, we have to instead pad everything by two. + if have_intermediate_one_byte_smaller { + max_intermediate_len + 2 + } else { + max_intermediate_len + } } else { - let path = - pks.zip(tlvs.map(|tlv| BlindedPathWithPadding { - tlvs: tlv, - round_off: MESSAGE_PADDING_ROUND_OFF, - })); - utils::construct_blinded_hops(secp_ctx, path, session_priv) - } + MESSAGE_PADDING_ROUND_OFF + }; + + let tlvs = intermediate_tlvs + .map(|tlvs| { + let res = BlindedPathWithPadding { tlvs, round_off }; + if compact_padding { + debug_assert_eq!(res.serialized_length(), max_intermediate_len); + } else { + // We don't currently ever push extra fields to intermediate hops, so they should + // never go over `MESSAGE_PADDING_ROUND_OFF`. 
+ debug_assert_eq!(res.serialized_length(), MESSAGE_PADDING_ROUND_OFF); + } + res + }) + .chain(core::iter::once(BlindedPathWithPadding { + tlvs: ControlTlvs::Receive(ReceiveTlvs { context: Some(context) }), + round_off: if compact_padding { 0 } else { MESSAGE_PADDING_ROUND_OFF }, + })); + + let path = pks.zip(tlvs); + utils::construct_blinded_hops(secp_ctx, path, session_priv) } diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 13ade222f5b..b68be811cb4 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -33,7 +33,6 @@ use crate::util::ser::{ Writeable, Writer, }; -use core::mem; use core::ops::Deref; #[allow(unused_imports)] @@ -121,6 +120,61 @@ impl BlindedPaymentPath { local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, secp_ctx: &Secp256k1, ) -> Result + where + ES::Target: EntropySource, + { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + &[], + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + /// Same as [`BlindedPaymentPath::new`], but allows specifying a number of dummy hops. + /// + /// Dummy TLVs allow callers to override the payment relay values used for dummy hops. + /// Any additional fees introduced by these dummy hops are ultimately paid to the final + /// recipient as part of the total amount. + /// + /// This improves privacy by making path-length analysis based on fee and CLTV delta + /// values less reliable. + /// + /// TODO: Add end-to-end tests validating fee aggregation, CLTV deltas, and + /// HTLC bounds when dummy hops are present, before exposing this API publicly. 
+ pub(crate) fn new_with_dummy_hops( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + dummy_tlvs: &[DummyTlvs], local_node_receive_key: ReceiveAuthKey, payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result + where + ES::Target: EntropySource, + { + BlindedPaymentPath::new_inner( + intermediate_nodes, + payee_node_id, + local_node_receive_key, + dummy_tlvs, + payee_tlvs, + htlc_maximum_msat, + min_final_cltv_expiry_delta, + entropy_source, + secp_ctx, + ) + } + + fn new_inner( + intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, + local_node_receive_key: ReceiveAuthKey, dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, + htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, entropy_source: ES, + secp_ctx: &Secp256k1, + ) -> Result where ES::Target: EntropySource, { @@ -133,6 +187,7 @@ impl BlindedPaymentPath { let blinded_payinfo = compute_payinfo( intermediate_nodes, + dummy_tlvs, &payee_tlvs, htlc_maximum_msat, min_final_cltv_expiry_delta, @@ -145,6 +200,7 @@ impl BlindedPaymentPath { secp_ctx, intermediate_nodes, payee_node_id, + dummy_tlvs, payee_tlvs, &blinding_secret, local_node_receive_key, @@ -191,28 +247,31 @@ impl BlindedPaymentPath { NL::Target: NodeIdLookUp, T: secp256k1::Signing + secp256k1::Verification, { - match self.decrypt_intro_payload::(node_signer) { - Ok(( - BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. 
}), - control_tlvs_ss, - )) => { - let next_node_id = match node_id_lookup.next_node_id(short_channel_id) { - Some(node_id) => node_id, - None => return Err(()), - }; - let mut new_blinding_point = onion_utils::next_hop_pubkey( - secp_ctx, - self.inner_path.blinding_point, - control_tlvs_ss.as_ref(), - ) - .map_err(|_| ())?; - mem::swap(&mut self.inner_path.blinding_point, &mut new_blinding_point); - self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); - self.inner_path.blinded_hops.remove(0); - Ok(()) - }, - _ => Err(()), - } + let (next_node_id, control_tlvs_ss) = + match self.decrypt_intro_payload::(node_signer).map_err(|_| ())? { + (BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. }), ss) => { + let node_id = node_id_lookup.next_node_id(short_channel_id).ok_or(())?; + (node_id, ss) + }, + (BlindedPaymentTlvs::Dummy(_), ss) => { + let node_id = node_signer.get_node_id(Recipient::Node)?; + (node_id, ss) + }, + _ => return Err(()), + }; + + let new_blinding_point = onion_utils::next_hop_pubkey( + secp_ctx, + self.inner_path.blinding_point, + control_tlvs_ss.as_ref(), + ) + .map_err(|_| ())?; + + self.inner_path.blinding_point = new_blinding_point; + self.inner_path.introduction_node = IntroductionNode::NodeId(next_node_id); + self.inner_path.blinded_hops.remove(0); + + Ok(()) } pub(crate) fn decrypt_intro_payload( @@ -234,9 +293,9 @@ impl BlindedPaymentPath { .map_err(|_| ())?; match (&readable, used_aad) { - (BlindedPaymentTlvs::Forward(_), false) | (BlindedPaymentTlvs::Receive(_), true) => { - Ok((readable, control_tlvs_ss)) - }, + (BlindedPaymentTlvs::Forward(_), false) + | (BlindedPaymentTlvs::Dummy(_), true) + | (BlindedPaymentTlvs::Receive(_), true) => Ok((readable, control_tlvs_ss)), _ => Err(()), } } @@ -328,6 +387,37 @@ pub struct TrampolineForwardTlvs { pub next_blinding_override: Option, } +/// TLVs carried by a dummy hop within a blinded payment path. 
+/// +/// Dummy hops do not correspond to real forwarding decisions, but are processed +/// identically to real hops at the protocol level. The TLVs contained here define +/// the relay requirements and constraints that must be satisfied for the payment +/// to continue through this hop. +/// +/// By enforcing realistic relay semantics on dummy hops, the payment path remains +/// indistinguishable from a fully real route with respect to fees, CLTV deltas, and +/// validation behavior. +#[derive(Clone, Copy)] +pub struct DummyTlvs { + /// Relay requirements (fees and CLTV delta) that must be satisfied when + /// processing this dummy hop. + pub payment_relay: PaymentRelay, + /// Constraints that apply to the payment when relaying over this dummy hop. + pub payment_constraints: PaymentConstraints, +} + +impl Default for DummyTlvs { + fn default() -> Self { + let payment_relay = + PaymentRelay { cltv_expiry_delta: 0, fee_proportional_millionths: 0, fee_base_msat: 0 }; + + let payment_constraints = + PaymentConstraints { max_cltv_expiry: u32::MAX, htlc_minimum_msat: 0 }; + + Self { payment_relay, payment_constraints } + } +} + /// Data to construct a [`BlindedHop`] for receiving a payment. This payload is custom to LDK and /// may not be valid if received by another lightning implementation. #[derive(Clone, Debug)] @@ -346,6 +436,8 @@ pub struct ReceiveTlvs { pub(crate) enum BlindedPaymentTlvs { /// This blinded payment data is for a forwarding node. Forward(ForwardTlvs), + /// This blinded payment data is dummy and is to be peeled by receiving node. + Dummy(DummyTlvs), /// This blinded payment data is for the receiving node. Receive(ReceiveTlvs), } @@ -361,15 +453,17 @@ pub(crate) enum BlindedTrampolineTlvs { } // Used to include forward and receive TLVs in the same iterator for encoding. 
+#[derive(Clone)] enum BlindedPaymentTlvsRef<'a> { Forward(&'a ForwardTlvs), + Dummy(&'a DummyTlvs), Receive(&'a ReceiveTlvs), } /// Parameters for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentRelay { /// Number of blocks subtracted from an incoming HTLC's `cltv_expiry` for this [`BlindedHop`]. pub cltv_expiry_delta: u16, @@ -383,7 +477,7 @@ pub struct PaymentRelay { /// Constraints for relaying over a given [`BlindedHop`]. /// /// [`BlindedHop`]: crate::blinded_path::BlindedHop -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] pub struct PaymentConstraints { /// The maximum total CLTV that is acceptable when relaying a payment over this [`BlindedHop`]. pub max_cltv_expiry: u32, @@ -512,6 +606,17 @@ impl Writeable for TrampolineForwardTlvs { } } +impl Writeable for DummyTlvs { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + encode_tlv_stream!(w, { + (10, self.payment_relay, required), + (12, self.payment_constraints, required), + (65539, (), required), + }); + Ok(()) + } +} + // Note: The `authentication` TLV field was removed in LDK v0.3 following // the introduction of `ReceiveAuthKey`-based authentication for inbound // `BlindedPaymentPaths`s. 
Because we do not support receiving to those @@ -532,6 +637,7 @@ impl<'a> Writeable for BlindedPaymentTlvsRef<'a> { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { Self::Forward(tlvs) => tlvs.write(w)?, + Self::Dummy(tlvs) => tlvs.write(w)?, Self::Receive(tlvs) => tlvs.write(w)?, } Ok(()) @@ -552,28 +658,41 @@ impl Readable for BlindedPaymentTlvs { (14, features, (option, encoding: (BlindedHopFeatures, WithoutLength))), (65536, payment_secret, option), (65537, payment_context, option), + (65539, is_dummy, option) }); - if let Some(short_channel_id) = scid { - if payment_secret.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { - short_channel_id, - payment_relay: payment_relay.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - next_blinding_override, - features: features.unwrap_or_else(BlindedHopFeatures::empty), - })) - } else { - if payment_relay.is_some() || features.is_some() { - return Err(DecodeError::InvalidValue); - } - Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { - payment_secret: payment_secret.ok_or(DecodeError::InvalidValue)?, - payment_constraints: payment_constraints.0.unwrap(), - payment_context: payment_context.ok_or(DecodeError::InvalidValue)?, - })) + match ( + scid, + next_blinding_override, + payment_relay, + features, + payment_secret, + payment_context, + is_dummy, + ) { + (Some(short_channel_id), next_override, Some(relay), features, None, None, None) => { + Ok(BlindedPaymentTlvs::Forward(ForwardTlvs { + short_channel_id, + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + next_blinding_override: next_override, + features: features.unwrap_or_else(BlindedHopFeatures::empty), + })) + }, + (None, None, None, None, Some(secret), Some(context), None) => { + Ok(BlindedPaymentTlvs::Receive(ReceiveTlvs { + payment_secret: secret, + payment_constraints: payment_constraints.0.unwrap(), + payment_context: context, 
+ })) + }, + (None, None, Some(relay), None, None, None, Some(())) => { + Ok(BlindedPaymentTlvs::Dummy(DummyTlvs { + payment_relay: relay, + payment_constraints: payment_constraints.0.unwrap(), + })) + }, + _ => return Err(DecodeError::InvalidValue), } } } @@ -620,21 +739,46 @@ pub(crate) const PAYMENT_PADDING_ROUND_OFF: usize = 30; /// Construct blinded payment hops for the given `intermediate_nodes` and payee info. pub(super) fn blinded_hops( secp_ctx: &Secp256k1, intermediate_nodes: &[PaymentForwardNode], payee_node_id: PublicKey, - payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, local_node_receive_key: ReceiveAuthKey, + dummy_tlvs: &[DummyTlvs], payee_tlvs: ReceiveTlvs, session_priv: &SecretKey, + local_node_receive_key: ReceiveAuthKey, ) -> Vec { let pks = intermediate_nodes .iter() .map(|node| (node.node_id, None)) + .chain(dummy_tlvs.iter().map(|_| (payee_node_id, Some(local_node_receive_key)))) .chain(core::iter::once((payee_node_id, Some(local_node_receive_key)))); let tlvs = intermediate_nodes .iter() .map(|node| BlindedPaymentTlvsRef::Forward(&node.tlvs)) + .chain(dummy_tlvs.iter().map(|tlvs| BlindedPaymentTlvsRef::Dummy(tlvs))) .chain(core::iter::once(BlindedPaymentTlvsRef::Receive(&payee_tlvs))); let path = pks.zip( tlvs.map(|tlv| BlindedPathWithPadding { tlvs: tlv, round_off: PAYMENT_PADDING_ROUND_OFF }), ); + // Debug invariant: all non-final hops must have identical serialized size. 
+ #[cfg(debug_assertions)] + { + let mut iter = path.clone(); + if let Some((_, first)) = iter.next() { + let remaining = iter.clone().count(); // includes intermediate + final + + // At least one intermediate hop + if remaining > 1 { + let expected = first.serialized_length(); + + // skip final hop: take(remaining - 1) + for (_, hop) in iter.take(remaining - 1) { + debug_assert!( + hop.serialized_length() == expected, + "All intermediate blinded hops must have identical serialized size" + ); + } + } + } + } + utils::construct_blinded_hops(secp_ctx, path, session_priv) } @@ -694,14 +838,22 @@ where } pub(super) fn compute_payinfo( - intermediate_nodes: &[PaymentForwardNode], payee_tlvs: &ReceiveTlvs, + intermediate_nodes: &[PaymentForwardNode], dummy_tlvs: &[DummyTlvs], payee_tlvs: &ReceiveTlvs, payee_htlc_maximum_msat: u64, min_final_cltv_expiry_delta: u16, ) -> Result { - let (aggregated_base_fee, aggregated_prop_fee) = - compute_aggregated_base_prop_fee(intermediate_nodes.iter().map(|node| RoutingFees { + let routing_fees = intermediate_nodes + .iter() + .map(|node| RoutingFees { base_msat: node.tlvs.payment_relay.fee_base_msat, proportional_millionths: node.tlvs.payment_relay.fee_proportional_millionths, - }))?; + }) + .chain(dummy_tlvs.iter().map(|tlvs| RoutingFees { + base_msat: tlvs.payment_relay.fee_base_msat, + proportional_millionths: tlvs.payment_relay.fee_proportional_millionths, + })); + + let (aggregated_base_fee, aggregated_prop_fee) = + compute_aggregated_base_prop_fee(routing_fees)?; let mut htlc_minimum_msat: u64 = 1; let mut htlc_maximum_msat: u64 = 21_000_000 * 100_000_000 * 1_000; // Total bitcoin supply @@ -730,6 +882,16 @@ pub(super) fn compute_payinfo( ) .ok_or(())?; // If underflow occurs, we cannot send to this hop without exceeding their max } + for dummy_tlvs in dummy_tlvs.iter() { + cltv_expiry_delta = + cltv_expiry_delta.checked_add(dummy_tlvs.payment_relay.cltv_expiry_delta).ok_or(())?; + + htlc_minimum_msat = amt_to_forward_msat( + 
core::cmp::max(dummy_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat), + &dummy_tlvs.payment_relay, + ) + .unwrap_or(1); // If underflow occurs, we definitely reached this node's min + } htlc_minimum_msat = core::cmp::max(payee_tlvs.payment_constraints.htlc_minimum_msat, htlc_minimum_msat); htlc_maximum_msat = core::cmp::min(payee_htlc_maximum_msat, htlc_maximum_msat); @@ -874,7 +1036,7 @@ mod tests { }; let htlc_maximum_msat = 100_000; let blinded_payinfo = - super::compute_payinfo(&intermediate_nodes[..], &recv_tlvs, htlc_maximum_msat, 12) + super::compute_payinfo(&intermediate_nodes[..], &[], &recv_tlvs, htlc_maximum_msat, 12) .unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 201); assert_eq!(blinded_payinfo.fee_proportional_millionths, 1001); @@ -891,7 +1053,7 @@ mod tests { payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), }; let blinded_payinfo = - super::compute_payinfo(&[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); + super::compute_payinfo(&[], &[], &recv_tlvs, 4242, TEST_FINAL_CLTV as u16).unwrap(); assert_eq!(blinded_payinfo.fee_base_msat, 0); assert_eq!(blinded_payinfo.fee_proportional_millionths, 0); assert_eq!(blinded_payinfo.cltv_expiry_delta, TEST_FINAL_CLTV as u16); @@ -950,6 +1112,7 @@ mod tests { let htlc_maximum_msat = 100_000; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1009,6 +1172,7 @@ mod tests { let htlc_minimum_msat = 3798; assert!(super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_minimum_msat - 1, TEST_FINAL_CLTV as u16 @@ -1018,6 +1182,7 @@ mod tests { let htlc_maximum_msat = htlc_minimum_msat + 1; let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, htlc_maximum_msat, TEST_FINAL_CLTV as u16, @@ -1078,6 +1243,7 @@ mod tests { let blinded_payinfo = super::compute_payinfo( &intermediate_nodes[..], + &[], &recv_tlvs, 10_000, TEST_FINAL_CLTV as 
u16, diff --git a/lightning/src/blinded_path/utils.rs b/lightning/src/blinded_path/utils.rs index 8894f37ad33..339b4337eb3 100644 --- a/lightning/src/blinded_path/utils.rs +++ b/lightning/src/blinded_path/utils.rs @@ -256,9 +256,12 @@ impl Writeable for BlindedPathWithPadding { let tlv_length = self.tlvs.serialized_length(); let total_length = tlv_length + TLV_OVERHEAD; - let padding_length = total_length.div_ceil(self.round_off) * self.round_off - total_length; - - let padding = Some(BlindedPathPadding::new(padding_length)); + let padding = if self.round_off == 0 || tlv_length % self.round_off == 0 { + None + } else { + let length = total_length.div_ceil(self.round_off) * self.round_off - total_length; + Some(BlindedPathPadding::new(length)) + }; encode_tlv_stream!(writer, { (1, padding, option), diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d97ae6097b6..11acdfcd02d 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -30,6 +30,7 @@ use crate::ln::onion_utils::LocalHTLCFailureReason; use crate::ln::types::ChannelId; use crate::offers::invoice::Bolt12Invoice; use crate::offers::invoice_request::InvoiceRequest; +use crate::offers::nonce::Nonce; use crate::offers::static_invoice::StaticInvoice; use crate::onion_message::messenger::Responder; use crate::routing::gossip::NetworkUpdate; @@ -1065,6 +1066,17 @@ pub enum Event { /// /// [`StaticInvoice`]: crate::offers::static_invoice::StaticInvoice bolt12_invoice: Option, + /// The [`Nonce`] used when the BOLT 12 [`InvoiceRequest`] was created for the corresponding + /// [`Offer`] or [`Refund`]. + /// + /// This is needed to build a payer proof, as it allows deriving the signing keys used for + /// the [`InvoiceRequest`]. `None` for non-BOLT 12 payments or for payments initiated on + /// LDK versions prior to 0.2. 
+ /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + /// [`Offer`]: crate::offers::offer::Offer + /// [`Refund`]: crate::offers::refund::Refund + payment_nonce: Option, }, /// Indicates an outbound payment failed. Individual [`Event::PaymentPathFailed`] events /// provide failure information for each path attempt in the payment, including retries. @@ -1958,6 +1970,7 @@ impl Writeable for Event { ref amount_msat, ref fee_paid_msat, ref bolt12_invoice, + ref payment_nonce, } => { 2u8.write(writer)?; write_tlv_fields!(writer, { @@ -1967,6 +1980,7 @@ impl Writeable for Event { (5, fee_paid_msat, option), (7, amount_msat, option), (9, bolt12_invoice, option), + (11, payment_nonce, option), }); }, &Event::PaymentPathFailed { @@ -2433,6 +2447,7 @@ impl MaybeReadable for Event { let mut amount_msat = None; let mut fee_paid_msat = None; let mut bolt12_invoice = None; + let mut payment_nonce = None; read_tlv_fields!(reader, { (0, payment_preimage, required), (1, payment_hash, option), @@ -2440,6 +2455,7 @@ impl MaybeReadable for Event { (5, fee_paid_msat, option), (7, amount_msat, option), (9, bolt12_invoice, option), + (11, payment_nonce, option), }); if payment_hash.is_none() { payment_hash = Some(PaymentHash( @@ -2453,6 +2469,7 @@ impl MaybeReadable for Event { amount_msat, fee_paid_msat, bolt12_invoice, + payment_nonce, })) }; f() diff --git a/lightning/src/ln/accountable_tests.rs b/lightning/src/ln/accountable_tests.rs new file mode 100644 index 00000000000..442186b376a --- /dev/null +++ b/lightning/src/ln/accountable_tests.rs @@ -0,0 +1,102 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 or the MIT license +// , at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Tests for verifying the correct relay of accountable signals between nodes. 
+ +use crate::ln::channelmanager::{ + HTLCForwardInfo, PaymentId, PendingAddHTLCInfo, PendingHTLCInfo, RecipientOnionFields, Retry, +}; +use crate::ln::functional_test_utils::*; +use crate::ln::msgs::ChannelMessageHandler; +use crate::routing::router::{PaymentParameters, RouteParameters}; + +fn test_accountable_forwarding_with_override( + override_accountable: Option, expected_forwarded: bool, +) { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let _chan_ab = create_announced_chan_between_nodes(&nodes, 0, 1); + let _chan_bc = create_announced_chan_between_nodes(&nodes, 1, 2); + + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(nodes[2]); + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV), + 100_000, + ); + let onion_fields = RecipientOnionFields::secret_only(payment_secret); + let payment_id = PaymentId(payment_hash.0); + nodes[0] + .node + .send_payment(payment_hash, onion_fields, payment_id, route_params, Retry::Attempts(0)) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let updates_ab = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + assert_eq!(updates_ab.update_add_htlcs.len(), 1); + let mut htlc_ab = updates_ab.update_add_htlcs[0].clone(); + assert_eq!(htlc_ab.accountable, Some(false)); + + // Override accountable value if requested + if let Some(override_value) = override_accountable { + htlc_ab.accountable = Some(override_value); + } + + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &htlc_ab); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_ab.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[1], false); + check_added_monitors(&nodes[1], 1); 
+ + let updates_bc = get_htlc_update_msgs(&nodes[1], &nodes[2].node.get_our_node_id()); + assert_eq!(updates_bc.update_add_htlcs.len(), 1); + let htlc_bc = &updates_bc.update_add_htlcs[0]; + assert_eq!( + htlc_bc.accountable, + Some(expected_forwarded), + "B -> C should have accountable = {:?}", + expected_forwarded + ); + + nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), htlc_bc); + do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_bc.commitment_signed, false, false); + + // Accountable signal is not surfaced in PaymentClaimable, so we do our next-best and check + // that the received htlcs that will be processed has the signal set as we expect. We manually + // process pending update adds so that we can access the htlc in forward_htlcs. + nodes[2].node.test_process_pending_update_add_htlcs(); + { + let fwds_lock = nodes[2].node.forward_htlcs.lock().unwrap(); + let recvs = fwds_lock.get(&0).unwrap(); + assert_eq!(recvs.len(), 1); + match recvs[0] { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + forward_info: PendingHTLCInfo { incoming_accountable, .. }, + .. + }) => { + assert_eq!(incoming_accountable, expected_forwarded) + }, + _ => panic!("Unexpected forward"), + } + } + + expect_and_process_pending_htlcs(&nodes[2], false); + check_added_monitors(&nodes[2], 0); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, 100_000); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); +} + +#[test] +fn test_accountable_signal() { + // Tests forwarding of accountable signal for various incoming signal values. 
+ test_accountable_forwarding_with_override(None, false); + test_accountable_forwarding_with_override(Some(true), true); + test_accountable_forwarding_with_override(Some(false), false); +} diff --git a/lightning/src/ln/async_payments_tests.rs b/lightning/src/ln/async_payments_tests.rs index 1f1bb70714d..0b2652920e1 100644 --- a/lightning/src/ln/async_payments_tests.rs +++ b/lightning/src/ln/async_payments_tests.rs @@ -10,8 +10,8 @@ use crate::blinded_path::message::{ BlindedMessagePath, MessageContext, NextMessageHop, OffersContext, }; -use crate::blinded_path::payment::PaymentContext; use crate::blinded_path::payment::{AsyncBolt12OfferContext, BlindedPaymentTlvs}; +use crate::blinded_path::payment::{DummyTlvs, PaymentContext}; use crate::chain::channelmonitor::{HTLC_FAIL_BACK_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::events::{ Event, EventsProvider, HTLCHandlingFailureReason, HTLCHandlingFailureType, PaidBolt12Invoice, @@ -55,7 +55,7 @@ use crate::onion_message::messenger::{ use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::ParsedOnionMessageContents; use crate::prelude::*; -use crate::routing::router::{Payee, PaymentParameters}; +use crate::routing::router::{Payee, PaymentParameters, DEFAULT_PAYMENT_DUMMY_HOPS}; use crate::sign::NodeSigner; use crate::sync::Mutex; use crate::types::features::Bolt12InvoiceFeatures; @@ -984,7 +984,8 @@ fn ignore_duplicate_invoice() { check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[always_online_node, async_recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1063,7 +1064,8 @@ fn ignore_duplicate_invoice() { check_added_monitors(&sender, 1); let args 
= PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let payment_preimage = match get_event!(async_recipient, Event::PaymentClaimable) { @@ -1138,7 +1140,8 @@ fn async_receive_flow_success() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let (res, _) = @@ -1375,11 +1378,13 @@ fn async_receive_mpp() { }; let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], expected_route[1], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { Event::PaymentClaimable { @@ -1497,7 +1502,8 @@ fn amount_doesnt_match_invreq() { let route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + 
.expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Modify the invoice request stored in our outbounds to be the correct one, to make sure the @@ -1521,7 +1527,8 @@ fn amount_doesnt_match_invreq() { ev, MessageSendEvent::UpdateHTLCs { ref updates, .. } if updates.update_add_htlcs.len() == 1)); check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[2], &nodes[3]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1712,7 +1719,8 @@ fn invalid_async_receive_with_retry( let payment_hash = extract_payment_hash(&ev); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); // Fail the HTLC backwards to enable us to more easily modify the now-Retryable outbound to test @@ -1739,7 +1747,8 @@ fn invalid_async_receive_with_retry( let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], 
&nodes[1], &nodes[2]], true); @@ -1751,7 +1760,8 @@ fn invalid_async_receive_with_retry( let mut ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); check_added_monitors(&nodes[0], 1); let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; - let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], route, keysend_preimage)); @@ -1858,6 +1868,13 @@ fn expired_static_invoice_payment_path() { blinded_path .advance_path_by_one(&nodes[1].keys_manager, &nodes[1].node, &secp_ctx) .unwrap(); + + for _ in 0..DEFAULT_PAYMENT_DUMMY_HOPS { + blinded_path + .advance_path_by_one(&nodes[2].keys_manager, &nodes[2].node, &secp_ctx) + .unwrap(); + } + match blinded_path.decrypt_intro_payload(&nodes[2].keys_manager).unwrap().0 { BlindedPaymentTlvs::Receive(tlvs) => tlvs.payment_constraints.max_cltv_expiry, _ => panic!(), @@ -1920,7 +1937,8 @@ fn expired_static_invoice_payment_path() { let route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]]; let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .without_claimable_event() - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); fail_blinded_htlc_backwards(payment_hash, 1, &[&nodes[0], &nodes[1], &nodes[2]], false); nodes[2].logger.assert_log_contains( @@ -2363,7 +2381,8 @@ fn refresh_static_invoices_for_used_offers() { check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[server, recipient]]; - let args = PassAlongPathArgs::new(sender, 
route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2697,7 +2716,8 @@ fn invoice_server_is_not_channel_peer() { check_added_monitors(&sender, 1); let route: &[&[&Node]] = &[&[forwarding_node, recipient]]; - let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender, route[0], amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = extract_payment_preimage(&claimable_ev); let res = claim_payment_along_route(ClaimAlongRouteArgs::new(sender, route, keysend_preimage)); @@ -2936,7 +2956,8 @@ fn async_payment_e2e() { check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; @@ -3173,7 +3194,8 @@ fn intercepted_hold_htlc() { check_added_monitors(&lsp, 1); let path: &[&Node] = &[recipient]; - let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[lsp, recipient]]; @@ -3276,7 +3298,8 @@ fn 
async_payment_mpp() { assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); let args = PassAlongPathArgs::new(lsp_a, expected_path, amt_msat, payment_hash, ev) - .without_claimable_event(); + .without_claimable_event() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); lsp_b.node.process_pending_htlc_forwards(); @@ -3284,7 +3307,8 @@ fn async_payment_mpp() { let mut events = lsp_b.node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let ev = remove_first_msg_event_to_node(&recipient.node.get_our_node_id(), &mut events); - let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(lsp_b, expected_path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let keysend_preimage = match claimable_ev { @@ -3420,7 +3444,8 @@ fn release_htlc_races_htlc_onion_decode() { check_added_monitors(&sender_lsp, 1); let path: &[&Node] = &[invoice_server, recipient]; - let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev); + let args = PassAlongPathArgs::new(sender_lsp, path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); let claimable_ev = do_pass_along_path(args).unwrap(); let route: &[&[&Node]] = &[&[sender_lsp, invoice_server, recipient]]; diff --git a/lightning/src/ln/blinded_payment_tests.rs b/lightning/src/ln/blinded_payment_tests.rs index 7941a81f61e..74981ead7f1 100644 --- a/lightning/src/ln/blinded_payment_tests.rs +++ b/lightning/src/ln/blinded_payment_tests.rs @@ -8,8 +8,8 @@ // licenses. 
use crate::blinded_path::payment::{ - BlindedPaymentPath, Bolt12RefundContext, ForwardTlvs, PaymentConstraints, PaymentContext, - PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, + BlindedPaymentPath, Bolt12RefundContext, DummyTlvs, ForwardTlvs, PaymentConstraints, + PaymentContext, PaymentForwardNode, PaymentRelay, ReceiveTlvs, PAYMENT_PADDING_ROUND_OFF, }; use crate::blinded_path::utils::is_padded; use crate::blinded_path::{self, BlindedHop}; @@ -196,6 +196,72 @@ fn do_one_hop_blinded_path(success: bool) { } } +#[test] +fn one_hop_blinded_path_with_dummy_hops() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let chan_upd = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0).0.contents; + + let amt_msat = 5000; + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[1], Some(amt_msat), None); + let payee_tlvs = ReceiveTlvs { + payment_secret, + payment_constraints: PaymentConstraints { + max_cltv_expiry: u32::max_value(), + htlc_minimum_msat: chan_upd.htlc_minimum_msat, + }, + payment_context: PaymentContext::Bolt12Refund(Bolt12RefundContext {}), + }; + let receive_auth_key = chanmon_cfgs[1].keys_manager.get_receive_auth_key(); + let dummy_tlvs = [DummyTlvs::default(); 2]; + + let mut secp_ctx = Secp256k1::new(); + let blinded_path = BlindedPaymentPath::new_with_dummy_hops( + &[], + nodes[1].node.get_our_node_id(), + &dummy_tlvs, + receive_auth_key, + payee_tlvs, + u64::MAX, + TEST_FINAL_CLTV as u16, + &chanmon_cfgs[1].keys_manager, + &secp_ctx, + ) + .unwrap(); + + let route_params = RouteParameters::from_payment_params_and_value( + PaymentParameters::blinded(vec![blinded_path]), + amt_msat, + ); + nodes[0] + .node + .send_payment( + payment_hash, + RecipientOnionFields::spontaneous_empty(), 
+ PaymentId(payment_hash.0), + route_params, + Retry::Attempts(0), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events); + + let path = &[&nodes[1]]; + let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, ev) + .with_dummy_tlvs(&dummy_tlvs) + .with_payment_secret(payment_secret); + + do_pass_along_path(args); + claim_payment(&nodes[0], &[&nodes[1]], payment_preimage); +} + #[test] #[rustfmt::skip] fn mpp_to_one_hop_blinded_path() { @@ -1526,6 +1592,7 @@ fn update_add_msg( skimmed_fee_msat: None, blinding_point, hold_htlc: None, + accountable: None, } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 57f0ca87d45..ff499d049d4 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2784,17 +2784,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { } // If we finish updating the monitor, we should free the holding cell right away (this did - // not occur prior to #756). + // not occur prior to #756). This should result in a new monitor update. chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); - - // New outbound messages should be generated immediately upon a call to - // get_and_clear_pending_msg_events (but not before). 
- check_added_monitors(&nodes[0], 0); - let mut events = nodes[0].node.get_and_clear_pending_msg_events(); check_added_monitors(&nodes[0], 1); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); // Deliver the pending in-flight CS @@ -3556,12 +3552,10 @@ fn do_test_blocked_chan_preimage_release(completion_mode: BlockedUpdateComplMode } // The event processing should release the last RAA update. - check_added_monitors(&nodes[1], 1); - - // When we fetch the next update the message getter will generate the next update for nodes[2], - // generating a further monitor update. + // It should also generate the next update for nodes[2]. + check_added_monitors(&nodes[1], 2); let mut bs_htlc_fulfill = get_htlc_update_msgs(&nodes[1], &node_c_id); - check_added_monitors(&nodes[1], 1); + check_added_monitors(&nodes[1], 0); nodes[2] .node @@ -5142,13 +5136,12 @@ fn test_mpp_claim_to_holding_cell() { nodes[3].chain_monitor.chain_monitor.channel_monitor_updated(chan_4_id, latest_id).unwrap(); // Once we process monitor events (in this case by checking for the `PaymentClaimed` event, the // RAA monitor update blocked above will be released. + // At the same time, the RAA monitor update completion will allow the C <-> D channel to + // generate its fulfill update. expect_payment_claimed!(nodes[3], paymnt_hash_1, 500_000); - check_added_monitors(&nodes[3], 1); - - // After the RAA monitor update completes, the C <-> D channel will be able to generate its - // fulfill updates as well. + check_added_monitors(&nodes[3], 2); let mut c_claim = get_htlc_update_msgs(&nodes[3], &node_c_id); - check_added_monitors(&nodes[3], 1); + check_added_monitors(&nodes[3], 0); // Finally, clear all the pending payments. 
let path = [&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]]; diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 128091ccdd5..de8bf8a55d9 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -451,6 +451,7 @@ struct OutboundHTLCOutput { skimmed_fee_msat: Option, send_timestamp: Option, hold_htlc: Option<()>, + accountable: bool, } /// See AwaitingRemoteRevoke ChannelState for more info @@ -469,6 +470,7 @@ enum HTLCUpdateAwaitingACK { skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: Option<()>, + accountable: bool, }, ClaimHTLC { payment_preimage: PaymentPreimage, @@ -1914,13 +1916,8 @@ where .handle_tx_complete(msg) .map_err(|reason| self.fail_interactive_tx_negotiation(reason, logger))?, None => { - return Err(( - ChannelError::WarnAndDisconnect( - "Received unexpected interactive transaction negotiation message" - .to_owned(), - ), - None, - )) + let err = "Received unexpected interactive transaction negotiation message"; + return Err((ChannelError::WarnAndDisconnect(err.to_owned()), None)); }, }; @@ -6504,8 +6501,7 @@ fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satos fn check_splice_contribution_sufficient( contribution: &SpliceContribution, is_initiator: bool, funding_feerate: FeeRate, ) -> Result { - let contribution_amount = contribution.value(); - if contribution_amount < SignedAmount::ZERO { + if contribution.inputs().is_empty() { let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee( contribution.inputs(), contribution.outputs(), @@ -6514,20 +6510,25 @@ fn check_splice_contribution_sufficient( funding_feerate.to_sat_per_kwu() as u32, )); + let contribution_amount = contribution.net_value(); contribution_amount .checked_sub( estimated_fee.to_signed().expect("fees should never exceed Amount::MAX_MONEY"), ) - .ok_or(format!("Our {contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply")) + .ok_or(format!( + 
"{estimated_fee} splice-out amount plus {} fee estimate exceeds the total bitcoin supply", + contribution_amount.unsigned_abs(), + )) } else { check_v2_funding_inputs_sufficient( - contribution_amount.to_sat(), + contribution.value_added(), contribution.inputs(), + contribution.outputs(), is_initiator, true, funding_feerate.to_sat_per_kwu() as u32, ) - .map(|_| contribution_amount) + .map(|_| contribution.net_value()) } } @@ -6586,16 +6587,16 @@ fn estimate_v2_funding_transaction_fee( /// Returns estimated (partial) fees as additional information #[rustfmt::skip] fn check_v2_funding_inputs_sufficient( - contribution_amount: i64, funding_inputs: &[FundingTxInput], is_initiator: bool, - is_splice: bool, funding_feerate_sat_per_1000_weight: u32, -) -> Result { - let estimated_fee = estimate_v2_funding_transaction_fee( - funding_inputs, &[], is_initiator, is_splice, funding_feerate_sat_per_1000_weight, - ); - - let mut total_input_sats = 0u64; + contributed_input_value: Amount, funding_inputs: &[FundingTxInput], outputs: &[TxOut], + is_initiator: bool, is_splice: bool, funding_feerate_sat_per_1000_weight: u32, +) -> Result { + let estimated_fee = Amount::from_sat(estimate_v2_funding_transaction_fee( + funding_inputs, outputs, is_initiator, is_splice, funding_feerate_sat_per_1000_weight, + )); + + let mut total_input_value = Amount::ZERO; for FundingTxInput { utxo, .. } in funding_inputs.iter() { - total_input_sats = total_input_sats.checked_add(utxo.output.value.to_sat()) + total_input_value = total_input_value.checked_add(utxo.output.value) .ok_or("Sum of input values is greater than the total bitcoin supply")?; } @@ -6610,13 +6611,11 @@ fn check_v2_funding_inputs_sufficient( // TODO(splicing): refine check including the fact wether a change will be added or not. // Can be done once dual funding preparation is included. 
- let minimal_input_amount_needed = contribution_amount.checked_add(estimated_fee as i64) - .ok_or(format!("Our {contribution_amount} contribution plus the fee estimate exceeds the total bitcoin supply"))?; - if i64::try_from(total_input_sats).map_err(|_| "Sum of input values is greater than the total bitcoin supply")? - < minimal_input_amount_needed - { + let minimal_input_amount_needed = contributed_input_value.checked_add(estimated_fee) + .ok_or(format!("{contributed_input_value} contribution plus {estimated_fee} fee estimate exceeds the total bitcoin supply"))?; + if total_input_value < minimal_input_amount_needed { Err(format!( - "Total input amount {total_input_sats} is lower than needed for contribution {contribution_amount}, considering fees of {estimated_fee}. Need more inputs.", + "Total input amount {total_input_value} is lower than needed for splice-in contribution {contributed_input_value}, considering fees of {estimated_fee}. Need more inputs.", )) } else { Ok(estimated_fee) @@ -6682,7 +6681,7 @@ impl FundingNegotiationContext { }; // Optionally add change output - let change_value_opt = if self.our_funding_contribution > SignedAmount::ZERO { + let change_value_opt = if !self.our_funding_inputs.is_empty() { match calculate_change_output_value( &self, self.shared_funding_input.is_some(), @@ -6710,12 +6709,12 @@ impl FundingNegotiationContext { }, } }; - let mut change_output = - TxOut { value: Amount::from_sat(change_value), script_pubkey: change_script }; + let mut change_output = TxOut { value: change_value, script_pubkey: change_script }; let change_output_weight = get_output_weight(&change_output.script_pubkey).to_wu(); let change_output_fee = fee_for_weight(self.funding_feerate_sat_per_1000_weight, change_output_weight); - let change_value_decreased_with_fee = change_value.saturating_sub(change_output_fee); + let change_value_decreased_with_fee = + change_value.to_sat().saturating_sub(change_output_fee); // Check dust limit again if 
change_value_decreased_with_fee > context.holder_dust_limit_satoshis { change_output.value = Amount::from_sat(change_value_decreased_with_fee); @@ -7503,6 +7502,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } }, @@ -7885,11 +7885,12 @@ where Ok(()) } - #[rustfmt::skip] pub fn initial_commitment_signed_v2( - &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + &mut self, msg: &msgs::CommitmentSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, ) -> Result::EcdsaSigner>, ChannelError> - where L::Target: Logger + where + L::Target: Logger, { if let Some(signing_session) = self.context.interactive_tx_signing_session.as_ref() { if signing_session.has_received_tx_signatures() { @@ -7904,16 +7905,41 @@ where }; let holder_commitment_point = &mut self.holder_commitment_point.clone(); - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "initial commitment_signed"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "initial commitment_signed", + ); let (channel_monitor, _) = self.initial_commitment_signed( - self.context.channel_id(), msg.signature, holder_commitment_point, best_block, signer_provider, logger)?; + self.context.channel_id(), + msg.signature, + holder_commitment_point, + best_block, + signer_provider, + logger, + )?; self.holder_commitment_point = *holder_commitment_point; - log_info!(logger, "Received initial commitment_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + "Received initial commitment_signed from peer for channel {}", + &self.context.channel_id() + ); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); - self.context.interactive_tx_signing_session.as_mut().expect("signing session should be present").received_commitment_signed(); + 
self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); + self.context + .interactive_tx_signing_session + .as_mut() + .expect("signing session should be present") + .received_commitment_signed(); Ok(channel_monitor) } @@ -8016,7 +8042,15 @@ where .as_mut() .expect("Signing session must exist for negotiated pending splice") .received_commitment_signed(); - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok(self.push_ret_blockable_mon_update(monitor_update)) } @@ -8328,6 +8362,7 @@ where Vec::new(), Vec::new(), Vec::new(), + logger, ); return Ok(self.push_ret_blockable_mon_update(monitor_update)); } @@ -8405,7 +8440,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc, - .. + accountable, } => { match self.send_htlc( amount_msat, @@ -8417,6 +8452,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc.is_some(), + accountable, fee_estimator, logger, ) { @@ -8533,7 +8569,15 @@ where if update_fee.is_some() { "a fee update, " } else { "" }, update_add_count, update_fulfill_count, update_fail_count); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail) } else { (None, Vec::new()) @@ -8911,6 +8955,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); }, @@ -8938,7 +8983,6 @@ where update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), &self.context.channel_id); } else { - debug_assert!(htlcs_to_fail.is_empty()); let reason = if self.context.channel_state.is_local_stfu_sent() { "exits quiescence" } else if self.context.channel_state.is_monitor_update_in_progress() { @@ 
-8956,6 +9000,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } else { @@ -8969,6 +9014,7 @@ where to_forward_infos, revoked_htlcs, finalized_claimed_htlcs, + logger, ); return_with_htlcs_to_fail!(htlcs_to_fail); } @@ -9369,20 +9415,22 @@ where /// [`ChannelManager`]: super::channelmanager::ChannelManager /// [`chain::Watch`]: crate::chain::Watch /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress - fn monitor_updating_paused( + fn monitor_updating_paused( &mut self, resend_raa: bool, resend_commitment: bool, resend_channel_ready: bool, - mut pending_forwards: Vec<(PendingHTLCInfo, u64)>, - mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, - mut pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, - ) { + pending_forwards: Vec<(PendingHTLCInfo, u64)>, + pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + pending_finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, logger: &L, + ) where + L::Target: Logger, + { + log_trace!(logger, "Pausing channel monitor updates"); + self.context.monitor_pending_revoke_and_ack |= resend_raa; self.context.monitor_pending_commitment_signed |= resend_commitment; self.context.monitor_pending_channel_ready |= resend_channel_ready; - self.context.monitor_pending_forwards.append(&mut pending_forwards); - self.context.monitor_pending_failures.append(&mut pending_fails); - self.context - .monitor_pending_finalized_fulfills - .append(&mut pending_finalized_claimed_htlcs); + self.context.monitor_pending_forwards.extend(pending_forwards); + self.context.monitor_pending_failures.extend(pending_fails); + self.context.monitor_pending_finalized_fulfills.extend(pending_finalized_claimed_htlcs); self.context.channel_state.set_monitor_update_in_progress(); } @@ -9747,6 +9795,7 @@ where skimmed_fee_msat: htlc.skimmed_fee_msat, blinding_point: htlc.blinding_point, hold_htlc: htlc.hold_htlc, + 
accountable: Some(htlc.accountable), }); } } @@ -10425,12 +10474,16 @@ where } } - pub fn shutdown( - &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown, + pub fn shutdown( + &mut self, logger: &L, signer_provider: &SP, their_features: &InitFeatures, + msg: &msgs::Shutdown, ) -> Result< (Option, Option, Vec<(HTLCSource, PaymentHash)>), ChannelError, - > { + > + where + L::Target: Logger, + { if self.context.channel_state.is_peer_disconnected() { return Err(ChannelError::close( "Peer sent shutdown when we needed a channel_reestablish".to_owned(), @@ -10535,7 +10588,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -11292,7 +11353,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); let monitor_update = self.push_ret_blockable_mon_update(monitor_update); let announcement_sigs = @@ -12003,7 +12072,7 @@ where }); } - let our_funding_contribution = contribution.value(); + let our_funding_contribution = contribution.net_value(); if our_funding_contribution == SignedAmount::ZERO { return Err(APIError::APIMisuseError { err: format!( @@ -12592,7 +12661,8 @@ where pub fn queue_add_htlc( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - blinding_point: Option, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + blinding_point: Option, accountable: bool, + fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result<(), 
(LocalHTLCFailureReason, String)> where F::Target: FeeEstimator, @@ -12609,6 +12679,7 @@ where blinding_point, // This method is only called for forwarded HTLCs, which are never held at the next hop false, + accountable, fee_estimator, logger, ) @@ -12640,7 +12711,7 @@ where &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool, skimmed_fee_msat: Option, blinding_point: Option, hold_htlc: bool, - fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) -> Result where F::Target: FeeEstimator, @@ -12722,6 +12793,7 @@ where skimmed_fee_msat, blinding_point, hold_htlc: hold_htlc.then(|| ()), + accountable, }); return Ok(false); } @@ -12744,6 +12816,7 @@ where skimmed_fee_msat, send_timestamp, hold_htlc: hold_htlc.then(|| ()), + accountable, }); self.context.next_holder_htlc_id += 1; @@ -12993,7 +13066,8 @@ where pub fn send_htlc_and_commit( &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option, - hold_htlc: bool, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, + hold_htlc: bool, accountable: bool, fee_estimator: &LowerBoundedFeeEstimator, + logger: &L, ) -> Result, ChannelError> where F::Target: FeeEstimator, @@ -13009,6 +13083,7 @@ where skimmed_fee_msat, None, hold_htlc, + accountable, fee_estimator, logger, ); @@ -13016,7 +13091,15 @@ where let can_add_htlc = send_res.map_err(|(_, msg)| ChannelError::Ignore(msg))?; if can_add_htlc { let monitor_update = self.build_commitment_no_status_check(logger); - self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + true, + false, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok(self.push_ret_blockable_mon_update(monitor_update)) } else { Ok(None) @@ 
-13042,13 +13125,19 @@ where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. - pub fn get_shutdown( + pub fn get_shutdown( &mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option, + logger: &L, ) -> Result< (msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError, - > { + > + where + L::Target: Logger, + { + let logger = WithChannelContext::from(logger, &self.context, None); + if self.context.channel_state.is_local_stfu_sent() || self.context.channel_state.is_remote_stfu_sent() || self.context.channel_state.is_quiescent() @@ -13133,7 +13222,15 @@ where }], channel_id: Some(self.context.channel_id()), }; - self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new()); + self.monitor_updating_paused( + false, + false, + false, + Vec::new(), + Vec::new(), + Vec::new(), + &&logger, + ); self.push_ret_blockable_mon_update(monitor_update) } else { None @@ -13704,34 +13801,53 @@ where /// Handles a funding_signed message from the remote end. /// If this call is successful, broadcast the funding transaction (and not before!) 
- #[rustfmt::skip] pub fn funding_signed( - mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> + mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + (FundedChannel, ChannelMonitor<::EcdsaSigner>), + (OutboundV1Channel, ChannelError), + > where - L::Target: Logger + L::Target: Logger, { if !self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_signed for an inbound channel?".to_owned()))); + let err = "Received funding_signed for an inbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!(self.context.channel_state, ChannelState::FundingNegotiated(_)) { - return Err((self, ChannelError::close("Received funding_signed in strange state!".to_owned()))); + let err = "Received funding_signed in strange state!"; + return Err((self, ChannelError::close(err.to_owned()))); } let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_signed before our first commitment point was available".to_owned()))), + None => { + let err = "Received funding_signed before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_signed"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_signed", + ); let (channel_monitor, _) = match self.initial_commitment_signed( - self.context.channel_id(), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger + self.context.channel_id(), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + 
logger, ) { Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; - log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); + log_info!( + logger, + "Received funding_signed from peer for channel {}", + &self.context.channel_id() + ); let mut channel = FundedChannel { funding: self.funding, @@ -13743,7 +13859,15 @@ where let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused( + false, + false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, channel_monitor)) } @@ -13976,15 +14100,23 @@ where self.generate_accept_channel_message(logger) } - #[rustfmt::skip] pub fn funding_created( - mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result<(FundedChannel, Option, ChannelMonitor<::EcdsaSigner>), (Self, ChannelError)> + mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, + logger: &L, + ) -> Result< + ( + FundedChannel, + Option, + ChannelMonitor<::EcdsaSigner>, + ), + (Self, ChannelError), + > where - L::Target: Logger + L::Target: Logger, { if self.funding.is_outbound() { - return Err((self, ChannelError::close("Received funding_created for an outbound channel?".to_owned()))); + let err = "Received funding_created for an outbound channel?"; + return Err((self, ChannelError::close(err.to_owned()))); } if !matches!( self.context.channel_state, ChannelState::NegotiatingFunding(flags) @@ -13993,31 +14125,50 @@ where // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT // remember the channel, so it's safe to just send an error_message here and drop the // channel. 
- return Err((self, ChannelError::close("Received funding_created after we got the channel!".to_owned()))); + let err = "Received funding_created after we got the channel!"; + return Err((self, ChannelError::close(err.to_owned()))); } let mut holder_commitment_point = match self.unfunded_context.holder_commitment_point { Some(point) => point, - None => return Err((self, ChannelError::close("Received funding_created before our first commitment point was available".to_owned()))), + None => { + let err = + "Received funding_created before our first commitment point was available"; + return Err((self, ChannelError::close(err.to_owned()))); + }, }; - self.context.assert_no_commitment_advancement(holder_commitment_point.next_transaction_number(), "funding_created"); + self.context.assert_no_commitment_advancement( + holder_commitment_point.next_transaction_number(), + "funding_created", + ); let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index }; self.funding.channel_transaction_parameters.funding_outpoint = Some(funding_txo); - let (channel_monitor, counterparty_initial_commitment_tx) = match self.initial_commitment_signed( - ChannelId::v1_from_funding_outpoint(funding_txo), msg.signature, - &mut holder_commitment_point, best_block, signer_provider, logger - ) { + let (channel_monitor, counterparty_initial_commitment_tx) = match self + .initial_commitment_signed( + ChannelId::v1_from_funding_outpoint(funding_txo), + msg.signature, + &mut holder_commitment_point, + best_block, + signer_provider, + logger, + ) { Ok(channel_monitor) => channel_monitor, Err(err) => return Err((self, err)), }; let funding_signed = self.context.get_funding_signed_msg( - &self.funding.channel_transaction_parameters, logger, counterparty_initial_commitment_tx + &self.funding.channel_transaction_parameters, + logger, + counterparty_initial_commitment_tx, ); - log_info!(logger, "{} funding_signed for peer for channel {}", - if funding_signed.is_some() { "Generated" 
} else { "Waiting for signature on" }, &self.context.channel_id()); + log_info!( + logger, + "{} funding_signed for peer for channel {}", + if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, + &self.context.channel_id() + ); // Promote the channel to a full-fledged one now that we have updated the state and have a // `ChannelMonitor`. @@ -14030,7 +14181,15 @@ where }; let need_channel_ready = channel.check_get_channel_ready(0, logger).is_some() || channel.context.signer_pending_channel_ready; - channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + channel.monitor_updating_paused( + false, + false, + need_channel_ready, + Vec::new(), + Vec::new(), + Vec::new(), + logger, + ); Ok((channel, funding_signed, channel_monitor)) } @@ -14565,7 +14724,7 @@ where } } let mut removed_htlc_attribution_data: Vec<&Option> = Vec::new(); - let mut inbound_committed_update_adds: Vec> = Vec::new(); + let mut inbound_committed_update_adds: Vec<&Option> = Vec::new(); (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?; for htlc in self.context.pending_inbound_htlcs.iter() { if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state { @@ -14587,7 +14746,7 @@ where }, &InboundHTLCState::Committed { ref update_add_htlc_opt } => { 3u8.write(writer)?; - inbound_committed_update_adds.push(update_add_htlc_opt.clone()); + inbound_committed_update_adds.push(update_add_htlc_opt); }, &InboundHTLCState::LocalRemoved(ref removal_reason) => { 4u8.write(writer)?; @@ -14624,6 +14783,7 @@ where let mut pending_outbound_skimmed_fees: Vec> = Vec::new(); let mut pending_outbound_blinding_points: Vec> = Vec::new(); let mut pending_outbound_held_htlc_flags: Vec> = Vec::new(); + let mut pending_outbound_accountable: Vec = Vec::new(); (self.context.pending_outbound_htlcs.len() as u64).write(writer)?; for htlc in self.context.pending_outbound_htlcs.iter() { @@ -14667,6 +14827,7 @@ where 
pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat); pending_outbound_blinding_points.push(htlc.blinding_point); pending_outbound_held_htlc_flags.push(htlc.hold_htlc); + pending_outbound_accountable.push(htlc.accountable); } let holding_cell_htlc_update_count = self.context.holding_cell_htlc_updates.len(); @@ -14678,6 +14839,8 @@ where Vec::with_capacity(holding_cell_htlc_update_count); let mut holding_cell_held_htlc_flags: Vec> = Vec::with_capacity(holding_cell_htlc_update_count); + let mut holding_cell_accountable_flags: Vec = + Vec::with_capacity(holding_cell_htlc_update_count); // Vec of (htlc_id, failure_code, sha256_of_onion) let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new(); (holding_cell_htlc_update_count as u64).write(writer)?; @@ -14692,6 +14855,7 @@ where blinding_point, skimmed_fee_msat, hold_htlc, + accountable, } => { 0u8.write(writer)?; amount_msat.write(writer)?; @@ -14703,6 +14867,7 @@ where holding_cell_skimmed_fees.push(skimmed_fee_msat); holding_cell_blinding_points.push(blinding_point); holding_cell_held_htlc_flags.push(hold_htlc); + holding_cell_accountable_flags.push(accountable); }, &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, @@ -14964,6 +15129,8 @@ where (71, holder_commitment_point_previous_revoked, option), // Added in 0.3 (73, holder_commitment_point_last_revoked, option), // Added in 0.3 (75, inbound_committed_update_adds, optional_vec), + (77, holding_cell_accountable_flags, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); Ok(()) @@ -15131,6 +15298,7 @@ where blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); } @@ -15150,6 +15318,7 @@ where skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }, 1 => HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: Readable::read(reader)?, @@ -15352,6 +15521,8 @@ where let mut pending_outbound_held_htlc_flags_opt: Option>> = None; let mut 
holding_cell_held_htlc_flags_opt: Option>> = None; let mut inbound_committed_update_adds_opt: Option>> = None; + let mut holding_cell_accountable: Option> = None; + let mut pending_outbound_accountable: Option> = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -15402,6 +15573,8 @@ where (71, holder_commitment_point_previous_revoked_opt, option), // Added in 0.3 (73, holder_commitment_point_last_revoked_opt, option), // Added in 0.3 (75, inbound_committed_update_adds_opt, optional_vec), + (77, holding_cell_accountable, optional_vec), // Added in 0.3 + (79, pending_outbound_accountable, optional_vec), // Added in 0.3 }); let holder_signer = signer_provider.derive_channel_signer(channel_keys_id); @@ -15537,6 +15710,28 @@ where } } + if let Some(accountable_htlcs) = holding_cell_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in holding_cell_htlc_updates.iter_mut() { + if let HTLCUpdateAwaitingACK::AddHTLC { ref mut accountable, .. } = htlc { + *accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } + if let Some(accountable_htlcs) = pending_outbound_accountable { + let mut iter = accountable_htlcs.into_iter(); + for htlc in pending_outbound_htlcs.iter_mut() { + htlc.accountable = iter.next().ok_or(DecodeError::InvalidValue)?; + } + // We expect all accountable HTLC signals to be consumed above + if iter.next().is_some() { + return Err(DecodeError::InvalidValue); + } + } if let Some(attribution_data_list) = removed_htlc_attribution_data { let mut removed_htlcs = pending_inbound_htlcs.iter_mut().filter_map(|status| { if let InboundHTLCState::LocalRemoved(reason) = &mut status.state { @@ -16135,11 +16330,13 @@ mod tests { first_hop_htlc_msat: 548, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + payment_nonce: None, }, skimmed_fee_msat: None, blinding_point: None, 
send_timestamp: None, hold_htlc: None, + accountable: false, }); // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass @@ -16583,6 +16780,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([42; 32]), bolt12_invoice: None, + payment_nonce: None, }; let dummy_outbound_output = OutboundHTLCOutput { htlc_id: 0, @@ -16595,6 +16793,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }; let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10]; for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() { @@ -16621,6 +16820,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: false, }; let dummy_holding_cell_claim_htlc = |attribution_data| HTLCUpdateAwaitingACK::ClaimHTLC { payment_preimage: PaymentPreimage([42; 32]), @@ -16992,6 +17192,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_3 = @@ -17007,6 +17208,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); let payment_preimage_4 = @@ -17422,6 +17624,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput { @@ -17435,6 +17638,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }); test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8", @@ -17676,6 +17880,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17739,6 +17944,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17821,6 +18027,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + 
accountable: false, } }), ); @@ -17877,6 +18084,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -17913,6 +18121,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17950,6 +18159,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -17987,6 +18197,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }, ), ); @@ -18047,6 +18258,7 @@ mod tests { blinding_point: None, send_timestamp: None, hold_htlc: None, + accountable: false, }), ); @@ -18317,6 +18529,13 @@ mod tests { FundingTxInput::new_p2wpkh(prevtx, 0).unwrap() } + fn funding_output_sats(output_value_sats: u64) -> TxOut { + TxOut { + value: Amount::from_sat(output_value_sats), + script_pubkey: ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()), + } + } + #[test] #[rustfmt::skip] fn test_check_v2_funding_inputs_sufficient() { @@ -18327,16 +18546,83 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( - 220_000, + Amount::from_sat(220_000), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), + ); + } + + // Net splice-in + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(200_000), + ], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out + { + let expected_fee = if cfg!(feature = "grind_signatures") { 2526 } else { 2532 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + 
funding_input_sats(100_000), + ], + &[ + funding_output_sats(400_000), + ], + true, + true, + 2000, + ).unwrap(), + Amount::from_sat(expected_fee), + ); + } + + // Net splice-out, inputs insufficient to cover fees + { + let expected_fee = if cfg!(feature = "grind_signatures") { 113670 } else { 113940 }; + assert_eq!( + check_v2_funding_inputs_sufficient( + Amount::from_sat(220_000), + &[ + funding_input_sats(200_000), + funding_input_sats(100_000), + ], + &[ + funding_output_sats(400_000), + ], + true, + true, + 90000, + ), + Err(format!( + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), + )), ); } @@ -18345,17 +18631,18 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1736 } else { 1740 }; assert_eq!( check_v2_funding_inputs_sufficient( - 220_000, + Amount::from_sat(220_000), &[ funding_input_sats(100_000), ], + &[], true, true, 2000, ), Err(format!( - "Total input amount 100000 is lower than needed for contribution 220000, considering fees of {}. Need more inputs.", - expected_fee, + "Total input amount 0.00100000 BTC is lower than needed for splice-in contribution 0.00220000 BTC, considering fees of {}. 
Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18365,16 +18652,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2278 } else { 2284 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } @@ -18383,18 +18671,19 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 2506 } else { 2513 }; assert_eq!( check_v2_funding_inputs_sufficient( - 298032, + Amount::from_sat(298032), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], true, true, 2200, ), Err(format!( - "Total input amount 300000 is lower than needed for contribution 298032, considering fees of {}. Need more inputs.", - expected_fee + "Total input amount 0.00300000 BTC is lower than needed for splice-in contribution 0.00298032 BTC, considering fees of {}. Need more inputs.", + Amount::from_sat(expected_fee), )), ); } @@ -18404,16 +18693,17 @@ mod tests { let expected_fee = if cfg!(feature = "grind_signatures") { 1084 } else { 1088 }; assert_eq!( check_v2_funding_inputs_sufficient( - (300_000 - expected_fee - 20) as i64, + Amount::from_sat(300_000 - expected_fee - 20), &[ funding_input_sats(200_000), funding_input_sats(100_000), ], + &[], false, false, 2000, ).unwrap(), - expected_fee, + Amount::from_sat(expected_fee), ); } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f2e8fa70e4f..aa1cbfae576 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -427,6 +427,9 @@ pub struct PendingHTLCInfo { /// This is used to allow LSPs to take fees as a part of payments, without the sender having to /// shoulder them. 
pub skimmed_fee_msat: Option, + /// An experimental field indicating whether our node's reputation would be held accountable + /// for the timely resolution of the received HTLC. + pub incoming_accountable: bool, } #[derive(Clone, Debug)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug @@ -767,6 +770,12 @@ mod fuzzy_channelmanager { /// we can provide proof-of-payment details in payment claim events even after a restart /// with a stale ChannelManager state. bolt12_invoice: Option, + /// The [`Nonce`] used when the BOLT 12 [`InvoiceRequest`] was created. Stored here so + /// it can be included in [`Event::PaymentSent`] for building payer proofs, even after + /// a restart with a stale `ChannelManager` state. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + payment_nonce: Option, }, } @@ -810,6 +819,7 @@ impl core::hash::Hash for HTLCSource { payment_id, first_hop_htlc_msat, bolt12_invoice, + .. } => { 1u8.hash(hasher); path.hash(hasher); @@ -830,6 +840,7 @@ impl HTLCSource { first_hop_htlc_msat: 0, payment_id: PaymentId([2; 32]), bolt12_invoice: None, + payment_nonce: None, } } @@ -1366,6 +1377,25 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, }, ); +/// Result of attempting to resume a channel after a monitor update completes while locks are held. +/// Contains remaining work to be processed after locks are released. +#[must_use] +enum PostMonitorUpdateChanResume { + /// Channel still has blocked monitor updates pending. Contains only update actions to process. + Blocked { update_actions: Vec }, + /// Channel was fully unblocked and has been resumed. Contains remaining data to process. 
+ Unblocked { + channel_id: ChannelId, + counterparty_node_id: PublicKey, + unbroadcasted_batch_funding_txid: Option, + update_actions: Vec, + htlc_forwards: Option, + decode_update_add_htlcs: Option<(u64, Vec)>, + finalized_claimed_htlcs: Vec<(HTLCSource, Option)>, + failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>, + }, +} + #[derive(Clone, Debug, PartialEq, Eq)] pub(crate) struct PaymentCompleteUpdate { counterparty_node_id: PublicKey, @@ -2230,7 +2260,7 @@ where /// match event { /// Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose { /// PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claiming payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2238,7 +2268,7 @@ where /// println!("Unknown payment hash: {}", payment_hash); /// }, /// PaymentPurpose::SpontaneousPayment(payment_preimage) => { -/// assert_ne!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_ne!(payment_hash, invoice.payment_hash()); /// println!("Claiming spontaneous payment {}", payment_hash); /// channel_manager.claim_funds(payment_preimage); /// }, @@ -2246,7 +2276,7 @@ where /// # _ => {}, /// }, /// Event::PaymentClaimed { payment_hash, amount_msat, .. } => { -/// assert_eq!(payment_hash.0, invoice.payment_hash().as_ref()); +/// assert_eq!(payment_hash, invoice.payment_hash()); /// println!("Claimed {} msats", amount_msat); /// }, /// // ... 
@@ -2271,7 +2301,7 @@ where /// # ) { /// # let channel_manager = channel_manager.get_cm(); /// # let payment_id = PaymentId([42; 32]); -/// # let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); +/// # let payment_hash = invoice.payment_hash(); /// match channel_manager.pay_for_bolt11_invoice( /// invoice, payment_id, None, route_params_config, retry /// ) { @@ -3201,6 +3231,14 @@ pub struct PhantomRouteHints { pub real_node_pubkey: PublicKey, } +/// The return type of [`ChannelManager::check_free_peer_holding_cells`] +type FreeHoldingCellsResult = Vec<( + ChannelId, + PublicKey, + Option, + Vec<(HTLCSource, PaymentHash)>, +)>; + macro_rules! insert_short_channel_id { ($short_to_chan_info: ident, $channel: expr) => {{ if let Some(real_scid) = $channel.funding.get_short_channel_id() { @@ -3272,284 +3310,6 @@ macro_rules! emit_initial_channel_ready_event { }; } -/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel. -/// -/// You should not add new direct calls to this, generally, rather rely on -/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you. -/// -/// Requires that the in-flight monitor update set for this channel is empty! -macro_rules! 
handle_monitor_update_completion { - ($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{ - let chan_id = $chan.context.channel_id(); - let outbound_alias = $chan.context().outbound_scid_alias(); - let cp_node_id = $chan.context.get_counterparty_node_id(); - - #[cfg(debug_assertions)] - { - let in_flight_updates = $peer_state.in_flight_monitor_updates.get(&chan_id); - assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); - assert!($chan.is_awaiting_monitor_update()); - } - - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - - let update_actions = - $peer_state.monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); - - if $chan.blocked_monitor_updates_pending() != 0 { - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); - $self.handle_monitor_update_completion_actions(update_actions); - } else { - log_debug!(logger, "Channel is open and awaiting update, resuming it"); - let updates = $chan.monitor_updating_restored( - &&logger, - &$self.node_signer, - $self.chain_hash, - &*$self.config.read().unwrap(), - $self.best_block.read().unwrap().height, - |htlc_id| { - $self.path_for_release_held_htlc(htlc_id, outbound_alias, &chan_id, &cp_node_id) - }, - ); - let channel_update = if updates.channel_ready.is_some() - && $chan.context.is_usable() - && $peer_state.is_connected - { - // We only send a channel_update in the case where we are just now sending a - // channel_ready and the channel is in a usable state. We may re-send a - // channel_update later through the announcement_signatures process for public - // channels, but there's no reason not to just inform our counterparty of our fees - // now. 
- if let Ok((msg, _, _)) = $self.get_channel_update_for_unicast($chan) { - Some(MessageSendEvent::SendChannelUpdate { node_id: cp_node_id, msg }) - } else { - None - } - } else { - None - }; - - let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption( - &mut $peer_state.pending_msg_events, - $chan, - updates.raa, - updates.commitment_update, - updates.commitment_order, - updates.accepted_htlcs, - updates.pending_update_adds, - updates.funding_broadcastable, - updates.channel_ready, - updates.announcement_sigs, - updates.tx_signatures, - None, - updates.channel_ready_order, - ); - if let Some(upd) = channel_update { - $peer_state.pending_msg_events.push(upd); - } - - let unbroadcasted_batch_funding_txid = - $chan.context.unbroadcasted_batch_funding_txid(&$chan.funding); - core::mem::drop($peer_state_lock); - core::mem::drop($per_peer_state_lock); - - $self.post_monitor_update_unlock( - chan_id, - cp_node_id, - unbroadcasted_batch_funding_txid, - update_actions, - htlc_forwards, - decode_update_add_htlcs, - updates.finalized_claimed_htlcs, - updates.failed_htlcs, - ); - } - }}; -} - -/// Returns whether the monitor update is completed, `false` if the update is in-progress. -fn handle_monitor_update_res( - cm: &CM, update_res: ChannelMonitorUpdateStatus, logger: LG, -) -> bool { - debug_assert!(cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire)); - match update_res { - ChannelMonitorUpdateStatus::UnrecoverableError => { - let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; - log_error!(logger, "{}", err_str); - panic!("{}", err_str); - }, - ChannelMonitorUpdateStatus::InProgress => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(1, Ordering::Relaxed) == 2 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - log_debug!( - logger, - "ChannelMonitor update in flight, holding messages until the update completes.", - ); - false - }, - ChannelMonitorUpdateStatus::Completed => { - #[cfg(not(any(test, feature = "_externalize_tests")))] - if cm.get_cm().monitor_update_type.swap(2, Ordering::Relaxed) == 1 { - panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); - } - true - }, - } -} - -macro_rules! handle_initial_monitor { - ($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { - let logger = WithChannelContext::from(&$self.logger, &$chan.context, None); - let update_completed = handle_monitor_update_res($self, $update_res, logger); - if update_completed { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan - ); - } - }; -} - -fn handle_new_monitor_update_internal( - cm: &CM, - in_flight_monitor_updates: &mut BTreeMap)>, - channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, - new_update: ChannelMonitorUpdate, logger: LG, -) -> (bool, bool) { - let in_flight_updates = &mut in_flight_monitor_updates - .entry(channel_id) - .or_insert_with(|| (funding_txo, Vec::new())) - .1; - // During startup, we push monitor updates as background events through to here in - // order to replay updates that were in-flight when we shut down. Thus, we have to - // filter for uniqueness here. 
- let update_idx = - in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { - in_flight_updates.push(new_update); - in_flight_updates.len() - 1 - }); - - if cm.get_cm().background_events_processed_since_startup.load(Ordering::Acquire) { - let update_res = - cm.get_cm().chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); - let update_completed = handle_monitor_update_res(cm, update_res, logger); - if update_completed { - let _ = in_flight_updates.remove(update_idx); - } - (update_completed, update_completed && in_flight_updates.is_empty()) - } else { - // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we - // fail to persist it. This is a fairly safe assumption, however, since anything we do - // during the startup sequence should be replayed exactly if we immediately crash. - let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, - funding_txo, - channel_id, - update: in_flight_updates[update_idx].clone(), - }; - // We want to track the in-flight update both in `in_flight_monitor_updates` and in - // `pending_background_events` to avoid a race condition during - // `pending_background_events` processing where we complete one - // `ChannelMonitorUpdate` (but there are more pending as background events) but we - // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to - // run post-completion actions. - // We could work around that with some effort, but its simpler to just track updates - // twice. - cm.get_cm().pending_background_events.lock().unwrap().push(event); - (false, false) - } -} - -macro_rules! 
handle_post_close_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr - ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, - &mut $peer_state.in_flight_monitor_updates, - $channel_id, - $funding_txo, - $counterparty_node_id, - $update, - WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None), - ); - if all_updates_complete { - let update_actions = $peer_state - .monitor_update_blocked_actions - .remove(&$channel_id) - .unwrap_or(Vec::new()); - - mem::drop($peer_state_lock); - mem::drop($per_peer_state_lock); - - $self.handle_monitor_update_completion_actions(update_actions); - } - update_completed - }}; -} - -/// Handles a new monitor update without dropping peer_state locks and calling -/// [`ChannelManager::handle_monitor_update_completion_actions`] if the monitor update completed -/// synchronously. -/// -/// Useful because monitor updates need to be handled in the same mutex where the channel generated -/// them (otherwise they can end up getting applied out-of-order) but it's not always possible to -/// drop the aforementioned peer state locks at a given callsite. In this situation, use this macro -/// to apply the monitor update immediately and handle the monitor update completion actions at a -/// later time. -macro_rules! handle_new_monitor_update_locked_actions_handled_by_caller { - ( - $self: ident, $funding_txo: expr, $update: expr, $in_flight_monitor_updates: expr, $chan_context: expr - ) => {{ - let (update_completed, _all_updates_complete) = handle_new_monitor_update_internal( - $self, - $in_flight_monitor_updates, - $chan_context.channel_id(), - $funding_txo, - $chan_context.get_counterparty_node_id(), - $update, - WithChannelContext::from(&$self.logger, &$chan_context, None), - ); - update_completed - }}; -} - -macro_rules! 
handle_new_monitor_update { - ( - $self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, - $per_peer_state_lock: expr, $chan: expr - ) => {{ - let (update_completed, all_updates_complete) = handle_new_monitor_update_internal( - $self, - &mut $peer_state.in_flight_monitor_updates, - $chan.context.channel_id(), - $funding_txo, - $chan.context.get_counterparty_node_id(), - $update, - WithChannelContext::from(&$self.logger, &$chan.context, None), - ); - if all_updates_complete { - handle_monitor_update_completion!( - $self, - $peer_state_lock, - $peer_state, - $per_peer_state_lock, - $chan - ); - } - update_completed - }}; -} - fn convert_channel_err_internal< Close: FnOnce(ClosureReason, &str) -> (ShutdownResult, Option<(msgs::ChannelUpdate, NodeId, NodeId)>), >( @@ -3686,6 +3446,11 @@ macro_rules! process_events_body { } if !post_event_actions.is_empty() { + // `handle_post_event_actions` may update channel state, so take the total + // consistency lock now similarly to other callers of `handle_post_event_actions`. + // Note that if it needs to wake the background processor for event handling or + // persistence it will do so directly. + let _read_guard = $self.total_consistency_lock.read().unwrap(); $self.handle_post_event_actions(post_event_actions); // If we had some actions, go around again as we may have more events now processed_all_events = false; @@ -4193,6 +3958,7 @@ where their_features, target_feerate_sats_per_1000_weight, override_shutdown_script, + &self.logger, )?; failed_htlcs = htlcs; @@ -4211,15 +3977,19 @@ where // Update the monitor with the shutdown script if necessary. 
if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } else { let reason = ClosureReason::LocallyCoopClosedUnfundedChannel; @@ -4344,8 +4114,19 @@ where match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_entry) => { if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - handle_new_monitor_update!(self, funding_txo, - monitor_update, peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } return; } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -4354,10 +4135,18 @@ where hash_map::Entry::Vacant(_) => {}, } - handle_post_close_monitor_update!( - self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, - counterparty_node_id, channel_id - ); + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + funding_txo, + monitor_update, + counterparty_node_id, + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } /// When a channel is 
removed, two things need to happen: @@ -4703,12 +4492,12 @@ where log_error!(logger, "Closed channel due to close-required error: {}", msg); if let Some((_, funding_txo, _, update)) = shutdown_res.monitor_update.take() { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, + self.handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + chan.context.channel_id(), funding_txo, + chan.context.get_counterparty_node_id(), update, - in_flight_monitor_updates, - chan.context ); } // If there's a possibility that we need to generate further monitor updates for this @@ -4854,6 +4643,12 @@ where /// the channel. This will spend the channel's funding transaction output, effectively replacing /// it with a new one. /// + /// # Required Feature Flags + /// + /// Initiating a splice requires that the channel counterparty supports splicing. Any + /// channel (no matter the type) can be spliced, as long as the counterparty is currently + /// connected. + /// /// # Arguments /// /// Provide a `contribution` to determine if value is spliced in or out. The splice initiator is @@ -5123,6 +4918,11 @@ where ) -> Result<(), LocalHTLCFailureReason> { let outgoing_scid = match next_packet_details.outgoing_connector { HopConnector::ShortChannelId(scid) => scid, + HopConnector::Dummy => { + // Dummy hops are only used for path padding and must not reach HTLC processing. + debug_assert!(false, "Dummy hop reached HTLC handling."); + return Err(LocalHTLCFailureReason::InvalidOnionPayload); + } HopConnector::Trampoline(_) => { return Err(LocalHTLCFailureReason::InvalidTrampolineForward); } @@ -5249,11 +5049,25 @@ where let current_height: u32 = self.best_block.read().unwrap().height; create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat, - current_height) + msg.accountable.unwrap_or(false), current_height) }, onion_utils::Hop::Forward { .. 
} | onion_utils::Hop::BlindedForward { .. } => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!( + false, + "Reached unreachable dummy-hop HTLC. Dummy hops are peeled in \ + `process_pending_update_add_htlcs`, and the resulting HTLC is \ + re-enqueued for processing. Hitting this means the peel-and-requeue \ + step was missed." + ); + return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => { create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt) }, @@ -5368,6 +5182,7 @@ where keysend_preimage, invoice_request: None, bolt12_invoice: None, + payment_nonce: None, session_priv_bytes, hold_htlc_at_next_hop: false, }) @@ -5384,6 +5199,7 @@ where keysend_preimage, invoice_request, bolt12_invoice, + payment_nonce, session_priv_bytes, hold_htlc_at_next_hop, } = args; @@ -5467,6 +5283,7 @@ where first_hop_htlc_msat: htlc_msat, payment_id, bolt12_invoice: bolt12_invoice.cloned(), + payment_nonce: payment_nonce.copied(), }; let send_res = chan.send_htlc_and_commit( htlc_msat, @@ -5476,21 +5293,28 @@ where onion_packet, None, hold_htlc_at_next_hop, + false, // Not accountable by default for sender. 
&self.fee_estimator, &&logger, ); match break_channel_entry!(self, peer_state, send_res, chan_entry) { Some(monitor_update) => { - let ok = handle_new_monitor_update!( - self, - funding_txo, - monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); - if !ok { + let (update_completed, completion_data) = self + .handle_new_monitor_update_with_status( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ); + if let Some(data) = completion_data { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } + if !update_completed { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor // updating completes. Therefore, we must return an error @@ -5735,31 +5559,19 @@ where pub fn send_payment_for_bolt12_invoice( &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, ) -> Result<(), Bolt12PaymentError> { - match self.verify_bolt12_invoice(invoice, context) { - Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id), + let nonce = context.and_then(|ctx| match ctx { + OffersContext::OutboundPaymentForOffer { nonce, .. } + | OffersContext::OutboundPaymentForRefund { nonce, .. 
} => Some(*nonce), + _ => None, + }); + match self.flow.verify_bolt12_invoice(invoice, context) { + Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id, nonce), Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice), } } - fn verify_bolt12_invoice( - &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>, - ) -> Result { - let secp_ctx = &self.secp_ctx; - let expanded_key = &self.inbound_payment_key; - - match context { - None if invoice.is_for_refund_without_paths() => { - invoice.verify_using_metadata(expanded_key, secp_ctx) - }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) - }, - _ => Err(()), - } - } - fn send_payment_for_verified_bolt12_invoice( - &self, invoice: &Bolt12Invoice, payment_id: PaymentId, + &self, invoice: &Bolt12Invoice, payment_id: PaymentId, payment_nonce: Option, ) -> Result<(), Bolt12PaymentError> { let best_block_height = self.best_block.read().unwrap().height; let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -5767,6 +5579,7 @@ where self.pending_outbound_payments.send_payment_for_bolt12_invoice( invoice, payment_id, + payment_nonce, &self.router, self.list_usable_channels(), features, @@ -7034,6 +6847,7 @@ where fn process_pending_update_add_htlcs(&self) -> bool { let mut should_persist = false; let mut decode_update_add_htlcs = new_hash_map(); + let mut dummy_update_add_htlcs = new_hash_map(); mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap()); let get_htlc_failure_type = |outgoing_scid_opt: Option, payment_hash: PaymentHash| { @@ -7097,7 +6911,36 @@ where &*self.logger, &self.secp_ctx, ) { - Ok(decoded_onion) => decoded_onion, + Ok(decoded_onion) => match decoded_onion { + ( + onion_utils::Hop::Dummy { + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + .. 
+ }, + Some(next_packet_details), + ) => { + let new_update_add_htlc = + onion_utils::peel_dummy_hop_update_add_htlc( + update_add_htlc, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &*self.node_signer, + &self.secp_ctx, + ); + + dummy_update_add_htlcs + .entry(incoming_scid_alias) + .or_insert_with(Vec::new) + .push(new_update_add_htlc); + + continue; + }, + _ => decoded_onion, + }, Err((htlc_fail, reason)) => { let failure_type = HTLCHandlingFailureType::InvalidOnion; @@ -7110,6 +6953,13 @@ where let outgoing_scid_opt = next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector { HopConnector::ShortChannelId(scid) => Some(scid), + HopConnector::Dummy => { + debug_assert!( + false, + "Dummy hops must never be processed at this stage." + ); + None + }, HopConnector::Trampoline(_) => None, }); let shared_secret = next_hop.shared_secret().secret_bytes(); @@ -7253,6 +7103,19 @@ where )); } } + + // Merge peeled dummy HTLCs into the existing decode queue so they can be + // processed in the next iteration. We avoid replacing the whole queue + // (e.g. via mem::swap) because other threads may have enqueued new HTLCs + // meanwhile; merging preserves everything safely. + if !dummy_update_add_htlcs.is_empty() { + let mut decode_update_add_htlc_source = self.decode_update_add_htlcs.lock().unwrap(); + + for (incoming_scid_alias, htlcs) in dummy_update_add_htlcs.into_iter() { + decode_update_add_htlc_source.entry(incoming_scid_alias).or_default().extend(htlcs); + } + } + should_persist } @@ -7351,10 +7214,6 @@ where } self.forward_htlcs(&mut phantom_receives); - // Freeing the holding cell here is relatively redundant - in practice we'll do it when we - // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's - // nice to do the work now if we can rather than while we're trying to get messages in the - // network stack. 
if self.check_free_holding_cells() { should_persist = NotifyOption::DoPersist; } @@ -7392,6 +7251,7 @@ where payment_hash, outgoing_amt_msat, outgoing_cltv_value, + incoming_accountable, .. }, } = payment; @@ -7490,6 +7350,7 @@ where Some(phantom_shared_secret), false, None, + incoming_accountable, current_height, ); match create_res { @@ -7599,6 +7460,7 @@ where outgoing_cltv_value, routing, skimmed_fee_msat, + incoming_accountable, .. }, .. @@ -7699,6 +7561,7 @@ where onion_packet.clone(), *skimmed_fee_msat, next_blinding_point, + *incoming_accountable, &self.fee_estimator, &&logger, ) { @@ -8548,10 +8411,21 @@ where self.check_refresh_async_receive_offer_cache(true); - // Technically we don't need to do this here, but if we have holding cell entries in a - // channel that need freeing, it's better to do that here and block a background task - // than block the message queueing pipeline. if self.check_free_holding_cells() { + // While we try to ensure we clear holding cells immediately, its possible we miss + // one somewhere. Thus, its useful to try regularly to ensure even if something + // gets stuck its only for a minute or so. Still, good to panic here in debug to + // ensure we discover the missing free. + // Note that in cases where we had a fee update in the loop above, we expect to + // need to free holding cells now, thus we only report an error if `should_persist` + // has not been updated to `DoPersist`. + if should_persist != NotifyOption::DoPersist { + debug_assert!(false, "Holding cells are cleared immediately"); + log_error!( + self.logger, + "Holding cells were freed in last-ditch cleanup. Please report this (performance) bug." 
+ ); + } should_persist = NotifyOption::DoPersist; } @@ -9154,15 +9028,19 @@ where .or_insert_with(Vec::new) .push(raa_blocker); } - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, prev_hop.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } }, UpdateFulfillCommitFetch::DuplicateClaim {} => { let (action_opt, raa_blocker_opt) = completion_action(None, true); @@ -9323,16 +9201,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .push(action); } - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, prev_hop.funding_txo, preimage_update, - peer_state_lock, - peer_state, - per_peer_state, prev_hop.counterparty_node_id, - chan_id - ); + chan_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } fn finalize_claims(&self, sources: Vec<(HTLCSource, Option)>) { @@ -9381,7 +9261,7 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let htlc_id = SentHTLCId::from_source(&source); match source { HTLCSource::OutboundRoute { - session_priv, payment_id, path, bolt12_invoice, .. + session_priv, payment_id, path, bolt12_invoice, payment_nonce, .. } => { debug_assert!(!startup_replay, "We don't support claim_htlc claims during startup - monitors may not be available yet"); @@ -9406,6 +9286,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ payment_id, payment_preimage, bolt12_invoice, + payment_nonce, session_priv, path, from_onchain, @@ -9795,54 +9676,416 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } - /// Handles a channel reentering a functional state, either due to reconnect or a monitor - /// update completion. - #[rustfmt::skip] - fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, - channel: &mut FundedChannel, raa: Option, - commitment_update: Option, commitment_order: RAACommitmentOrder, - pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, - funding_broadcastable: Option, - channel_ready: Option, announcement_sigs: Option, - tx_signatures: Option, tx_abort: Option, - channel_ready_order: ChannelReadyOrder, - ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { - let logger = WithChannelContext::from(&self.logger, &channel.context, None); - log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", - if raa.is_some() { "an" } else { "no" }, - if commitment_update.is_some() { "a" } else { "no" }, - pending_forwards.len(), pending_update_adds.len(), - if funding_broadcastable.is_some() { "" } else { "not " }, - if channel_ready.is_some() { "sending" } else { "without" }, - if announcement_sigs.is_some() { "sending" } else { "without" }, - if tx_signatures.is_some() { "sending" } else { "without" }, - if tx_abort.is_some() { "sending" } else { "without" }, - ); - - let counterparty_node_id = channel.context.get_counterparty_node_id(); - let outbound_scid_alias = channel.context.outbound_scid_alias(); + /// Applies a [`ChannelMonitorUpdate`] to the channel monitor. 
+ /// + /// Monitor updates must be applied while holding the same lock under which they were generated + /// to ensure correct ordering. However, completion handling requires releasing those locks. + /// This method applies the update immediately (while locks are held) and returns whether the + /// update completed, allowing the caller to handle completion separately after releasing locks. + /// + /// Returns a tuple of `(update_completed, all_updates_completed)`: + /// - `update_completed`: whether this specific monitor update finished persisting + /// - `all_updates_completed`: whether all in-flight updates for this channel are now complete + fn handle_new_monitor_update_locked_actions_handled_by_caller( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + channel_id: ChannelId, funding_txo: OutPoint, counterparty_node_id: PublicKey, + new_update: ChannelMonitorUpdate, + ) -> (bool, bool) { + let in_flight_updates = &mut in_flight_monitor_updates + .entry(channel_id) + .or_insert_with(|| (funding_txo, Vec::new())) + .1; + // During startup, we push monitor updates as background events through to here in + // order to replay updates that were in-flight when we shut down. Thus, we have to + // filter for uniqueness here. 
+ let update_idx = + in_flight_updates.iter().position(|upd| upd == &new_update).unwrap_or_else(|| { + in_flight_updates.push(new_update); + in_flight_updates.len() - 1 + }); - let mut htlc_forwards = None; - if !pending_forwards.is_empty() { - htlc_forwards = Some(( - outbound_scid_alias, channel.context.get_counterparty_node_id(), - channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), - channel.context.get_user_id(), pending_forwards - )); - } - let mut decode_update_add_htlcs = None; - if !pending_update_adds.is_empty() { - decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); + if self.background_events_processed_since_startup.load(Ordering::Acquire) { + let update_res = + self.chain_monitor.update_channel(channel_id, &in_flight_updates[update_idx]); + let logger = + WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + let _ = in_flight_updates.remove(update_idx); + } + (update_completed, update_completed && in_flight_updates.is_empty()) + } else { + // We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we + // fail to persist it. This is a fairly safe assumption, however, since anything we do + // during the startup sequence should be replayed exactly if we immediately crash. 
+ let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id, + funding_txo, + channel_id, + update: in_flight_updates[update_idx].clone(), + }; + // We want to track the in-flight update both in `in_flight_monitor_updates` and in + // `pending_background_events` to avoid a race condition during + // `pending_background_events` processing where we complete one + // `ChannelMonitorUpdate` (but there are more pending as background events) but we + // conclude that all pending `ChannelMonitorUpdate`s have completed and its safe to + // run post-completion actions. + // We could work around that with some effort, but its simpler to just track updates + // twice. + self.pending_background_events.lock().unwrap().push(event); + (false, false) } + } - if channel.context.is_connected() { - if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order { - if let Some(msg) = &channel_ready { - self.send_channel_ready(pending_msg_events, channel, msg.clone()); - } - - if let Some(msg) = &announcement_sigs { - pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { + /// Handles a monitor update for a closed channel, returning optionally the completion actions + /// to process after locks are released. + /// + /// Returns `Some` if all in-flight updates are complete. 
+ fn handle_post_close_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + funding_txo: OutPoint, update: ChannelMonitorUpdate, counterparty_node_id: PublicKey, + channel_id: ChannelId, + ) -> Option> { + let (_update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + channel_id, + funding_txo, + counterparty_node_id, + update, + ); + if all_updates_complete { + Some(monitor_update_blocked_actions.remove(&channel_id).unwrap_or(Vec::new())) + } else { + None + } + } + + /// Returns whether the monitor update is completed, `false` if the update is in-progress. + fn handle_monitor_update_res( + &self, update_res: ChannelMonitorUpdateStatus, logger: LG, + ) -> bool { + debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire)); + match update_res { + ChannelMonitorUpdateStatus::UnrecoverableError => { + let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. 
This indicates we cannot continue normal operation and must shut down."; + log_error!(logger, "{}", err_str); + panic!("{}", err_str); + }, + ChannelMonitorUpdateStatus::InProgress => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + log_debug!( + logger, + "ChannelMonitor update in flight, holding messages until the update completes.", + ); + false + }, + ChannelMonitorUpdateStatus::Completed => { + #[cfg(not(any(test, feature = "_externalize_tests")))] + if self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 { + panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart"); + } + true + }, + } + } + + /// Handles the initial monitor persistence, returning optionally data to process after locks + /// are released. + /// + /// Note: This method takes individual fields from `PeerState` rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn handle_initial_monitor( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, update_res: ChannelMonitorUpdateStatus, + ) -> Option { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + let update_completed = self.handle_monitor_update_res(update_res, logger); + if update_completed { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + } + } + + /// Applies a new monitor update and attempts to resume the channel if all updates are complete. 
+ /// + /// Returns [`PostMonitorUpdateChanResume`] if all in-flight updates are complete, which should + /// be passed to [`Self::handle_post_monitor_update_chan_resume`] after releasing locks. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. + fn handle_new_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> Option { + self.handle_new_monitor_update_with_status( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + funding_txo, + update, + ) + .1 + } + + /// Like [`Self::handle_new_monitor_update`], but also returns whether this specific update + /// completed (as opposed to being in-progress). 
+ fn handle_new_monitor_update_with_status( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, funding_txo: OutPoint, update: ChannelMonitorUpdate, + ) -> (bool, Option) { + let chan_id = chan.context.channel_id(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + let (update_completed, all_updates_complete) = self + .handle_new_monitor_update_locked_actions_handled_by_caller( + in_flight_monitor_updates, + chan_id, + funding_txo, + counterparty_node_id, + update, + ); + + let completion_data = if all_updates_complete { + Some(self.try_resume_channel_post_monitor_update( + in_flight_monitor_updates, + monitor_update_blocked_actions, + pending_msg_events, + is_connected, + chan, + )) + } else { + None + }; + + (update_completed, completion_data) + } + + /// Attempts to resume a channel after a monitor update completes, while locks are still held. + /// + /// If the channel has no more blocked monitor updates, this resumes normal operation by + /// calling [`Self::handle_channel_resumption`] and returns the remaining work to process + /// after locks are released. If blocked updates remain, only the update actions are returned. + /// + /// Note: This method takes individual fields from [`PeerState`] rather than the whole struct + /// to avoid borrow checker issues when the channel is borrowed from `peer_state.channel_by_id`. 
+ fn try_resume_channel_post_monitor_update( + &self, + in_flight_monitor_updates: &mut BTreeMap)>, + monitor_update_blocked_actions: &mut BTreeMap< + ChannelId, + Vec, + >, + pending_msg_events: &mut Vec, is_connected: bool, + chan: &mut FundedChannel, + ) -> PostMonitorUpdateChanResume { + let chan_id = chan.context.channel_id(); + let outbound_alias = chan.context.outbound_scid_alias(); + let counterparty_node_id = chan.context.get_counterparty_node_id(); + + #[cfg(debug_assertions)] + { + let in_flight_updates = in_flight_monitor_updates.get(&chan_id); + assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true)); + assert!(chan.is_awaiting_monitor_update()); + } + + let logger = WithChannelContext::from(&self.logger, &chan.context, None); + + let update_actions = monitor_update_blocked_actions.remove(&chan_id).unwrap_or(Vec::new()); + + if chan.blocked_monitor_updates_pending() != 0 { + log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked"); + PostMonitorUpdateChanResume::Blocked { update_actions } + } else { + log_debug!(logger, "Channel is open and awaiting update, resuming it"); + let updates = chan.monitor_updating_restored( + &&logger, + &self.node_signer, + self.chain_hash, + &*self.config.read().unwrap(), + self.best_block.read().unwrap().height, + |htlc_id| { + self.path_for_release_held_htlc( + htlc_id, + outbound_alias, + &chan_id, + &counterparty_node_id, + ) + }, + ); + let channel_update = if updates.channel_ready.is_some() + && chan.context.is_usable() + && is_connected + { + if let Ok((msg, _, _)) = self.get_channel_update_for_unicast(chan) { + Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg }) + } else { + None + } + } else { + None + }; + + let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption( + pending_msg_events, + chan, + updates.raa, + updates.commitment_update, + updates.commitment_order, + 
updates.accepted_htlcs, + updates.pending_update_adds, + updates.funding_broadcastable, + updates.channel_ready, + updates.announcement_sigs, + updates.tx_signatures, + None, + updates.channel_ready_order, + ); + if let Some(upd) = channel_update { + pending_msg_events.push(upd); + } + + let unbroadcasted_batch_funding_txid = + chan.context.unbroadcasted_batch_funding_txid(&chan.funding); + + PostMonitorUpdateChanResume::Unblocked { + channel_id: chan_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs: updates.finalized_claimed_htlcs, + failed_htlcs: updates.failed_htlcs, + } + } + } + + /// Completes channel resumption after locks have been released. + /// + /// Processes the [`PostMonitorUpdateChanResume`] returned by + /// [`Self::try_resume_channel_post_monitor_update`], handling update actions and any + /// remaining work that requires locks to be released (e.g., forwarding HTLCs, failing HTLCs). 
+ fn handle_post_monitor_update_chan_resume(&self, data: PostMonitorUpdateChanResume) { + debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread); + #[cfg(debug_assertions)] + for (_, peer) in self.per_peer_state.read().unwrap().iter() { + debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); + } + + match data { + PostMonitorUpdateChanResume::Blocked { update_actions } => { + self.handle_monitor_update_completion_actions(update_actions); + }, + PostMonitorUpdateChanResume::Unblocked { + channel_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + } => { + self.post_monitor_update_unlock( + channel_id, + counterparty_node_id, + unbroadcasted_batch_funding_txid, + update_actions, + htlc_forwards, + decode_update_add_htlcs, + finalized_claimed_htlcs, + failed_htlcs, + ); + }, + } + } + + /// Handles a channel reentering a functional state, either due to reconnect or a monitor + /// update completion. 
+ #[rustfmt::skip] + fn handle_channel_resumption(&self, pending_msg_events: &mut Vec, + channel: &mut FundedChannel, raa: Option, + commitment_update: Option, commitment_order: RAACommitmentOrder, + pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec, + funding_broadcastable: Option, + channel_ready: Option, announcement_sigs: Option, + tx_signatures: Option, tx_abort: Option, + channel_ready_order: ChannelReadyOrder, + ) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec)>) { + let logger = WithChannelContext::from(&self.logger, &channel.context, None); + log_trace!(logger, "Handling channel resumption with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort", + if raa.is_some() { "an" } else { "no" }, + if commitment_update.is_some() { "a" } else { "no" }, + pending_forwards.len(), pending_update_adds.len(), + if funding_broadcastable.is_some() { "" } else { "not " }, + if channel_ready.is_some() { "sending" } else { "without" }, + if announcement_sigs.is_some() { "sending" } else { "without" }, + if tx_signatures.is_some() { "sending" } else { "without" }, + if tx_abort.is_some() { "sending" } else { "without" }, + ); + + let counterparty_node_id = channel.context.get_counterparty_node_id(); + let outbound_scid_alias = channel.context.outbound_scid_alias(); + + let mut htlc_forwards = None; + if !pending_forwards.is_empty() { + htlc_forwards = Some(( + outbound_scid_alias, channel.context.get_counterparty_node_id(), + channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(), + channel.context.get_user_id(), pending_forwards + )); + } + let mut decode_update_add_htlcs = None; + if !pending_update_adds.is_empty() { + decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds)); + } + + if channel.context.is_connected() { + if let 
ChannelReadyOrder::ChannelReadyFirst = channel_ready_order { + if let Some(msg) = &channel_ready { + self.send_channel_ready(pending_msg_events, channel, msg.clone()); + } + + if let Some(msg) = &announcement_sigs { + pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures { node_id: counterparty_node_id, msg: msg.clone(), }); @@ -10055,7 +10298,21 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ .and_then(Channel::as_funded_mut) { if chan.is_awaiting_monitor_update() { - handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan); + let completion_data = self.try_resume_channel_post_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + ); + + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + + self.handle_post_monitor_update_chan_resume(completion_data); + self.handle_holding_cell_free_result(holding_cell_res); } else { log_trace!(logger, "Channel is open but not awaiting update"); } @@ -10671,14 +10928,18 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() { - handle_initial_monitor!( - self, + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, persist_state, - peer_state_lock, - peer_state, - per_peer_state, - funded_chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { unreachable!("This must be a funded channel as we just inserted it."); } @@ -10841,7 +11102,18 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ }) { Ok((funded_chan, persist_status)) => { - handle_initial_monitor!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + funded_chan, + persist_status, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } Ok(()) }, Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry), @@ -11231,7 +11503,12 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ let (shutdown, monitor_update_opt, htlcs) = try_channel_entry!( self, peer_state, - chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), + chan.shutdown( + &self.logger, + &self.signer_provider, + &peer_state.latest_features, + &msg + ), chan_entry ); dropped_htlcs = htlcs; @@ -11247,15 +11524,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, funding_txo_opt.unwrap(), monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } }, None => { @@ -11547,8 +11828,18 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ if let Some(monitor) = monitor_opt { let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor); if let Ok(persist_state) = monitor_res { - handle_initial_monitor!(self, persist_state, peer_state_lock, peer_state, - per_peer_state, chan); + if let Some(data) = self.handle_initial_monitor( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + persist_state, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } else { let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated"); @@ -11558,8 +11849,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ try_channel_entry!(self, peer_state, Err(err), chan_entry) } } else if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, - peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) @@ -11589,10 +11891,19 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ ); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!( - self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state, - per_peer_state, chan - ); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo.unwrap(), + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } Ok(()) @@ -11758,6 +12069,10 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if !new_intercept_events.is_empty() { let mut events = self.pending_events.lock().unwrap(); + // It's possible we processed this intercept forward, generated an event, then re-processed + // it here after restart, in which case the intercept event should not be pushed + // redundantly. + new_intercept_events.retain(|ev| !events.contains(ev)); events.append(&mut new_intercept_events); } } @@ -11829,8 +12144,19 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, - peer_state_lock, peer_state, per_peer_state, chan); + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + funding_txo, + monitor_update, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } (htlcs_to_fail, static_invoices) } else { @@ -12030,7 +12356,7 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[rustfmt::skip] fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> { - let (inferred_splice_locked, need_lnd_workaround) = { + let (inferred_splice_locked, need_lnd_workaround, holding_cell_res) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -12091,7 +12417,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ peer_state.pending_msg_events.push(upd); } - (responses.inferred_splice_locked, need_lnd_workaround) + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + (responses.inferred_splice_locked, need_lnd_workaround, holding_cell_res) } else { return try_channel_entry!(self, peer_state, Err(ChannelError::close( "Got a channel_reestablish message for an unfunded channel!".into())), chan_entry); @@ -12134,6 +12461,8 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }; + self.handle_holding_cell_free_result(holding_cell_res); + if let Some(channel_ready_msg) = need_lnd_workaround { self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?; } @@ -12308,15 +12637,19 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ } if let Some(monitor_update) = splice_promotion.monitor_update { - handle_new_monitor_update!( - self, + if let Some(data) = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, splice_promotion.funding_txo, monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_post_monitor_update_chan_resume(data); + } } } } else { @@ -12457,75 +12790,92 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ }, } } - } + } + + for (err, counterparty_node_id) in failed_channels { + let _ = self.handle_error(err, counterparty_node_id); + } + + has_pending_monitor_events + } + + fn handle_holding_cell_free_result(&self, result: FreeHoldingCellsResult) { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + for (chan_id, cp_node_id, post_update_data, failed_htlcs) in result { + if let Some(data) = post_update_data { + self.handle_post_monitor_update_chan_resume(data); + } - for (err, counterparty_node_id) in failed_channels { - let _ = self.handle_error(err, counterparty_node_id); + self.fail_holding_cell_htlcs(failed_htlcs, chan_id, &cp_node_id); + self.needs_persist_flag.store(true, Ordering::Release); + self.event_persist_notifier.notify(); } + } - has_pending_monitor_events + /// Frees all holding cells in all the channels for a peer. + /// + /// Includes elements in the returned Vec only for channels which changed (implying persistence + /// is required). 
+ #[must_use] + fn check_free_peer_holding_cells( + &self, peer_state: &mut PeerState, + ) -> FreeHoldingCellsResult { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); + + let mut updates = Vec::new(); + let funded_chan_iter = peer_state + .channel_by_id + .iter_mut() + .filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan))); + for (chan_id, chan) in funded_chan_iter { + let (monitor_opt, holding_cell_failed_htlcs) = chan.maybe_free_holding_cell_htlcs( + &self.fee_estimator, + &&WithChannelContext::from(&self.logger, &chan.context, None), + ); + if monitor_opt.is_some() || !holding_cell_failed_htlcs.is_empty() { + let update_res = monitor_opt + .map(|monitor_update| { + self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + chan.funding.get_funding_txo().unwrap(), + monitor_update, + ) + }) + .flatten(); + let cp_node_id = chan.context.get_counterparty_node_id(); + updates.push((*chan_id, cp_node_id, update_res, holding_cell_failed_htlcs)); + } + } + updates } /// Check the holding cell in each channel and free any pending HTLCs in them if possible. /// Returns whether there were any updates such as if pending HTLCs were freed or a monitor /// update was applied. fn check_free_holding_cells(&self) -> bool { - let mut has_monitor_update = false; - let mut failed_htlcs = Vec::new(); + let mut unlocked_results = Vec::new(); - // Walk our list of channels and find any that need to update. Note that when we do find an - // update, if it includes actions that must be taken afterwards, we have to drop the - // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we - // manage to go through all our peers without finding a single channel to update. 
- 'peer_loop: loop { + { let per_peer_state = self.per_peer_state.read().unwrap(); for (_cp_id, peer_state_mutex) in per_peer_state.iter() { - 'chan_loop: loop { - let mut peer_state_lock = peer_state_mutex.lock().unwrap(); - let peer_state: &mut PeerState<_> = &mut *peer_state_lock; - for (channel_id, chan) in - peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| { - chan.as_funded_mut().map(|chan| (chan_id, chan)) - }) { - let counterparty_node_id = chan.context.get_counterparty_node_id(); - let funding_txo = chan.funding.get_funding_txo(); - let (monitor_opt, holding_cell_failed_htlcs) = chan - .maybe_free_holding_cell_htlcs( - &self.fee_estimator, - &&WithChannelContext::from(&self.logger, &chan.context, None), - ); - if !holding_cell_failed_htlcs.is_empty() { - failed_htlcs.push(( - holding_cell_failed_htlcs, - *channel_id, - counterparty_node_id, - )); - } - if let Some(monitor_update) = monitor_opt { - has_monitor_update = true; - - handle_new_monitor_update!( - self, - funding_txo.unwrap(), - monitor_update, - peer_state_lock, - peer_state, - per_peer_state, - chan - ); - continue 'peer_loop; - } - } - break 'chan_loop; - } + let mut peer_state_lock = peer_state_mutex.lock().unwrap(); + let peer_state: &mut PeerState<_> = &mut *peer_state_lock; + unlocked_results.append(&mut self.check_free_peer_holding_cells(peer_state)); } - break 'peer_loop; } - let has_update = has_monitor_update || !failed_htlcs.is_empty(); - for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) { - self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id); - } + let has_update = !unlocked_results.is_empty(); + self.handle_holding_cell_free_result(unlocked_results); has_update } @@ -12857,27 +13207,32 @@ This indicates a bug inside LDK. 
Please report this error at https://github.com/ #[cfg(any(test, fuzzing))] #[rustfmt::skip] pub fn exit_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result { - let per_peer_state = self.per_peer_state.read().unwrap(); - let peer_state_mutex = per_peer_state.get(counterparty_node_id) - .ok_or_else(|| APIError::ChannelUnavailable { - err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") - })?; - let mut peer_state = peer_state_mutex.lock().unwrap(); - let initiator = match peer_state.channel_by_id.entry(*channel_id) { - hash_map::Entry::Occupied(mut chan_entry) => { - if let Some(chan) = chan_entry.get_mut().as_funded_mut() { - chan.exit_quiescence() - } else { - return Err(APIError::APIMisuseError { - err: format!("Unfunded channel {} cannot be quiescent", channel_id), - }) - } - }, - hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { - err: format!("Channel with id {} not found for the passed counterparty node_id {}", - channel_id, counterparty_node_id), - }), + let _read_guard = self.total_consistency_lock.read().unwrap(); + + let initiator = { + let per_peer_state = self.per_peer_state.read().unwrap(); + let peer_state_mutex = per_peer_state.get(counterparty_node_id) + .ok_or_else(|| APIError::ChannelUnavailable { + err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") + })?; + let mut peer_state = peer_state_mutex.lock().unwrap(); + match peer_state.channel_by_id.entry(*channel_id) { + hash_map::Entry::Occupied(mut chan_entry) => { + if let Some(chan) = chan_entry.get_mut().as_funded_mut() { + chan.exit_quiescence() + } else { + return Err(APIError::APIMisuseError { + err: format!("Unfunded channel {} cannot be quiescent", channel_id), + }) + } + }, + hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { + err: format!("Channel with id {} not found for the passed counterparty node_id {}", + channel_id, 
counterparty_node_id), + }), + } }; + self.check_free_holding_cells(); Ok(initiator) } @@ -13941,8 +14296,26 @@ where if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating and updating monitor", ); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, - peer_state_lck, peer_state, per_peer_state, chan); + let post_update_data = self.handle_new_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, + &mut peer_state.pending_msg_events, + peer_state.is_connected, + chan, + channel_funding_outpoint, + monitor_update, + ); + let holding_cell_res = self.check_free_peer_holding_cells(peer_state); + + mem::drop(peer_state_lck); + mem::drop(per_peer_state); + + if let Some(data) = post_update_data { + self.handle_post_monitor_update_chan_resume(data); + } + + self.handle_holding_cell_free_result(holding_cell_res); + if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the // top of the loop. 
@@ -13964,6 +14337,10 @@ where } fn handle_post_event_actions>(&self, actions: I) { + debug_assert_ne!( + self.total_consistency_lock.held_by_thread(), + LockHeldState::NotHeldByThread + ); for action in actions.into_iter() { match action { EventCompletionAction::ReleaseRAAChannelMonitorUpdate { @@ -13985,10 +14362,11 @@ where }, ) => { let per_peer_state = self.per_peer_state.read().unwrap(); - let mut peer_state = per_peer_state + let mut peer_state_lock = per_peer_state .get(&counterparty_node_id) .map(|state| state.lock().unwrap()) .expect("Channels originating a payment resolution must have peer state"); + let peer_state = &mut *peer_state_lock; let update_id = peer_state .closed_channel_monitor_update_ids .get_mut(&channel_id) @@ -14015,16 +14393,18 @@ where }; self.pending_background_events.lock().unwrap().push(event); } else { - handle_post_close_monitor_update!( - self, + if let Some(actions) = self.handle_post_close_monitor_update( + &mut peer_state.in_flight_monitor_updates, + &mut peer_state.monitor_update_blocked_actions, channel_funding_outpoint, update, - peer_state, - peer_state, - per_peer_state, counterparty_node_id, - channel_id - ); + channel_id, + ) { + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + self.handle_monitor_update_completion_actions(actions); + } } }, } @@ -14358,19 +14738,32 @@ where PersistenceNotifierGuard::optionally_notify(self, || { let mut result = NotifyOption::SkipPersistNoEvents; + // This method is quite performance-sensitive. Not only is it called very often, but it + // *is* the critical path between generating a message for a peer and giving it to the + // `PeerManager` to send. Thus, we should avoid adding any more logic here than we + // need, especially anything that might end up causing I/O (like a + // `ChannelMonitorUpdate`)! + // TODO: This behavior should be documented. It's unintuitive that we query // ChannelMonitors when clearing other events. 
if self.process_pending_monitor_events() { result = NotifyOption::DoPersist; } - if self.check_free_holding_cells() { - result = NotifyOption::DoPersist; - } if self.maybe_generate_initial_closing_signed() { result = NotifyOption::DoPersist; } + #[cfg(test)] + if self.check_free_holding_cells() { + // In tests, we want to ensure that we never forget to free holding cells + // immediately, so we check it here. + // Note that we can't turn this on for `debug_assertions` because there's a race in + // (at least) the fee-update logic in `timer_tick_occurred` which can lead to us + // freeing holding cells here while its running. + debug_assert!(false, "Holding cells should always be auto-free'd"); + } + // Quiescence is an in-memory protocol, so we don't have to persist because of it. self.maybe_send_stfu(); @@ -14764,12 +15157,12 @@ where insert_short_channel_id!(short_to_chan_info, funded_channel); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update_locked_actions_handled_by_caller!( - self, + self.handle_new_monitor_update_locked_actions_handled_by_caller( + &mut peer_state.in_flight_monitor_updates, + funded_channel.context.channel_id(), funding_txo, + funded_channel.context.get_counterparty_node_id(), monitor_update, - &mut peer_state.in_flight_monitor_updates, - funded_channel.context ); to_process_monitor_update_actions.push(( counterparty_node_id, channel_id @@ -15788,12 +16181,17 @@ where return None; } - let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id); + let payment_nonce = context.as_ref().and_then(|ctx| match ctx { + OffersContext::OutboundPaymentForOffer { nonce, .. } + | OffersContext::OutboundPaymentForRefund { nonce, .. 
} => Some(*nonce), + _ => None, + }); + let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id, payment_nonce); handle_pay_invoice_res!(res, invoice, logger); }, OffersMessage::StaticInvoice(invoice) => { let payment_id = match context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => payment_id, + Some(OffersContext::OutboundPaymentForOffer { payment_id, .. }) => payment_id, _ => return None }; let res = self.initiate_async_payment(&invoice, payment_id); @@ -15809,7 +16207,8 @@ where log_trace!(logger, "Received invoice_error: {}", invoice_error); match context { - Some(OffersContext::OutboundPayment { payment_id, .. }) => { + Some(OffersContext::OutboundPaymentForOffer { payment_id, .. }) + |Some(OffersContext::OutboundPaymentForRefund { payment_id, .. }) => { self.abandon_payment_with_reason( payment_id, PaymentFailureReason::InvoiceRequestRejected, ); @@ -16264,6 +16663,7 @@ impl_writeable_tlv_based!(PendingHTLCInfo, { (8, outgoing_cltv_value, required), (9, incoming_amt_msat, option), (10, skimmed_fee_msat, option), + (11, incoming_accountable, (default_value, false)), }); impl Writeable for HTLCFailureMsg { @@ -16450,6 +16850,7 @@ impl Readable for HTLCSource { let mut payment_params: Option = None; let mut blinded_tail: Option = None; let mut bolt12_invoice: Option = None; + let mut payment_nonce: Option = None; read_tlv_fields!(reader, { (0, session_priv, required), (1, payment_id, option), @@ -16458,6 +16859,7 @@ impl Readable for HTLCSource { (5, payment_params, (option: ReadableArgs, 0)), (6, blinded_tail, option), (7, bolt12_invoice, option), + (9, payment_nonce, option), }); if payment_id.is_none() { // For backwards compat, if there was no payment_id written, use the session_priv bytes @@ -16481,6 +16883,7 @@ impl Readable for HTLCSource { path, payment_id: payment_id.unwrap(), bolt12_invoice, + payment_nonce, }) } 1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)), @@ -16498,6 +16901,7 @@ impl Writeable 
for HTLCSource { ref path, payment_id, bolt12_invoice, + payment_nonce, } => { 0u8.write(writer)?; let payment_id_opt = Some(payment_id); @@ -16510,6 +16914,7 @@ impl Writeable for HTLCSource { (5, None::, option), // payment_params in LDK versions prior to 0.0.115 (6, path.blinded_tail, option), (7, bolt12_invoice, option), + (9, payment_nonce, option), }); }, HTLCSource::PreviousHopData(ref field) => { @@ -17116,28 +17521,32 @@ fn dedup_decode_update_add_htlcs( ) where L::Target: Logger, { - decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| { - update_add_htlcs.retain(|update_add| { - let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias - && update_add.htlc_id == prev_hop_data.htlc_id; - if matches { - let logger = WithContext::from( - logger, - prev_hop_data.counterparty_node_id, - Some(update_add.channel_id), - Some(update_add.payment_hash), - ); - log_info!( - logger, - "Removing pending to-decode HTLC with id {}: {}", - update_add.htlc_id, - removal_reason - ); + match decode_update_add_htlcs.entry(prev_hop_data.prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut update_add_htlcs) => { + update_add_htlcs.get_mut().retain(|update_add| { + let matches = update_add.htlc_id == prev_hop_data.htlc_id; + if matches { + let logger = WithContext::from( + logger, + prev_hop_data.counterparty_node_id, + Some(update_add.channel_id), + Some(update_add.payment_hash), + ); + log_info!( + logger, + "Removing pending to-decode HTLC with id {}: {}", + update_add.htlc_id, + removal_reason + ); + } + !matches + }); + if update_add_htlcs.get().is_empty() { + update_add_htlcs.remove(); } - !matches - }); - !update_add_htlcs.is_empty() - }); + }, + _ => {}, + } } // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the @@ -17490,9 +17899,9 @@ where const MAX_ALLOC_SIZE: usize = 1024 * 64; let forward_htlcs_count: u64 = Readable::read(reader)?; - // This map is read but may no longer be used because 
we'll attempt to rebuild the set of HTLC - // forwards from the `Channel{Monitor}`s instead, as a step towards removing the requirement of - // regularly persisting the `ChannelManager`. + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` usage below. let mut forward_htlcs_legacy: HashMap> = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128)); for _ in 0..forward_htlcs_count { @@ -17593,9 +18002,9 @@ where }; } - // Some maps are read but may no longer be used because we attempt to rebuild the pending HTLC - // set from the `Channel{Monitor}`s instead, as a step towards removing the requirement of - // regularly persisting the `ChannelManager`. + // Marked `_legacy` because in versions > 0.2 we are taking steps to remove the requirement of + // regularly persisting the `ChannelManager` and instead rebuild the set of HTLC forwards from + // `Channel{Monitor}` data. See `reconstruct_manager_from_monitors` below. let mut pending_intercepted_htlcs_legacy: Option> = None; let mut decode_update_add_htlcs_legacy: Option>> = @@ -17936,6 +18345,36 @@ where pending_background_events.push(new_event); } + // In LDK 0.2 and below, the `ChannelManager` would track all payments and HTLCs internally and + // persist that state, relying on it being up-to-date on restart. Newer versions are moving + // towards reducing this reliance on regular persistence of the `ChannelManager`, and instead + // reconstruct HTLC/payment state based on `Channel{Monitor}` data if + // `reconstruct_manager_from_monitors` is set below. Currently it is only set in tests, randomly + // to ensure the legacy codepaths also have test coverage. 
+ #[cfg(not(test))] + let reconstruct_manager_from_monitors = false; + #[cfg(test)] + let reconstruct_manager_from_monitors = { + use core::hash::{BuildHasher, Hasher}; + + match std::env::var("LDK_TEST_REBUILD_MGR_FROM_MONITORS") { + Ok(val) => match val.as_str() { + "1" => true, + "0" => false, + _ => panic!("LDK_TEST_REBUILD_MGR_FROM_MONITORS must be 0 or 1, got: {}", val), + }, + Err(_) => { + let rand_val = + std::collections::hash_map::RandomState::new().build_hasher().finish(); + if rand_val % 2 == 0 { + true + } else { + false + } + }, + } + }; + // If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we // should ensure we try them again on the inbound edge. We put them here and do so after we // have a fully-constructed `ChannelManager` at the end. @@ -17960,18 +18399,20 @@ where let mut peer_state_lock = peer_state_mtx.lock().unwrap(); let peer_state = &mut *peer_state_lock; is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); - if let Some(chan) = peer_state.channel_by_id.get(channel_id) { - if let Some(funded_chan) = chan.as_funded() { - let inbound_committed_update_adds = - funded_chan.get_inbound_committed_update_adds(); - if !inbound_committed_update_adds.is_empty() { - // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized - // `Channel`, as part of removing the requirement to regularly persist the - // `ChannelManager`. 
- decode_update_add_htlcs.insert( - funded_chan.context.outbound_scid_alias(), - inbound_committed_update_adds, - ); + if reconstruct_manager_from_monitors { + if let Some(chan) = peer_state.channel_by_id.get(channel_id) { + if let Some(funded_chan) = chan.as_funded() { + let inbound_committed_update_adds = + funded_chan.get_inbound_committed_update_adds(); + if !inbound_committed_update_adds.is_empty() { + // Reconstruct `ChannelManager::decode_update_add_htlcs` from the serialized + // `Channel`, as part of removing the requirement to regularly persist the + // `ChannelManager`. + decode_update_add_htlcs.insert( + funded_chan.context.outbound_scid_alias(), + inbound_committed_update_adds, + ); + } } } } @@ -18015,156 +18456,175 @@ where is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id); } - if is_channel_closed { - for (htlc_source, (htlc, preimage_opt)) in - monitor.get_all_current_outbound_htlcs() - { - let logger = WithChannelMonitor::from( - &args.logger, - monitor, - Some(htlc.payment_hash), - ); - let htlc_id = SentHTLCId::from_source(&htlc_source); - match htlc_source { - HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. 
+ for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() + { + let logger = + WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash)); + let htlc_id = SentHTLCId::from_source(&htlc_source); + match htlc_source { + HTLCSource::PreviousHopData(prev_hop_data) => { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + // If `reconstruct_manager_from_monitors` is set, we always add all inbound committed + // HTLCs to `decode_update_add_htlcs` in the above loop, but we need to prune from + // those added HTLCs if they were already forwarded to the outbound edge. Otherwise, + // we'll double-forward. + if reconstruct_manager_from_monitors { dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs, &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", + "HTLC already forwarded to the outbound edge", &args.logger, ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } + } + + if !is_channel_closed || reconstruct_manager_from_monitors { + continue; + } + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. If we + // still have an entry for this HTLC in `forward_htlcs_legacy`, + // `pending_intercepted_htlcs_legacy`, or + // `decode_update_add_htlcs_legacy`, we were apparently not persisted + // after the monitor was when forwarding the payment. 
+ dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs_legacy, + &prev_hop_data, + "HTLC was forwarded to the closed channel", + &args.logger, + ); + forward_htlcs_legacy.retain(|_, forwards| { + forwards.retain(|forward| { + if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + false } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false } else { true } }); - }, - HTLCSource::OutboundRoute { - payment_id, - session_priv, - path, - bolt12_invoice, - .. - } => { - if let Some(preimage) = preimage_opt { - let pending_events = Mutex::new(pending_events_read); - let update = PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id, - }; - let mut compl_action = Some( - EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) - ); - pending_outbounds.claim_htlc( - payment_id, - preimage, - bolt12_invoice, - session_priv, - path, - true, - &mut compl_action, - &pending_events, - ); - // If the completion action was not consumed, then there was no - // payment to claim, and we need to tell the `ChannelMonitor` - // we don't need to hear about the HTLC again, at least as long - // as the PaymentSent event isn't still sitting around in our - // event queue. 
- let have_action = if compl_action.is_some() { - let pending_events = pending_events.lock().unwrap(); - pending_events.iter().any(|(_, act)| *act == compl_action) - } else { - false - }; - if !have_action && compl_action.is_some() { - let mut peer_state = per_peer_state - .get(&counterparty_node_id) - .map(|state| state.lock().unwrap()) - .expect("Channels originating a preimage must have peer state"); - let update_id = peer_state - .closed_channel_monitor_update_ids - .get_mut(channel_id) - .expect("Channels originating a preimage must have a monitor"); - // Note that for channels closed pre-0.1, the latest - // update_id is `u64::MAX`. - *update_id = update_id.saturating_add(1); - - pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id: monitor.get_counterparty_node_id(), + !forwards.is_empty() + }); + pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { + if pending_forward_matches_htlc(&htlc_info) { + log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", + &htlc.payment_hash, &monitor.channel_id()); + pending_events_read.retain(|(event, _)| { + if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { + intercepted_id != ev_id + } else { true } + }); + false + } else { true } + }); + }, + HTLCSource::OutboundRoute { + payment_id, + session_priv, + path, + bolt12_invoice, + payment_nonce, + .. 
+ } => { + if !is_channel_closed { + continue; + } + if let Some(preimage) = preimage_opt { + let pending_events = Mutex::new(pending_events_read); + let update = PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id, + }; + let mut compl_action = Some( + EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update) + ); + pending_outbounds.claim_htlc( + payment_id, + preimage, + bolt12_invoice, + payment_nonce, + session_priv, + path, + true, + &mut compl_action, + &pending_events, + ); + // If the completion action was not consumed, then there was no + // payment to claim, and we need to tell the `ChannelMonitor` + // we don't need to hear about the HTLC again, at least as long + // as the PaymentSent event isn't still sitting around in our + // event queue. + let have_action = if compl_action.is_some() { + let pending_events = pending_events.lock().unwrap(); + pending_events.iter().any(|(_, act)| *act == compl_action) + } else { + false + }; + if !have_action && compl_action.is_some() { + let mut peer_state = per_peer_state + .get(&counterparty_node_id) + .map(|state| state.lock().unwrap()) + .expect( + "Channels originating a preimage must have peer state", + ); + let update_id = peer_state + .closed_channel_monitor_update_ids + .get_mut(channel_id) + .expect( + "Channels originating a preimage must have a monitor", + ); + // Note that for channels closed pre-0.1, the latest + // update_id is `u64::MAX`. 
+ *update_id = update_id.saturating_add(1); + + pending_background_events.push( + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + counterparty_node_id: monitor + .get_counterparty_node_id(), funding_txo: monitor.get_funding_txo(), channel_id: monitor.channel_id(), update: ChannelMonitorUpdate { update_id: *update_id, channel_id: Some(monitor.channel_id()), - updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete { - htlc: htlc_id, - }], + updates: vec![ + ChannelMonitorUpdateStep::ReleasePaymentComplete { + htlc: htlc_id, + }, + ], }, - }); - } - pending_events_read = pending_events.into_inner().unwrap(); + }, + ); } - }, - } + pending_events_read = pending_events.into_inner().unwrap(); + } + }, } - for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { - log_info!( - args.logger, - "Failing HTLC with payment hash {} as it was resolved on-chain.", - payment_hash - ); - let completion_action = Some(PaymentCompleteUpdate { - counterparty_node_id: monitor.get_counterparty_node_id(), - channel_funding_outpoint: monitor.get_funding_txo(), - channel_id: monitor.channel_id(), - htlc_id: SentHTLCId::from_source(&htlc_source), - }); + } + for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() { + log_info!( + args.logger, + "Failing HTLC with payment hash {} as it was resolved on-chain.", + payment_hash + ); + let completion_action = Some(PaymentCompleteUpdate { + counterparty_node_id: monitor.get_counterparty_node_id(), + channel_funding_outpoint: monitor.get_funding_txo(), + channel_id: monitor.channel_id(), + htlc_id: SentHTLCId::from_source(&htlc_source), + }); - failed_htlcs.push(( - htlc_source, - payment_hash, - monitor.get_counterparty_node_id(), - monitor.channel_id(), - LocalHTLCFailureReason::OnChainTimeout, - completion_action, - )); - } + failed_htlcs.push(( + htlc_source, + payment_hash, + monitor.get_counterparty_node_id(), + monitor.channel_id(), + LocalHTLCFailureReason::OnChainTimeout, + 
completion_action, + )); } // Whether the downstream channel was closed or not, try to re-apply any payment @@ -18532,99 +18992,55 @@ where } } - // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. - // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. - for (src, _, _, _, _, _) in failed_htlcs.iter() { - if let HTLCSource::PreviousHopData(prev_hop_data) = src { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was failed backwards during manager read", - &args.logger, - ); - } - } - - // See above comment on `failed_htlcs`. - for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { - for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { - dedup_decode_update_add_htlcs( - &mut decode_update_add_htlcs, - prev_hop_data, - "HTLC was already decoded and marked as a claimable payment", - &args.logger, - ); - } - } - - // Remove HTLCs from `forward_htlcs` if they are also present in `decode_update_add_htlcs`. - // - // In the future, the full set of pending HTLCs will be pulled from `Channel{Monitor}` data and - // placed in `ChannelManager::decode_update_add_htlcs` on read, to be handled on the next call - // to `process_pending_htlc_forwards`. This is part of a larger effort to remove the requirement - // of regularly persisting the `ChannelManager`. The new pipeline is supported for HTLC forwards - // received on LDK 0.3+ but not <= 0.2, so prune non-legacy HTLCs from `forward_htlcs`. - forward_htlcs_legacy.retain(|scid, pending_fwds| { - for fwd in pending_fwds { - let (prev_scid, prev_htlc_id) = match fwd { - HTLCForwardInfo::AddHTLC(htlc) => { - (htlc.prev_outbound_scid_alias, htlc.prev_htlc_id) - }, - HTLCForwardInfo::FailHTLC { htlc_id, .. } - | HTLCForwardInfo::FailMalformedHTLC { htlc_id, .. 
} => (*scid, *htlc_id), - }; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == prev_htlc_id) - { - return false; - } + if reconstruct_manager_from_monitors { + // De-duplicate HTLCs that are present in both `failed_htlcs` and `decode_update_add_htlcs`. + // Omitting this de-duplication could lead to redundant HTLC processing and/or bugs. + for (src, _, _, _, _, _) in failed_htlcs.iter() { + if let HTLCSource::PreviousHopData(prev_hop_data) = src { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was failed backwards during manager read", + &args.logger, + ); } } - true - }); - // Remove intercepted HTLC forwards if they are also present in `decode_update_add_htlcs`. See - // the above comment. - pending_intercepted_htlcs_legacy.retain(|id, fwd| { - let prev_scid = fwd.prev_outbound_scid_alias; - if let Some(pending_update_adds) = decode_update_add_htlcs.get_mut(&prev_scid) { - if pending_update_adds - .iter() - .any(|update_add| update_add.htlc_id == fwd.prev_htlc_id) - { - pending_events_read.retain( - |(ev, _)| !matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), + + // See above comment on `failed_htlcs`. 
+ for htlcs in claimable_payments.values().map(|pmt| &pmt.htlcs) { + for prev_hop_data in htlcs.iter().map(|h| &h.prev_hop) { + dedup_decode_update_add_htlcs( + &mut decode_update_add_htlcs, + prev_hop_data, + "HTLC was already decoded and marked as a claimable payment", + &args.logger, ); - return false; } } + } + + let (decode_update_add_htlcs, forward_htlcs, pending_intercepted_htlcs) = + if reconstruct_manager_from_monitors { + (decode_update_add_htlcs, new_hash_map(), new_hash_map()) + } else { + ( + decode_update_add_htlcs_legacy, + forward_htlcs_legacy, + pending_intercepted_htlcs_legacy, + ) + }; + + // If we have a pending intercept HTLC present but no corresponding event, add that now rather + // than relying on the user having persisted the event prior to shutdown. + for (id, fwd) in pending_intercepted_htlcs.iter() { if !pending_events_read.iter().any( |(ev, _)| matches!(ev, Event::HTLCIntercepted { intercept_id, .. } if intercept_id == id), ) { - match create_htlc_intercepted_event(*id, &fwd) { + match create_htlc_intercepted_event(*id, fwd) { Ok(ev) => pending_events_read.push_back((ev, None)), Err(()) => debug_assert!(false), } } - true - }); - // Add legacy update_adds that were received on LDK <= 0.2 that are not present in the - // `decode_update_add_htlcs` map that was rebuilt from `Channel{Monitor}` data, see above - // comment. 
- for (scid, legacy_update_adds) in decode_update_add_htlcs_legacy.drain() { - match decode_update_add_htlcs.entry(scid) { - hash_map::Entry::Occupied(mut update_adds) => { - for legacy_update_add in legacy_update_adds { - if !update_adds.get().contains(&legacy_update_add) { - update_adds.get_mut().push(legacy_update_add); - } - } - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(legacy_update_adds); - }, - } } let best_block = BestBlock::new(best_block_hash, best_block_height); @@ -18653,9 +19069,9 @@ where inbound_payment_key: expanded_inbound_key, pending_outbound_payments: pending_outbounds, - pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs_legacy), + pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs), - forward_htlcs: Mutex::new(forward_htlcs_legacy), + forward_htlcs: Mutex::new(forward_htlcs), decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs), claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, @@ -18991,12 +19407,11 @@ where mod tests { use crate::events::{ClosureReason, Event, HTLCHandlingFailureType}; use crate::ln::channelmanager::{ - create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId, + create_recv_pending_htlc_info, inbound_payment, InterceptId, PaymentId, RecipientOnionFields, }; use crate::ln::functional_test_utils::*; use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent}; - use crate::ln::onion_utils::AttributionData; use crate::ln::onion_utils::{self, LocalHTLCFailureReason}; use crate::ln::outbound_payment::Retry; use crate::ln::types::ChannelId; @@ -19006,7 +19421,6 @@ mod tests { use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::config::{ChannelConfig, ChannelConfigUpdate}; use crate::util::errors::APIError; - use crate::util::ser::Writeable; use crate::util::test_utils; use bitcoin::secp256k1::ecdh::SharedSecret; use bitcoin::secp256k1::{PublicKey, Secp256k1, 
SecretKey}; @@ -19853,7 +20267,7 @@ mod tests { if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. }) = create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat), - current_height) + false, current_height) { assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount); } else { panic!(); } @@ -19876,7 +20290,7 @@ mod tests { let current_height: u32 = node[0].node.best_block.read().unwrap().height; assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]), sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat), - current_height).is_ok()); + false, current_height).is_ok()); } #[test] @@ -19901,7 +20315,7 @@ mod tests { custom_tlvs: Vec::new(), }, shared_secret: SharedSecret::from_bytes([0; 32]), - }, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height); + }, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, false, current_height); // Should not return an error as this condition: // https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334 @@ -20064,66 +20478,6 @@ mod tests { check_spends!(txn[0], funding_tx); } } - - #[test] - #[rustfmt::skip] - fn test_malformed_forward_htlcs_ser() { - // Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly. 
- let chanmon_cfg = create_chanmon_cfgs(1); - let node_cfg = create_node_cfgs(1, &chanmon_cfg); - let persister; - let chain_monitor; - let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]); - let deserialized_chanmgr; - let mut nodes = create_network(1, &node_cfg, &chanmgrs); - - let dummy_failed_htlc = |htlc_id| { - HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } } - }; - let dummy_malformed_htlc = |htlc_id| { - HTLCForwardInfo::FailMalformedHTLC { - htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(), - sha256_of_onion: [0; 32], - } - }; - - let dummy_htlcs_1: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 0 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - let dummy_htlcs_2: Vec = (1..10).map(|htlc_id| { - if htlc_id % 2 == 1 { - dummy_failed_htlc(htlc_id) - } else { - dummy_malformed_htlc(htlc_id) - } - }).collect(); - - - let (scid_1, scid_2) = (42, 43); - let mut forward_htlcs = new_hash_map(); - forward_htlcs.insert(scid_1, dummy_htlcs_1.clone()); - forward_htlcs.insert(scid_2, dummy_htlcs_2.clone()); - - let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - *chanmgr_fwd_htlcs = forward_htlcs.clone(); - core::mem::drop(chanmgr_fwd_htlcs); - - reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr); - - let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap(); - for scid in [scid_1, scid_2].iter() { - let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap(); - assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs); - } - assert!(deserialized_fwd_htlcs.is_empty()); - core::mem::drop(deserialized_fwd_htlcs); - } } #[cfg(ldk_bench)] diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index e072deb6a97..528786d5fac 100644 --- 
a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -10,6 +10,7 @@ //! A bunch of useful utilities for building networks of nodes and exchanging messages between //! nodes for functional tests. +use crate::blinded_path::payment::DummyTlvs; use crate::chain::channelmonitor::ChannelMonitor; use crate::chain::transaction::OutPoint; use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Listen, Watch}; @@ -971,7 +972,7 @@ pub fn get_revoke_commit_msgs>( assert_eq!(node_id, recipient); (*msg).clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, match events[1] { MessageSendEvent::UpdateHTLCs { ref node_id, ref channel_id, ref updates } => { @@ -984,7 +985,7 @@ pub fn get_revoke_commit_msgs>( assert!(updates.commitment_signed.iter().all(|cs| cs.channel_id == *channel_id)); updates.commitment_signed.clone() }, - _ => panic!("Unexpected event"), + _ => panic!("Unexpected event: {events:?}"), }, ) } @@ -2985,6 +2986,7 @@ pub fn expect_payment_sent>( ref amount_msat, ref fee_paid_msat, ref bolt12_invoice, + .. 
} => { assert_eq!(expected_payment_preimage, *payment_preimage); assert_eq!(expected_payment_hash, *payment_hash); @@ -3443,6 +3445,7 @@ pub fn fail_payment_along_path<'a, 'b, 'c>(expected_path: &[&Node<'a, 'b, 'c>]) pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_path: &'a [&'a Node<'b, 'c, 'd>], + pub dummy_tlvs: Vec, pub recv_value: u64, pub payment_hash: PaymentHash, pub payment_secret: Option, @@ -3464,6 +3467,7 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { Self { origin_node, expected_path, + dummy_tlvs: vec![], recv_value, payment_hash, payment_secret: None, @@ -3511,12 +3515,17 @@ impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> { self.expected_failure = Some(failure); self } + pub fn with_dummy_tlvs(mut self, dummy_tlvs: &[DummyTlvs]) -> Self { + self.dummy_tlvs = dummy_tlvs.to_vec(); + self + } } pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option { let PassAlongPathArgs { origin_node, expected_path, + dummy_tlvs, recv_value, payment_hash: our_payment_hash, payment_secret: our_payment_secret, @@ -3551,6 +3560,16 @@ pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option node.node.process_pending_htlc_forwards(); } + if is_last_hop { + // At the final hop, the incoming packet contains N dummy-hop layers + // before the real HTLC. Each call to `process_pending_htlc_forwards` + // strips exactly one dummy layer, so we call it N times. 
+ for _ in 0..dummy_tlvs.len() { + assert!(node.node.needs_pending_htlc_processing()); + node.node.process_pending_htlc_forwards(); + } + } + if is_last_hop && clear_recipient_events { let events_2 = node.node.get_and_clear_pending_events(); if payment_claimable_expected { @@ -3763,6 +3782,29 @@ pub struct ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { pub origin_node: &'a Node<'b, 'c, 'd>, pub expected_paths: &'a [&'a [&'a Node<'b, 'c, 'd>]], pub expected_extra_fees: Vec, + /// A one-off adjustment used only in tests to account for an existing + /// fee-handling trade-off in LDK. + /// + /// When the payer is the introduction node of a blinded path, LDK does not + /// subtract the forward fee for the `payer -> next_hop` channel + /// (see [`BlindedPaymentPath::advance_path_by_one`]). This keeps the fee + /// logic simpler at the cost of a small, intentional overpayment. + /// + /// In the simple two-hop case (payer as introduction node → payee), + /// this overpayment has historically been avoided by simply not charging + /// the payer the forward fee, since the payer knows there is only + /// a single hop after them. + /// + /// However, with the introduction of dummy hops in LDK v0.3, even a + /// two-node real path (payer as introduction node → payee) may appear as a + /// multi-hop blinded path. This makes the existing overpayment surface in + /// tests. + /// + /// Until the fee-handling trade-off is revisited, this field allows tests + /// to compensate for that expected difference. 
+ /// + /// [`BlindedPaymentPath::advance_path_by_one`]: crate::blinded_path::payment::BlindedPaymentPath::advance_path_by_one + pub expected_extra_total_fees_msat: u64, pub expected_min_htlc_overpay: Vec, pub skip_last: bool, pub payment_preimage: PaymentPreimage, @@ -3786,6 +3828,7 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()], + expected_extra_total_fees_msat: 0, expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage, @@ -3801,6 +3844,10 @@ impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> { self.expected_extra_fees = extra_fees; self } + pub fn with_expected_extra_total_fees_msat(mut self, extra_total_fees: u64) -> Self { + self.expected_extra_total_fees_msat = extra_total_fees; + self + } pub fn with_expected_min_htlc_overpay(mut self, extra_fees: Vec) -> Self { self.expected_min_htlc_overpay = extra_fees; self @@ -4068,13 +4115,21 @@ pub fn pass_claimed_payment_along_route_from_ev( expected_total_fee_msat } + pub fn claim_payment_along_route( args: ClaimAlongRouteArgs, ) -> (Option, Vec) { - let origin_node = args.origin_node; - let payment_preimage = args.payment_preimage; - let skip_last = args.skip_last; - let expected_total_fee_msat = do_claim_payment_along_route(args); + let ClaimAlongRouteArgs { + origin_node, + payment_preimage, + skip_last, + expected_extra_total_fees_msat, + .. 
+ } = args; + + let expected_total_fee_msat = + do_claim_payment_along_route(args) + expected_extra_total_fees_msat; + if !skip_last { expect_payment_sent!(origin_node, payment_preimage, Some(expected_total_fee_msat)) } else { @@ -4563,7 +4618,29 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>( let mut nodes = Vec::new(); let chan_count = Rc::new(RefCell::new(0)); let payment_count = Rc::new(RefCell::new(0)); - let connect_style = Rc::new(RefCell::new(ConnectStyle::random_style())); + + let connect_style = Rc::new(RefCell::new(match std::env::var("LDK_TEST_CONNECT_STYLE") { + Ok(val) => match val.as_str() { + "BEST_BLOCK_FIRST" => ConnectStyle::BestBlockFirst, + "BEST_BLOCK_FIRST_SKIPPING_BLOCKS" => ConnectStyle::BestBlockFirstSkippingBlocks, + "BEST_BLOCK_FIRST_REORGS_ONLY_TIP" => ConnectStyle::BestBlockFirstReorgsOnlyTip, + "TRANSACTIONS_FIRST" => ConnectStyle::TransactionsFirst, + "TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => ConnectStyle::TransactionsFirstSkippingBlocks, + "TRANSACTIONS_DUPLICATIVELY_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks + }, + "HIGHLY_REDUNDANT_TRANSACTIONS_FIRST_SKIPPING_BLOCKS" => { + ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks + }, + "TRANSACTIONS_FIRST_REORGS_ONLY_TIP" => ConnectStyle::TransactionsFirstReorgsOnlyTip, + "FULL_BLOCK_VIA_LISTEN" => ConnectStyle::FullBlockViaListen, + "FULL_BLOCK_DISCONNECTIONS_SKIPPING_VIA_LISTEN" => { + ConnectStyle::FullBlockDisconnectionsSkippingViaListen + }, + _ => panic!("Unknown ConnectStyle '{}'", val), + }, + Err(_) => ConnectStyle::random_style(), + })); for i in 0..node_count { let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32])); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..fcb348c690d 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2270,6 +2270,7 @@ pub fn fail_backward_pending_htlc_upon_channel_failure() { 
skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &update_add_htlc); } @@ -9899,3 +9900,154 @@ pub fn test_multi_post_event_actions() { do_test_multi_post_event_actions(true); do_test_multi_post_event_actions(false); } + +#[xtest(feature = "_externalize_tests")] +pub fn test_dust_exposure_holding_cell_assertion() { + // Test that we properly move forward if we pop an HTLC-add from the holding cell but fail to + // add it to the channel. In 0.2 this caused a (harmless in prod) debug assertion failure. We + // try to ensure that this won't happen by checking that an HTLC will be able to be added + // before we add it to the holding cell, so getting into this state takes a bit of work. + // + // Here we accomplish this by using the dust exposure limit. This has the unique feature that + // node C can increase node B's dust exposure on the B <-> C channel without B doing anything. + // To exploit this, we get node B one HTLC away from being over-exposed to dust, give it one + // more HTLC in the holding cell, then have node C add an HTLC. By the time the holding-cell + // HTLC is released we are at max-dust-exposure and will fail it.
+ + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + + // Configure nodes with specific dust limits + let mut config = test_default_channel_config(); + // Use a fixed dust exposure limit to make the test simpler + const DUST_HTLC_VALUE_MSAT: u64 = 500_000; + config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FixedLimitMsat(5_000_000); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + + let configs = [Some(config.clone()), Some(config.clone()), Some(config.clone())]; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &configs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + + // Create channels: A <-> B <-> C + create_announced_chan_between_nodes(&nodes, 0, 1); + let bc_chan_id = create_announced_chan_between_nodes(&nodes, 1, 2).2; + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 10_000_000); + + // Send multiple dust HTLCs from B to C to approach the dust limit (including transaction fees) + for _ in 0..4 { + route_payment(&nodes[1], &[&nodes[2]], DUST_HTLC_VALUE_MSAT); + } + + // At this point we shouldn't be over the dust limit, and should still be able to send HTLCs. + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Add a further HTLC from B to C, but don't deliver the send messages. + // After this we'll only have the ability to add one more HTLC, but by not delivering the send + // messages (leaving B waiting on C's RAA) the next HTLC will go into B's holding cell. 
+ let (route_bc, payment_hash_bc, _payment_preimage_bc, payment_secret_bc) = + get_route_and_payment_hash!(nodes[1], nodes[2], DUST_HTLC_VALUE_MSAT); + let onion_bc = RecipientOnionFields::secret_only(payment_secret_bc); + let id = PaymentId(payment_hash_bc.0); + nodes[1].node.send_payment_with_route(route_bc, payment_hash_bc, onion_bc, id).unwrap(); + check_added_monitors(&nodes[1], 1); + let send_bc = SendEvent::from_node(&nodes[1]); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert_eq!( + bc_chan.next_outbound_htlc_minimum_msat, + config.channel_handshake_config.our_htlc_minimum_msat + ); + + // Forward an additional HTLC from A through B to C. This will go in B's holding cell for node + // C as it is waiting on a response to the above messages. + let payment_params_ac = PaymentParameters::from_node_id(node_c_id, TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()) + .unwrap(); + let (route_ac, payment_hash_cell, _, payment_secret_ac) = + get_route_and_payment_hash!(nodes[0], nodes[2], payment_params_ac, DUST_HTLC_VALUE_MSAT); + let onion_ac = RecipientOnionFields::secret_only(payment_secret_ac); + let id = PaymentId(payment_hash_cell.0); + nodes[0].node.send_payment_with_route(route_ac, payment_hash_cell, onion_ac, id).unwrap(); + check_added_monitors(&nodes[0], 1); + + let send_ab = SendEvent::from_node(&nodes[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_ab.msgs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &send_ab.commitment_msg, false, true); + + // At this point when we process pending forwards the HTLC will go into the holding cell and no + // further messages will be generated. Node B will also be at its maximum dust exposure and + // will refuse to send any dust HTLCs (when it includes the holding cell HTLC). 
+ expect_and_process_pending_htlcs(&nodes[1], false); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + let bs_chans = nodes[1].node.list_channels(); + let bc_chan = bs_chans.iter().find(|chan| chan.counterparty.node_id == node_c_id).unwrap(); + assert!(bc_chan.next_outbound_htlc_minimum_msat > DUST_HTLC_VALUE_MSAT); + + // Send an additional HTLC from C to B. This will make B unable to forward the HTLC already in + // its holding cell as it would be over-exposed to dust. + let (route_cb, payment_hash_cb, payment_preimage_cb, payment_secret_cb) = + get_route_and_payment_hash!(nodes[2], nodes[1], DUST_HTLC_VALUE_MSAT); + let onion_cb = RecipientOnionFields::secret_only(payment_secret_cb); + let id = PaymentId(payment_hash_cb.0); + nodes[2].node.send_payment_with_route(route_cb, payment_hash_cb, onion_cb, id).unwrap(); + check_added_monitors(&nodes[2], 1); + + // Now deliver all the messages and make sure that the HTLC is failed-back. + let send_event_cb = SendEvent::from_node(&nodes[2]); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event_cb.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event_cb.commitment_msg); + check_added_monitors(&nodes[1], 1); + + nodes[2].node.handle_update_add_htlc(node_b_id, &send_bc.msgs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_bc.commitment_msg); + check_added_monitors(&nodes[2], 1); + + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_c_id); + + // When we delivered the RAA above, we attempted (and failed) to add the HTLC to the channel, + // causing it to be ready to fail-back, which we do here: + let next_hop = + HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: bc_chan_id }; + expect_htlc_forwarding_fails(&nodes[1], 
&[next_hop]); + check_added_monitors(&nodes[1], 1); + fail_payment_along_path(&[&nodes[0], &nodes[1]]); + let conditions = PaymentFailedConditions::new(); + expect_payment_failed_conditions(&nodes[0], payment_hash_cell, false, conditions); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + let cs_cs = get_htlc_update_msgs(&nodes[2], &node_b_id); + + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); + check_added_monitors(&nodes[2], 1); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs.commitment_signed); + check_added_monitors(&nodes[1], 1); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); + + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); + check_added_monitors(&nodes[1], 1); + expect_and_process_pending_htlcs(&nodes[1], false); + expect_payment_claimable!(nodes[1], payment_hash_cb, payment_secret_cb, DUST_HTLC_VALUE_MSAT); + + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); + check_added_monitors(&nodes[2], 1); + + // Now that everything has settled, make sure the channels still work with a simple claim. + claim_payment(&nodes[2], &[&nodes[1]], payment_preimage_cb); +} diff --git a/lightning/src/ln/funding.rs b/lightning/src/ln/funding.rs index f80b2b6daea..8092a0e4451 100644 --- a/lightning/src/ln/funding.rs +++ b/lightning/src/ln/funding.rs @@ -20,69 +20,82 @@ use crate::sign::{P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT}; /// The components of a splice's funding transaction that are contributed by one party. #[derive(Debug, Clone)] -pub enum SpliceContribution { - /// When funds are added to a channel. - SpliceIn { - /// The amount to contribute to the splice. - value: Amount, - - /// The inputs included in the splice's funding transaction to meet the contributed amount - /// plus fees. 
Any excess amount will be sent to a change output. - inputs: Vec, - - /// An optional change output script. This will be used if needed or, when not set, - /// generated using [`SignerProvider::get_destination_script`]. - /// - /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script - change_script: Option, - }, - /// When funds are removed from a channel. - SpliceOut { - /// The outputs to include in the splice's funding transaction. The total value of all - /// outputs plus fees will be the amount that is removed. - outputs: Vec, - }, +pub struct SpliceContribution { + /// The amount from [`inputs`] to contribute to the splice. + /// + /// [`inputs`]: Self::inputs + value_added: Amount, + + /// The inputs included in the splice's funding transaction to meet the contributed amount + /// plus fees. Any excess amount will be sent to a change output. + inputs: Vec, + + /// The outputs to include in the splice's funding transaction. The total value of all + /// outputs plus fees will be the amount that is removed. + outputs: Vec, + + /// An optional change output script. This will be used if needed or, when not set, + /// generated using [`SignerProvider::get_destination_script`]. + /// + /// [`SignerProvider::get_destination_script`]: crate::sign::SignerProvider::get_destination_script + change_script: Option, } impl SpliceContribution { - pub(super) fn value(&self) -> SignedAmount { - match self { - SpliceContribution::SpliceIn { value, .. } => { - value.to_signed().unwrap_or(SignedAmount::MAX) - }, - SpliceContribution::SpliceOut { outputs } => { - let value_removed = outputs - .iter() - .map(|txout| txout.value) - .sum::() - .to_signed() - .unwrap_or(SignedAmount::MAX); - -value_removed - }, - } + /// Creates a contribution for when funds are only added to a channel. 
+ pub fn splice_in( + value_added: Amount, inputs: Vec, change_script: Option, + ) -> Self { + Self { value_added, inputs, outputs: vec![], change_script } + } + + /// Creates a contribution for when funds are only removed from a channel. + pub fn splice_out(outputs: Vec) -> Self { + Self { value_added: Amount::ZERO, inputs: vec![], outputs, change_script: None } + } + + /// Creates a contribution for when funds are both added to and removed from a channel. + /// + /// Note that `value_added` represents the value added by `inputs` but should not account for + /// value removed by `outputs`. The net value contributed can be obtained by calling + /// [`SpliceContribution::net_value`]. + pub fn splice_in_and_out( + value_added: Amount, inputs: Vec, outputs: Vec, + change_script: Option, + ) -> Self { + Self { value_added, inputs, outputs, change_script } + } + + /// The net value contributed to a channel by the splice. If negative, more value will be + /// spliced out than spliced in. + pub fn net_value(&self) -> SignedAmount { + let value_added = self.value_added.to_signed().unwrap_or(SignedAmount::MAX); + let value_removed = self + .outputs + .iter() + .map(|txout| txout.value) + .sum::() + .to_signed() + .unwrap_or(SignedAmount::MAX); + + value_added - value_removed + } + + pub(super) fn value_added(&self) -> Amount { + self.value_added } pub(super) fn inputs(&self) -> &[FundingTxInput] { - match self { - SpliceContribution::SpliceIn { inputs, .. } => &inputs[..], - SpliceContribution::SpliceOut { .. } => &[], - } + &self.inputs[..] } pub(super) fn outputs(&self) -> &[TxOut] { - match self { - SpliceContribution::SpliceIn { .. } => &[], - SpliceContribution::SpliceOut { outputs } => &outputs[..], - } + &self.outputs[..] } pub(super) fn into_tx_parts(self) -> (Vec, Vec, Option) { - match self { - SpliceContribution::SpliceIn { inputs, change_script, .. 
} => { - (inputs, vec![], change_script) - }, - SpliceContribution::SpliceOut { outputs } => (vec![], outputs, None), - } + let SpliceContribution { value_added: _, inputs, outputs, change_script } = self; + (inputs, outputs, change_script) } } diff --git a/lightning/src/ln/htlc_reserve_unit_tests.rs b/lightning/src/ln/htlc_reserve_unit_tests.rs index 86c95721d47..4c4fbada7dd 100644 --- a/lightning/src/ln/htlc_reserve_unit_tests.rs +++ b/lightning/src/ln/htlc_reserve_unit_tests.rs @@ -839,6 +839,7 @@ pub fn do_test_fee_spike_buffer(cfg: Option, htlc_fails: bool) { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1082,6 +1083,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[0].node.handle_update_add_htlc(node_b_id, &msg); @@ -1266,6 +1268,7 @@ pub fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); @@ -1650,6 +1653,7 @@ pub fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; for i in 0..50 { @@ -2256,6 +2260,7 @@ pub fn do_test_dust_limit_fee_accounting(can_afford: bool) { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; nodes[1].node.handle_update_add_htlc(node_a_id, &msg); diff --git a/lightning/src/ln/interactivetxs.rs b/lightning/src/ln/interactivetxs.rs index 4340aad420a..7ed829886c6 100644 --- a/lightning/src/ln/interactivetxs.rs +++ b/lightning/src/ln/interactivetxs.rs @@ -2337,22 +2337,21 @@ impl InteractiveTxConstructor { pub(super) fn calculate_change_output_value( context: &FundingNegotiationContext, is_splice: bool, shared_output_funding_script: &ScriptBuf, 
change_output_dust_limit: u64, -) -> Result, AbortReason> { - assert!(context.our_funding_contribution > SignedAmount::ZERO); - let our_funding_contribution_satoshis = context.our_funding_contribution.to_sat() as u64; - - let mut total_input_satoshis = 0u64; +) -> Result, AbortReason> { + let mut total_input_value = Amount::ZERO; let mut our_funding_inputs_weight = 0u64; for FundingTxInput { utxo, .. } in context.our_funding_inputs.iter() { - total_input_satoshis = total_input_satoshis.saturating_add(utxo.output.value.to_sat()); + total_input_value = total_input_value.checked_add(utxo.output.value).unwrap_or(Amount::MAX); let weight = BASE_INPUT_WEIGHT + utxo.satisfaction_weight; our_funding_inputs_weight = our_funding_inputs_weight.saturating_add(weight); } let funding_outputs = &context.our_funding_outputs; - let total_output_satoshis = - funding_outputs.iter().fold(0u64, |total, out| total.saturating_add(out.value.to_sat())); + let total_output_value = funding_outputs + .iter() + .fold(Amount::ZERO, |total, out| total.checked_add(out.value).unwrap_or(Amount::MAX)); + let our_funding_outputs_weight = funding_outputs.iter().fold(0u64, |weight, out| { weight.saturating_add(get_output_weight(&out.script_pubkey).to_wu()) }); @@ -2376,15 +2375,25 @@ pub(super) fn calculate_change_output_value( } } - let fees_sats = fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight); - let net_total_less_fees = - total_input_satoshis.saturating_sub(total_output_satoshis).saturating_sub(fees_sats); - if net_total_less_fees < our_funding_contribution_satoshis { + let contributed_fees = + Amount::from_sat(fee_for_weight(context.funding_feerate_sat_per_1000_weight, weight)); + + let contributed_input_value = + context.our_funding_contribution + total_output_value.to_signed().unwrap(); + assert!(contributed_input_value > SignedAmount::ZERO); + let contributed_input_value = contributed_input_value.unsigned_abs(); + + let total_input_value_less_fees = + 
total_input_value.checked_sub(contributed_fees).unwrap_or(Amount::ZERO); + if total_input_value_less_fees < contributed_input_value { // Not enough to cover contribution plus fees return Err(AbortReason::InsufficientFees); } - let remaining_value = net_total_less_fees.saturating_sub(our_funding_contribution_satoshis); - if remaining_value < change_output_dust_limit { + + let remaining_value = total_input_value_less_fees + .checked_sub(contributed_input_value) + .expect("remaining_value should not be negative"); + if remaining_value.to_sat() < change_output_dust_limit { // Enough to cover contribution plus fees, but leftover is below dust limit; no change Ok(None) } else { @@ -3440,14 +3449,14 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees - common_fees).to_sat())), + Ok(Some(gross_change - fees - common_fees)), ); // There is leftover for change, without common fees let context = FundingNegotiationContext { is_initiator: false, ..context }; assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Insufficient inputs, no leftover @@ -3482,7 +3491,7 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 100), - Ok(Some((gross_change - fees).to_sat())), + Ok(Some(gross_change - fees)), ); // Larger fee, smaller change @@ -3496,7 +3505,7 @@ mod tests { total_inputs - total_outputs - context.our_funding_contribution.to_unsigned().unwrap(); assert_eq!( calculate_change_output_value(&context, false, &ScriptBuf::new(), 300), - Ok(Some((gross_change - fees * 3 - common_fees * 3).to_sat())), + Ok(Some(gross_change - fees * 3 - common_fees * 3)), ); } diff --git 
a/lightning/src/ln/invoice_utils.rs b/lightning/src/ln/invoice_utils.rs index 425cc4d7eb6..e72ea4518a4 100644 --- a/lightning/src/ln/invoice_utils.rs +++ b/lightning/src/ln/invoice_utils.rs @@ -627,7 +627,7 @@ mod test { use crate::util::dyn_signer::{DynKeysInterface, DynPhantomKeysInterface}; use crate::util::test_utils; use bitcoin::hashes::sha256::Hash as Sha256; - use bitcoin::hashes::{sha256, Hash}; + use bitcoin::hashes::Hash; use bitcoin::network::Network; use core::time::Duration; use lightning_invoice::{ @@ -829,7 +829,7 @@ mod test { invoice.description(), Bolt11InvoiceDescriptionRef::Direct(&Description::new("test".to_string()).unwrap()) ); - assert_eq!(invoice.payment_hash(), &sha256::Hash::from_slice(&payment_hash.0[..]).unwrap()); + assert_eq!(invoice.payment_hash(), payment_hash); } #[cfg(not(feature = "std"))] @@ -1257,8 +1257,7 @@ mod test { Duration::from_secs(genesis_timestamp), ) .unwrap(); - let (payment_hash, payment_secret) = - (PaymentHash(invoice.payment_hash().to_byte_array()), *invoice.payment_secret()); + let (payment_hash, payment_secret) = (invoice.payment_hash(), *invoice.payment_secret()); let payment_preimage = if user_generated_pmt_hash { user_payment_preimage } else { @@ -1290,7 +1289,7 @@ mod test { invoice.amount_milli_satoshis().unwrap(), ); - let payment_hash = PaymentHash(invoice.payment_hash().to_byte_array()); + let payment_hash = invoice.payment_hash(); let id = PaymentId(payment_hash.0); let onion = RecipientOnionFields::secret_only(*invoice.payment_secret()); nodes[0].node.send_payment(payment_hash, onion, id, params, Retry::Attempts(0)).unwrap(); diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index 04aa8181b92..e782fee92f6 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -52,6 +52,8 @@ pub(crate) mod interactivetxs; // without the node parameter being mut. This is incorrect, and thus newer rustcs will complain // about an unnecessary mut. 
Thus, we silence the unused_mut warning in two test modules below. +#[cfg(test)] +mod accountable_tests; #[cfg(test)] #[allow(unused_mut)] mod async_payments_tests; diff --git a/lightning/src/ln/msgs.rs b/lightning/src/ln/msgs.rs index f237d73e533..2bb2b244ccb 100644 --- a/lightning/src/ln/msgs.rs +++ b/lightning/src/ln/msgs.rs @@ -32,7 +32,7 @@ use bitcoin::secp256k1::PublicKey; use bitcoin::{secp256k1, Transaction, Witness}; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{BlindedPaymentTlvs, ForwardTlvs, ReceiveTlvs}; +use crate::blinded_path::payment::{BlindedPaymentTlvs, DummyTlvs, ForwardTlvs, ReceiveTlvs}; use crate::blinded_path::payment::{BlindedTrampolineTlvs, TrampolineForwardTlvs}; use crate::ln::onion_utils; use crate::ln::types::ChannelId; @@ -768,6 +768,45 @@ pub struct UpdateAddHTLC { /// /// [`ReleaseHeldHtlc`]: crate::onion_message::async_payments::ReleaseHeldHtlc pub hold_htlc: Option<()>, + /// An experimental field indicating whether the receiving node's reputation would be held + /// accountable for the timely resolution of the HTLC. + /// + /// Note that this field is [`experimental`] so should not be used for forwarding decisions. 
+ /// + /// [`experimental`]: https://github.com/lightning/blips/blob/master/blip-0004.md + pub accountable: Option, +} + +struct AccountableBool(T); + +impl Writeable for AccountableBool { + #[inline] + fn write(&self, writer: &mut W) -> Result<(), io::Error> { + let wire_value = if self.0 { 7u8 } else { 0u8 }; + writer.write_all(&[wire_value]) + } +} + +impl Readable for AccountableBool { + #[inline] + fn read(reader: &mut R) -> Result, DecodeError> { + let mut buf = [0u8; 1]; + reader.read_exact(&mut buf)?; + let bool_value = buf[0] == 7; + Ok(AccountableBool(bool_value)) + } +} + +impl From for AccountableBool { + fn from(val: bool) -> Self { + Self(val) + } +} + +impl From> for bool { + fn from(val: AccountableBool) -> Self { + val.0 + } } /// An [`onion message`] to be sent to or received from a peer. @@ -2338,6 +2377,11 @@ mod fuzzy_internal_msgs { pub intro_node_blinding_point: Option, pub next_blinding_override: Option, } + pub struct InboundOnionDummyPayload { + pub payment_relay: PaymentRelay, + pub payment_constraints: PaymentConstraints, + pub intro_node_blinding_point: Option, + } pub struct InboundOnionBlindedReceivePayload { pub sender_intended_htlc_amt_msat: u64, pub total_msat: u64, @@ -2357,6 +2401,7 @@ mod fuzzy_internal_msgs { Receive(InboundOnionReceivePayload), BlindedForward(InboundOnionBlindedForwardPayload), BlindedReceive(InboundOnionBlindedReceivePayload), + Dummy(InboundOnionDummyPayload), } pub struct InboundTrampolineForwardPayload { @@ -3375,6 +3420,7 @@ impl_writeable_msg!(UpdateAddHTLC, { // TODO: currently we may fail to read the `ChannelManager` if we write a new even TLV in this message // and then downgrade. Once this is fixed, update the type here to match BOLTs PR 989. 
(75537, hold_htlc, option), + (106823, accountable, (option, encoding: (bool, AccountableBool))), }); impl LengthReadable for OnionMessage { @@ -3696,6 +3742,25 @@ where next_blinding_override, })) }, + ChaChaDualPolyReadAdapter { + readable: + BlindedPaymentTlvs::Dummy(DummyTlvs { payment_relay, payment_constraints }), + used_aad, + } => { + if amt.is_some() + || cltv_value.is_some() || total_msat.is_some() + || keysend_preimage.is_some() + || invoice_request.is_some() + || !used_aad + { + return Err(DecodeError::InvalidValue); + } + Ok(Self::Dummy(InboundOnionDummyPayload { + payment_relay, + payment_constraints, + intro_node_blinding_point, + })) + }, ChaChaDualPolyReadAdapter { readable: BlindedPaymentTlvs::Receive(receive_tlvs), used_aad, @@ -4374,7 +4439,7 @@ mod tests { }; use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::util::ser::{BigSize, Hostname, LengthReadable, Readable, ReadableArgs, Writeable}; - use crate::util::test_utils; + use crate::util::test_utils::{self, pubkey}; use bitcoin::hex::DisplayHex; use bitcoin::{Amount, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Witness}; @@ -5874,6 +5939,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, }; let encoded_value = update_add_htlc.encode(); let target_value = 
>::from_hex("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010
1010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap(); @@ -6761,4 +6827,71 @@ mod tests { .to_socket_addrs() .is_err()); } + + fn test_update_add_htlc() -> msgs::UpdateAddHTLC { + msgs::UpdateAddHTLC { + channel_id: ChannelId::from_bytes([2; 32]), + htlc_id: 42, + amount_msat: 1000, + payment_hash: PaymentHash([1; 32]), + cltv_expiry: 500000, + skimmed_fee_msat: None, + onion_routing_packet: msgs::OnionPacket { + version: 0, + public_key: Ok(pubkey(42)), + hop_data: [1; 20 * 65], + hmac: [2; 32], + }, + blinding_point: None, + hold_htlc: None, + accountable: None, + } + } + + #[test] + fn test_update_add_htlc_accountable_encoding() { + // Tests that accountable boolean values are written to the wire with correct u8 values. 
+ for (bool_signal, wire_value) in [(Some(false), 0u8), (Some(true), 7u8)] { + let mut base_msg = test_update_add_htlc(); + base_msg.accountable = bool_signal; + let encoded = base_msg.encode(); + assert_eq!( + *encoded.last().unwrap(), + wire_value, + "wrong wire value for accountable={:?}", + bool_signal + ); + } + } + + fn do_test_htlc_accountable_from_u8(accountable_override: Option, expected: Option) { + // Tests custom encoding conversion of u8 wire values to appropriate boolean, manually + // writing to support values that we wouldn't encode ourselves but should be able to read. + let base_msg = test_update_add_htlc(); + let mut encoded = base_msg.encode(); + if let Some(value) = accountable_override { + encoded.extend_from_slice(&[0xfe, 0x00, 0x01, 0xa1, 0x47]); + encoded.push(1); + encoded.push(value); + } + + let decoded: msgs::UpdateAddHTLC = + LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded.accountable, expected, + "accountable={:?} with override={:?} not eq to expected={:?}", + decoded.accountable, accountable_override, expected + ); + } + + #[test] + fn update_add_htlc_accountable_from_u8() { + // Tests that accountable signals encoded as a u8 are properly translated to a bool. 
+ do_test_htlc_accountable_from_u8(None, None); + do_test_htlc_accountable_from_u8(Some(8), Some(false)); // 8 is an invalid value + do_test_htlc_accountable_from_u8(Some(7), Some(true)); + do_test_htlc_accountable_from_u8(Some(3), Some(false)); + do_test_htlc_accountable_from_u8(Some(0), Some(false)); + } } diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index 906d9e247ce..1d20d1d368e 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -47,7 +47,7 @@ use bitcoin::secp256k1::{PublicKey, Secp256k1}; use core::time::Duration; use crate::blinded_path::IntroductionNode; use crate::blinded_path::message::BlindedMessagePath; -use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, PaymentContext}; +use crate::blinded_path::payment::{Bolt12OfferContext, Bolt12RefundContext, DummyTlvs, PaymentContext}; use crate::blinded_path::message::OffersContext; use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaidBolt12Invoice, PaymentFailureReason, PaymentPurpose}; use crate::ln::channelmanager::{Bolt12PaymentError, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry, self}; @@ -60,10 +60,10 @@ use crate::offers::invoice_error::InvoiceError; use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestFields, InvoiceRequestVerifiedFromOffer}; use crate::offers::nonce::Nonce; use crate::offers::parse::Bolt12SemanticError; -use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, PADDED_PATH_LENGTH}; +use crate::onion_message::messenger::{DefaultMessageRouter, Destination, MessageSendInstructions, NodeIdMessageRouter, NullMessageRouter, PeeledOnion, DUMMY_HOPS_PATH_LENGTH, QR_CODED_DUMMY_HOPS_PATH_LENGTH}; use crate::onion_message::offers::OffersMessage; use crate::routing::gossip::{NodeAlias, NodeId}; -use crate::routing::router::{PaymentParameters, RouteParameters, 
RouteParametersConfig}; +use crate::routing::router::{DEFAULT_PAYMENT_DUMMY_HOPS, PaymentParameters, RouteParameters, RouteParametersConfig}; use crate::sign::{NodeSigner, Recipient}; use crate::util::ser::Writeable; @@ -163,6 +163,20 @@ fn check_compact_path_introduction_node<'a, 'b, 'c>( && matches!(path.introduction_node(), IntroductionNode::DirectedShortChannelId(..)) } +fn check_dummy_hopped_path_length<'a, 'b, 'c>( + path: &BlindedMessagePath, + lookup_node: &Node<'a, 'b, 'c>, + expected_introduction_node: PublicKey, + expected_path_length: usize, +) -> bool { + let introduction_node_id = resolve_introduction_node(lookup_node, path); + let first_hop_len = path.blinded_hops().first().unwrap().encrypted_payload.len(); + let hops = path.blinded_hops(); + introduction_node_id == expected_introduction_node + && hops.len() == expected_path_length + && hops.iter().take(hops.len() - 1).all(|hop| hop.encrypted_payload.len() == first_hop_len) +} + fn route_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], invoice: &Bolt12Invoice ) { @@ -178,14 +192,28 @@ fn route_bolt12_payment<'a, 'b, 'c>( let amount_msats = invoice.amount_msats(); let payment_hash = invoice.payment_hash(); let args = PassAlongPathArgs::new(node, path, amount_msats, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); } fn claim_bolt12_payment<'a, 'b, 'c>( node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice ) { - let recipient = &path[path.len() - 1]; + claim_bolt12_payment_with_extra_fees( + node, + path, + expected_payment_context, + invoice, + None, + ) +} + +fn claim_bolt12_payment_with_extra_fees<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>], expected_payment_context: PaymentContext, invoice: &Bolt12Invoice, + expected_extra_fees_msat: 
Option, +) { + let recipient = path.last().expect("Empty path?"); let payment_purpose = match get_event!(recipient, Event::PaymentClaimable) { Event::PaymentClaimable { purpose, .. } => purpose, _ => panic!("No Event::PaymentClaimable"), @@ -194,20 +222,29 @@ fn claim_bolt12_payment<'a, 'b, 'c>( Some(preimage) => preimage, None => panic!("No preimage in Event::PaymentClaimable"), }; - match payment_purpose { - PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => { - assert_eq!(PaymentContext::Bolt12Offer(payment_context), expected_payment_context); - }, - PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => { - assert_eq!(PaymentContext::Bolt12Refund(payment_context), expected_payment_context); - }, + let context = match payment_purpose { + PaymentPurpose::Bolt12OfferPayment { payment_context, .. } => + PaymentContext::Bolt12Offer(payment_context), + PaymentPurpose::Bolt12RefundPayment { payment_context, .. } => + PaymentContext::Bolt12Refund(payment_context), _ => panic!("Unexpected payment purpose: {:?}", payment_purpose), - } - if let Some(inv) = claim_payment(node, path, payment_preimage) { - assert_eq!(inv, PaidBolt12Invoice::Bolt12Invoice(invoice.to_owned())); - } else { - panic!("Expected PaidInvoice::Bolt12Invoice"); }; + + assert_eq!(context, expected_payment_context); + + let expected_paths = [path]; + let mut args = ClaimAlongRouteArgs::new( + node, + &expected_paths, + payment_preimage, + ); + + if let Some(extra) = expected_extra_fees_msat { + args = args.with_expected_extra_total_fees_msat(extra); + } + + let (inv, _) = claim_payment_along_route(args); + assert_eq!(inv, Some(PaidBolt12Invoice::Bolt12Invoice(invoice.clone()))); } fn extract_offer_nonce<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) -> Nonce { @@ -455,7 +492,7 @@ fn check_dummy_hop_pattern_in_offer() { let bob_id = bob.node.get_our_node_id(); // Case 1: DefaultMessageRouter → uses compact blinded paths (via SCIDs) - // Expected: No dummy hops; each path 
contains only the recipient. + // Expected: Padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let default_router = DefaultMessageRouter::new(alice.network_graph, alice.keys_manager); let compact_offer = alice.node @@ -467,8 +504,8 @@ fn check_dummy_hop_pattern_in_offer() { for path in compact_offer.paths() { assert_eq!( - path.blinded_hops().len(), 1, - "Compact paths must include only the recipient" + path.blinded_hops().len(), QR_CODED_DUMMY_HOPS_PATH_LENGTH, + "Compact offer paths are padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH" ); } @@ -480,10 +517,10 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); // Case 2: NodeIdMessageRouter → uses node ID-based blinded paths - // Expected: 0 to MAX_DUMMY_HOPS_COUNT dummy hops, followed by recipient. 
+ // Expected: Also padded to QR_CODED_DUMMY_HOPS_PATH_LENGTH for QR code size optimization let node_id_router = NodeIdMessageRouter::new(alice.network_graph, alice.keys_manager); let padded_offer = alice.node @@ -492,7 +529,7 @@ fn check_dummy_hop_pattern_in_offer() { .build().unwrap(); assert!(!padded_offer.paths().is_empty()); - assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); + assert!(padded_offer.paths().iter().all(|path| path.blinded_hops().len() == QR_CODED_DUMMY_HOPS_PATH_LENGTH)); let payment_id = PaymentId([2; 32]); bob.node.pay_for_offer(&padded_offer, None, payment_id, Default::default()).unwrap(); @@ -502,7 +539,7 @@ fn check_dummy_hop_pattern_in_offer() { assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); } /// Checks that blinded paths are compact for short-lived offers. @@ -687,7 +724,7 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, bob, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -706,8 +743,8 @@ fn creates_and_pays_for_offer_using_two_hop_blinded_path() { // to Alice when she's handling the message. Therefore, either Bob or Charlie could // serve as the introduction node for the reply path back to Alice. 
assert!( - check_compact_path_introduction_node(&reply_path, david, bob_id) || - check_compact_path_introduction_node(&reply_path, david, charlie_id) + check_dummy_hopped_path_length(&reply_path, david, bob_id, DUMMY_HOPS_PATH_LENGTH) || + check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH) ); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); @@ -790,7 +827,7 @@ fn creates_and_pays_for_refund_using_two_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(bob_id)); } - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(david, &[charlie, bob, alice], &invoice); expect_recent_payment!(david, RecentPaymentDetails::Pending, payment_id); @@ -845,7 +882,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -857,7 +894,7 @@ fn creates_and_pays_for_offer_using_one_hop_blinded_path() { for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -913,7 +950,7 @@ fn creates_and_pays_for_refund_using_one_hop_blinded_path() 
{ for path in invoice.payment_paths() { assert_eq!(path.introduction_node(), &IntroductionNode::NodeId(alice_id)); } - assert!(check_compact_path_introduction_node(&reply_path, bob, alice_id)); + assert!(check_dummy_hopped_path_length(&reply_path, bob, alice_id, DUMMY_HOPS_PATH_LENGTH)); route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); @@ -1059,6 +1096,7 @@ fn send_invoice_requests_with_distinct_reply_path() { let bob_id = bob.node.get_our_node_id(); let charlie_id = charlie.node.get_our_node_id(); let david_id = david.node.get_our_node_id(); + let frank_id = nodes[6].node.get_our_node_id(); disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5], &nodes[6]]); disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]); @@ -1089,7 +1127,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1099,7 +1137,7 @@ fn send_invoice_requests_with_distinct_reply_path() { alice.onion_messenger.handle_onion_message(bob_id, &onion_message); let (_, reply_path) = extract_invoice_request(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, frank_id, DUMMY_HOPS_PATH_LENGTH)); } /// This test checks that when multiple potential introduction nodes are available for the payee, @@ -1170,7 +1208,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = 
bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, charlie_id, DUMMY_HOPS_PATH_LENGTH)); // Send, extract and verify the second Invoice Request message let onion_message = david.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); @@ -1179,7 +1217,7 @@ fn send_invoice_for_refund_with_distinct_reply_path() { let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); let (_, reply_path) = extract_invoice(alice, &onion_message); - assert!(check_compact_path_introduction_node(&reply_path, alice, nodes[6].node.get_our_node_id())); + assert!(check_dummy_hopped_path_length(&reply_path, alice, nodes[6].node.get_our_node_id(), DUMMY_HOPS_PATH_LENGTH)); } /// Verifies that the invoice request message can be retried if it fails to reach the @@ -1233,7 +1271,7 @@ fn creates_and_pays_for_offer_with_retry() { }); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), bob_id); - assert!(check_compact_path_introduction_node(&reply_path, alice, bob_id)); + assert!(check_dummy_hopped_path_length(&reply_path, alice, bob_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); bob.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -1410,7 +1448,20 @@ fn creates_offer_with_blinded_path_using_unannounced_introduction_node() { route_bolt12_payment(bob, &[alice], &invoice); expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); - claim_bolt12_payment(bob, &[alice], payment_context, &invoice); + // When the payer is the introduction node of a blinded path, LDK doesn't + // subtract the forward fee for the `payer -> next_hop` channel (see + // `BlindedPaymentPath::advance_path_by_one`). 
This keeps fee logic simple, + // at the cost of a small, intentional overpayment. + // + // In the old two-hop case (payer as introduction node → payee), this never + // surfaced because the payer simply wasn’t charged the forward fee. + // + // With dummy hops in LDK v0.3, even a real two-node path can appear as a + // longer blinded route, so the overpayment shows up in tests. + // + // Until the fee-handling trade-off is revisited, we pass an expected extra + // fee here so tests can compensate for it. + claim_bolt12_payment_with_extra_fees(bob, &[alice], payment_context, &invoice, Some(1000)); expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id); } @@ -1534,7 +1585,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); @@ -1563,7 +1614,7 @@ fn fails_authentication_when_handling_invoice_request() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); assert_eq!(alice.onion_messenger.next_onion_message_for_peer(charlie_id), None); } @@ -1663,7 +1714,7 @@ fn fails_authentication_when_handling_invoice_for_offer() { let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); assert_eq!(invoice_request.amount_msats(), 
Some(10_000_000)); assert_ne!(invoice_request.payer_signing_pubkey(), david_id); - assert!(check_compact_path_introduction_node(&reply_path, david, charlie_id)); + assert!(check_dummy_hopped_path_length(&reply_path, david, charlie_id, DUMMY_HOPS_PATH_LENGTH)); let onion_message = alice.onion_messenger.next_onion_message_for_peer(charlie_id).unwrap(); charlie.onion_messenger.handle_onion_message(alice_id, &onion_message); @@ -2422,12 +2473,13 @@ fn rejects_keysend_to_non_static_invoice_path() { let args = PassAlongPathArgs::new(&nodes[0], route[0], amt_msat, payment_hash, ev) .with_payment_preimage(payment_preimage) - .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }); + .expect_failure(HTLCHandlingFailureType::Receive { payment_hash }) + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let mut updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_fail_malformed_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_malformed_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); - expect_payment_failed_conditions(&nodes[0], payment_hash, true, PaymentFailedConditions::new()); + expect_payment_failed_conditions(&nodes[0], payment_hash, false, PaymentFailedConditions::new()); } #[test] @@ -2486,12 +2538,14 @@ fn no_double_pay_with_stale_channelmanager() { let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); let ev = remove_first_msg_event_to_node(&bob_id, &mut events); let args = PassAlongPathArgs::new(&nodes[0], 
expected_route[0], amt_msat, payment_hash, ev) - .without_clearing_recipient_events(); + .without_clearing_recipient_events() + .with_dummy_tlvs(&[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS]); do_pass_along_path(args); expect_recent_payment!(nodes[0], RecentPaymentDetails::Pending, payment_id); diff --git a/lightning/src/ln/onion_payment.rs b/lightning/src/ln/onion_payment.rs index 1abe4330a25..e7b5f557ffb 100644 --- a/lightning/src/ln/onion_payment.rs +++ b/lightning/src/ln/onion_payment.rs @@ -149,6 +149,14 @@ pub(super) fn create_fwd_pending_htlc_info( (RoutingInfo::Direct { short_channel_id, new_packet_bytes, next_hop_hmac }, amt_to_forward, outgoing_cltv_value, intro_node_blinding_point, next_blinding_override) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + msg: "Dummy Hop OnionHopData provided for us as an intermediary node", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }) + }, onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. 
} => return Err(InboundHTLCErr { msg: "Final Node OnionHopData provided for us as an intermediary node", @@ -267,6 +275,7 @@ pub(super) fn create_fwd_pending_htlc_info( outgoing_amt_msat: amt_to_forward, outgoing_cltv_value, skimmed_fee_msat: None, + incoming_accountable: msg.accountable.unwrap_or(false), }) } @@ -274,7 +283,7 @@ pub(super) fn create_fwd_pending_htlc_info( pub(super) fn create_recv_pending_htlc_info( hop_data: onion_utils::Hop, shared_secret: [u8; 32], payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>, allow_underpay: bool, - counterparty_skimmed_fee_msat: Option, current_height: u32 + counterparty_skimmed_fee_msat: Option, incoming_accountable: bool, current_height: u32 ) -> Result { let ( payment_data, keysend_preimage, custom_tlvs, onion_amt_msat, onion_cltv_expiry, @@ -364,6 +373,14 @@ pub(super) fn create_recv_pending_htlc_info( msg: "Got blinded non final data with an HMAC of 0", }) }, + onion_utils::Hop::Dummy { .. } => { + debug_assert!(false, "Dummy hop should have been peeled earlier"); + return Err(InboundHTLCErr { + reason: LocalHTLCFailureReason::InvalidOnionBlinding, + err_data: vec![0; 32], + msg: "Got blinded non final data with an HMAC of 0", + }) + } onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. 
} => { return Err(InboundHTLCErr { reason: LocalHTLCFailureReason::InvalidOnionPayload, @@ -456,6 +473,7 @@ pub(super) fn create_recv_pending_htlc_info( outgoing_amt_msat: onion_amt_msat, outgoing_cltv_value: onion_cltv_expiry, skimmed_fee_msat: counterparty_skimmed_fee_msat, + incoming_accountable, }) } @@ -478,7 +496,7 @@ where L::Target: Logger, { let (hop, next_packet_details_opt) = - decode_incoming_update_add_htlc_onion(msg, node_signer, logger, secp_ctx + decode_incoming_update_add_htlc_onion(msg, &*node_signer, &*logger, secp_ctx ).map_err(|(msg, failure_reason)| { let (reason, err_data) = match msg { HTLCFailureMsg::Malformed(_) => (failure_reason, Vec::new()), @@ -516,11 +534,35 @@ where // onion here and check it. create_fwd_pending_htlc_info(msg, hop, shared_secret.secret_bytes(), Some(next_packet_pubkey))? }, + onion_utils::Hop::Dummy { dummy_hop_data, next_hop_hmac, new_packet_bytes, .. } => { + let next_packet_details = match next_packet_details_opt { + Some(next_packet_details) => next_packet_details, + // Dummy Hops should always include the next hop details + None => return Err(InboundHTLCErr { + msg: "Failed to decode update add htlc onion", + reason: LocalHTLCFailureReason::InvalidOnionPayload, + err_data: Vec::new(), + }), + }; + + let new_update_add_htlc = onion_utils::peel_dummy_hop_update_add_htlc( + msg, + dummy_hop_data, + next_hop_hmac, + new_packet_bytes, + next_packet_details, + &*node_signer, + secp_ctx + ); + + peel_payment_onion(&new_update_add_htlc, node_signer, logger, secp_ctx, cur_height, allow_skimmed_fees)? + }, _ => { let shared_secret = hop.shared_secret().secret_bytes(); create_recv_pending_htlc_info( hop, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, - None, allow_skimmed_fees, msg.skimmed_fee_msat, cur_height, + None, allow_skimmed_fees, msg.skimmed_fee_msat, + msg.accountable.unwrap_or(false), cur_height, )? 
} }) @@ -529,6 +571,8 @@ where pub(super) enum HopConnector { // scid-based routing ShortChannelId(u64), + // Dummy hop for path padding + Dummy, // Trampoline-based routing #[allow(unused)] Trampoline(PublicKey), @@ -633,6 +677,22 @@ where outgoing_cltv_value }) } + onion_utils::Hop::Dummy { dummy_hop_data: msgs::InboundOnionDummyPayload { ref payment_relay, ref payment_constraints, .. }, shared_secret, .. } => { + let (amt_to_forward, outgoing_cltv_value) = match check_blinded_forward( + msg.amount_msat, msg.cltv_expiry, &payment_relay, &payment_constraints, &BlindedHopFeatures::empty() + ) { + Ok((amt, cltv)) => (amt, cltv), + Err(()) => { + return encode_relay_error("Underflow calculating outbound amount or cltv value for blinded forward", + LocalHTLCFailureReason::InvalidOnionBlinding, shared_secret.secret_bytes(), None, &[0; 32]); + } + }; + + let next_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, + msg.onion_routing_packet.public_key.unwrap(), &shared_secret.secret_bytes()); + + Some(NextPacketDetails { next_packet_pubkey, outgoing_connector: HopConnector::Dummy, outgoing_amt_msat: amt_to_forward, outgoing_cltv_value }) + } onion_utils::Hop::TrampolineForward { next_trampoline_hop_data: msgs::InboundTrampolineForwardPayload { amt_to_forward, outgoing_cltv_value, next_trampoline }, trampoline_shared_secret, incoming_trampoline_public_key, .. 
} => { let next_trampoline_packet_pubkey = onion_utils::next_hop_pubkey(secp_ctx, incoming_trampoline_public_key, &trampoline_shared_secret.secret_bytes()); @@ -814,6 +874,7 @@ mod tests { skimmed_fee_msat: None, blinding_point: None, hold_htlc: None, + accountable: None, } } diff --git a/lightning/src/ln/onion_utils.rs b/lightning/src/ln/onion_utils.rs index dbc2ebc9d48..492f6ec69b0 100644 --- a/lightning/src/ln/onion_utils.rs +++ b/lightning/src/ln/onion_utils.rs @@ -16,7 +16,8 @@ use crate::crypto::streams::ChaChaReader; use crate::events::HTLCHandlingFailureReason; use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS; use crate::ln::channelmanager::{HTLCSource, RecipientOnionFields}; -use crate::ln::msgs::{self, DecodeError}; +use crate::ln::msgs::{self, DecodeError, InboundOnionDummyPayload, OnionPacket, UpdateAddHTLC}; +use crate::ln::onion_payment::{HopConnector, NextPacketDetails}; use crate::offers::invoice_request::InvoiceRequest; use crate::routing::gossip::NetworkUpdate; use crate::routing::router::{BlindedTail, Path, RouteHop, RouteParameters, TrampolineHop}; @@ -2227,6 +2228,17 @@ pub(crate) enum Hop { /// Bytes of the onion packet we're forwarding. new_packet_bytes: [u8; ONION_DATA_LEN], }, + /// This onion payload is dummy, and needs to be peeled by us. + Dummy { + /// Blinding point for introduction-node dummy hops. + dummy_hop_data: msgs::InboundOnionDummyPayload, + /// Shared secret for decrypting the next-hop public key. + shared_secret: SharedSecret, + /// HMAC of the next hop's onion packet. + next_hop_hmac: [u8; 32], + /// Onion packet bytes after this dummy layer is peeled. + new_packet_bytes: [u8; ONION_DATA_LEN], + }, /// This onion payload was for us, not for forwarding to a next-hop. Contains information for /// verifying the incoming payment. Receive { @@ -2281,6 +2293,7 @@ impl Hop { match self { Hop::Forward { shared_secret, .. } => shared_secret, Hop::BlindedForward { shared_secret, .. 
} => shared_secret, + Hop::Dummy { shared_secret, .. } => shared_secret, Hop::TrampolineForward { outer_shared_secret, .. } => outer_shared_secret, Hop::TrampolineBlindedForward { outer_shared_secret, .. } => outer_shared_secret, Hop::Receive { shared_secret, .. } => shared_secret, @@ -2348,6 +2361,12 @@ where new_packet_bytes, }) }, + msgs::InboundOnionPayload::Dummy(dummy_hop_data) => Ok(Hop::Dummy { + dummy_hop_data, + shared_secret, + next_hop_hmac, + new_packet_bytes, + }), _ => { if blinding_point.is_some() { return Err(OnionDecodeErr::Malformed { @@ -2525,6 +2544,61 @@ where } } +/// Peels a single dummy hop from an inbound `UpdateAddHTLC` by reconstructing the next +/// onion packet and HTLC state. +/// +/// This helper is used when processing dummy hops in a blinded path. Dummy hops are not +/// forwarded on the network; instead, their onion layer is removed locally and a new +/// `UpdateAddHTLC` is constructed with the next onion packet and updated amount/CLTV +/// values. +/// +/// This function performs no validation and does not enqueue or forward the HTLC. +/// It only reconstructs the next `UpdateAddHTLC` for further local processing. 
+pub(super) fn peel_dummy_hop_update_add_htlc( + msg: &UpdateAddHTLC, dummy_hop_data: InboundOnionDummyPayload, next_hop_hmac: [u8; 32], + new_packet_bytes: [u8; ONION_DATA_LEN], next_packet_details: NextPacketDetails, + node_signer: NS, secp_ctx: &Secp256k1, +) -> UpdateAddHTLC +where + NS::Target: NodeSigner, +{ + let NextPacketDetails { + next_packet_pubkey, + outgoing_amt_msat, + outgoing_connector, + outgoing_cltv_value, + } = next_packet_details; + + debug_assert!( + matches!(outgoing_connector, HopConnector::Dummy), + "Dummy hop must always map to HopConnector::Dummy" + ); + + let next_blinding_point = dummy_hop_data + .intro_node_blinding_point + .or(msg.blinding_point) + .and_then(|blinding_point| { + let ss = node_signer.ecdh(Recipient::Node, &blinding_point, None).ok()?.secret_bytes(); + + next_hop_pubkey(secp_ctx, blinding_point, &ss).ok() + }); + + let new_onion_packet = OnionPacket { + version: 0, + public_key: next_packet_pubkey, + hop_data: new_packet_bytes, + hmac: next_hop_hmac, + }; + + UpdateAddHTLC { + onion_routing_packet: new_onion_packet, + blinding_point: next_blinding_point, + amount_msat: outgoing_amt_msat, + cltv_expiry: outgoing_cltv_value, + ..msg.clone() + } +} + /// Build a payment onion, returning the first hop msat and cltv values as well. /// /// `cur_block_height` should be set to the best known block height + 1. @@ -3457,6 +3531,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + payment_nonce: None, }; process_onion_failure(&ctx_full, &logger, &htlc_source, onion_error) @@ -3643,6 +3718,7 @@ mod tests { first_hop_htlc_msat: dummy_amt_msat, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + payment_nonce: None, }; { @@ -3831,6 +3907,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + payment_nonce: None, }; // Iterate over all possible failure positions and check that the cases that can be attributed are. 
@@ -3940,6 +4017,7 @@ mod tests { first_hop_htlc_msat: 0, payment_id: PaymentId([1; 32]), bolt12_invoice: None, + payment_nonce: None, }; let decrypted_failure = process_onion_failure(&ctx_full, &logger, &htlc_source, packet); diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 75fe55bfeac..f1ade2d6a12 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -124,6 +124,12 @@ pub(crate) enum PendingOutboundPayment { // Storing the BOLT 12 invoice here to allow Proof of Payment after // the payment is made. bolt12_invoice: Option, + /// The [`Nonce`] used when the BOLT 12 [`InvoiceRequest`] was created. Stored here so + /// retried paths can include the nonce in [`HTLCSource::OutboundRoute`] for payer proof + /// construction after payment success. + /// + /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest + payment_nonce: Option, custom_tlvs: Vec<(u64, Vec)>, pending_amt_msat: u64, /// Used to track the fee paid. Present iff the payment was serialized on 0.0.103+. @@ -180,6 +186,13 @@ impl PendingOutboundPayment { } } + fn payment_nonce(&self) -> Option<&Nonce> { + match self { + PendingOutboundPayment::Retryable { payment_nonce, .. } => payment_nonce.as_ref(), + _ => None, + } + } + fn increment_attempts(&mut self) { if let PendingOutboundPayment::Retryable { attempts, .. 
} = self { attempts.count += 1; @@ -833,6 +846,7 @@ pub(super) struct SendAlongPathArgs<'a> { pub keysend_preimage: &'a Option, pub invoice_request: Option<&'a InvoiceRequest>, pub bolt12_invoice: Option<&'a PaidBolt12Invoice>, + pub payment_nonce: Option<&'a Nonce>, pub session_priv_bytes: [u8; 32], pub hold_htlc_at_next_hop: bool, } @@ -933,7 +947,7 @@ where IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, { - let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array()); + let payment_hash = invoice.payment_hash(); let amount = match (invoice.amount_milli_satoshis(), amount_msats) { (Some(amt), None) | (None, Some(amt)) => amt, @@ -965,7 +979,7 @@ where pub(super) fn send_payment_for_bolt12_invoice< R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP >( - &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, + &self, invoice: &Bolt12Invoice, payment_id: PaymentId, payment_nonce: Option, router: &R, first_hops: Vec, features: Bolt12InvoiceFeatures, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, @@ -1000,7 +1014,7 @@ where } let invoice = PaidBolt12Invoice::Bolt12Invoice(invoice.clone()); self.send_payment_for_bolt12_invoice_internal( - payment_id, payment_hash, None, None, invoice, route_params, retry_strategy, false, router, + payment_id, payment_hash, None, None, invoice, payment_nonce, route_params, retry_strategy, false, router, first_hops, inflight_htlcs, entropy_source, node_signer, node_id_lookup, secp_ctx, best_block_height, pending_events, send_payment_along_path ) @@ -1012,7 +1026,7 @@ where >( &self, payment_id: PaymentId, payment_hash: PaymentHash, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, - bolt12_invoice: PaidBolt12Invoice, + bolt12_invoice: PaidBolt12Invoice, payment_nonce: Option, mut route_params: RouteParameters, retry_strategy: Retry, hold_htlcs_at_next_hop: bool, router: &R, first_hops: 
Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, @@ -1073,7 +1087,8 @@ where hash_map::Entry::Occupied(entry) => match entry.get() { PendingOutboundPayment::InvoiceReceived { .. } => { let (retryable_payment, onion_session_privs) = Self::create_pending_payment( - payment_hash, recipient_onion.clone(), keysend_preimage, None, Some(bolt12_invoice.clone()), &route, + payment_hash, recipient_onion.clone(), keysend_preimage, None, Some(bolt12_invoice.clone()), + payment_nonce, &route, Some(retry_strategy), payment_params, entropy_source, best_block_height, ); *entry.into_mut() = retryable_payment; @@ -1084,7 +1099,8 @@ where invoice_request } else { unreachable!() }; let (retryable_payment, onion_session_privs) = Self::create_pending_payment( - payment_hash, recipient_onion.clone(), keysend_preimage, Some(invreq), Some(bolt12_invoice.clone()), &route, + payment_hash, recipient_onion.clone(), keysend_preimage, Some(invreq), Some(bolt12_invoice.clone()), + payment_nonce, &route, Some(retry_strategy), payment_params, entropy_source, best_block_height ); outbounds.insert(payment_id, retryable_payment); @@ -1097,7 +1113,8 @@ where core::mem::drop(outbounds); let result = self.pay_route_internal( - &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, Some(&bolt12_invoice), payment_id, + &route, payment_hash, &recipient_onion, keysend_preimage, invoice_request, Some(&bolt12_invoice), + payment_nonce.as_ref(), payment_id, Some(route_params.final_value_msat), &onion_session_privs, hold_htlcs_at_next_hop, node_signer, best_block_height, &send_payment_along_path ); @@ -1290,6 +1307,7 @@ where Some(keysend_preimage), Some(&invoice_request), invoice, + None, route_params, retry_strategy, hold_htlcs_at_next_hop, @@ -1499,7 +1517,7 @@ where })?; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, - keysend_preimage, None, None, payment_id, None, 
&onion_session_privs, false, node_signer, + keysend_preimage, None, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(self.logger, "Sending payment with id {} and hash {} returned {:?}", payment_id, payment_hash, res); @@ -1578,7 +1596,7 @@ where } } } - let (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice) = { + let (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice, payment_nonce) = { let mut outbounds = self.pending_outbound_payments.lock().unwrap(); match outbounds.entry(payment_id) { hash_map::Entry::Occupied(mut payment) => { @@ -1621,8 +1639,9 @@ where payment.get_mut().increment_attempts(); let bolt12_invoice = payment.get().bolt12_invoice(); + let payment_nonce = payment.get().payment_nonce().copied(); - (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice.cloned()) + (total_msat, recipient_onion, keysend_preimage, onion_session_privs, invoice_request, bolt12_invoice.cloned(), payment_nonce) }, PendingOutboundPayment::Legacy { .. 
} => { log_error!(self.logger, "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102"); @@ -1662,7 +1681,8 @@ where } }; let res = self.pay_route_internal(&route, payment_hash, &recipient_onion, keysend_preimage, - invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_id, Some(total_msat), + invoice_request.as_ref(), bolt12_invoice.as_ref(), payment_nonce.as_ref(), + payment_id, Some(total_msat), &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path); log_info!(self.logger, "Result retrying payment id {}: {:?}", &payment_id, res); if let Err(e) = res { @@ -1823,7 +1843,7 @@ where let recipient_onion_fields = RecipientOnionFields::spontaneous_empty(); match self.pay_route_internal(&route, payment_hash, &recipient_onion_fields, - None, None, None, payment_id, None, &onion_session_privs, false, node_signer, + None, None, None, None, payment_id, None, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path ) { Ok(()) => Ok((payment_hash, payment_id)), @@ -1886,7 +1906,7 @@ where hash_map::Entry::Occupied(_) => Err(PaymentSendFailure::DuplicatePayment), hash_map::Entry::Vacant(entry) => { let (payment, onion_session_privs) = Self::create_pending_payment( - payment_hash, recipient_onion, keysend_preimage, None, bolt12_invoice, route, retry_strategy, + payment_hash, recipient_onion, keysend_preimage, None, bolt12_invoice, None, route, retry_strategy, payment_params, entropy_source, best_block_height ); entry.insert(payment); @@ -1899,7 +1919,8 @@ where fn create_pending_payment( payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option, invoice_request: Option, - bolt12_invoice: Option, route: &Route, retry_strategy: Option, + bolt12_invoice: Option, payment_nonce: Option, + route: &Route, retry_strategy: Option, payment_params: Option, entropy_source: &ES, best_block_height: u32 ) -> (PendingOutboundPayment, Vec<[u8; 32]>) where @@ -1923,6 
+1944,7 @@ where keysend_preimage, invoice_request, bolt12_invoice, + payment_nonce, custom_tlvs: recipient_onion.custom_tlvs, starting_block_height: best_block_height, total_msat: route.get_total_amount(), @@ -2071,6 +2093,7 @@ where fn pay_route_internal( &self, route: &Route, payment_hash: PaymentHash, recipient_onion: &RecipientOnionFields, keysend_preimage: Option, invoice_request: Option<&InvoiceRequest>, bolt12_invoice: Option<&PaidBolt12Invoice>, + payment_nonce: Option<&Nonce>, payment_id: PaymentId, recv_value_msat: Option, onion_session_privs: &Vec<[u8; 32]>, hold_htlcs_at_next_hop: bool, node_signer: &NS, best_block_height: u32, send_payment_along_path: &F ) -> Result<(), PaymentSendFailure> @@ -2126,7 +2149,7 @@ where let path_res = send_payment_along_path(SendAlongPathArgs { path: &path, payment_hash: &payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage: &keysend_preimage, invoice_request, - bolt12_invoice, hold_htlc_at_next_hop: hold_htlcs_at_next_hop, + bolt12_invoice, payment_nonce, hold_htlc_at_next_hop: hold_htlcs_at_next_hop, session_priv_bytes: *session_priv_bytes }); results.push(path_res); @@ -2194,7 +2217,7 @@ where F: Fn(SendAlongPathArgs) -> Result<(), APIError>, { self.pay_route_internal(route, payment_hash, &recipient_onion, - keysend_preimage, None, None, payment_id, recv_value_msat, &onion_session_privs, + keysend_preimage, None, None, None, payment_id, recv_value_msat, &onion_session_privs, false, node_signer, best_block_height, &send_payment_along_path) .map_err(|e| { self.remove_outbound_if_all_failed(payment_id, &e); e }) } @@ -2218,6 +2241,7 @@ where #[rustfmt::skip] pub(super) fn claim_htlc( &self, payment_id: PaymentId, payment_preimage: PaymentPreimage, bolt12_invoice: Option, + payment_nonce: Option, session_priv: SecretKey, path: Path, from_onchain: bool, ev_completion_action: &mut Option, pending_events: &Mutex)>>, ) { @@ -2238,6 +2262,7 @@ where amount_msat, fee_paid_msat, bolt12_invoice: 
bolt12_invoice, + payment_nonce, }, ev_completion_action.take())); payment.get_mut().mark_fulfilled(); } @@ -2638,6 +2663,7 @@ where keysend_preimage: None, // only used for retries, and we'll never retry on startup invoice_request: None, // only used for retries, and we'll never retry on startup bolt12_invoice: None, // only used for retries, and we'll never retry on startup! + payment_nonce: None, // only used for retries, and we'll never retry on startup custom_tlvs: Vec::new(), // only used for retries, and we'll never retry on startup pending_amt_msat: path_amt, pending_fee_msat: Some(path_fee), @@ -2726,6 +2752,7 @@ impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment, (11, remaining_max_total_routing_fee_msat, option), (13, invoice_request, option), (15, bolt12_invoice, option), + (17, payment_nonce, option), (not_written, retry_strategy, (static_value, None)), (not_written, attempts, (static_value, PaymentAttempts::new())), }, @@ -3212,7 +3239,7 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( - &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), + &invoice, payment_id, None, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &pending_events, |_| panic!() ), @@ -3275,7 +3302,7 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( - &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), + &invoice, payment_id, None, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &pending_events, |_| panic!() ), @@ -3351,7 +3378,7 @@ mod tests { assert!(!outbound_payments.has_pending_payments()); assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( - &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), + &invoice, payment_id, None, &&router, vec![], 
Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &pending_events, |_| panic!() ), @@ -3371,7 +3398,7 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( - &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), + &invoice, payment_id, None, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &pending_events, |_| Ok(()) ), @@ -3382,7 +3409,7 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( - &invoice, payment_id, &&router, vec![], Bolt12InvoiceFeatures::empty(), + &invoice, payment_id, None, &&router, vec![], Bolt12InvoiceFeatures::empty(), || InFlightHtlcs::new(), &&keys_manager, &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &pending_events, |_| panic!() ), diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index f9894fa8819..8f209c88e25 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -5103,6 +5103,7 @@ fn peel_payment_onion_custom_tlvs() { onion_routing_packet, blinding_point: None, hold_htlc: None, + accountable: None, }; let peeled_onion = crate::ln::onion_payment::peel_payment_onion( &update_add, diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs index d143082821d..a38262e6952 100644 --- a/lightning/src/ln/reload_tests.rs +++ b/lightning/src/ln/reload_tests.rs @@ -1258,6 +1258,64 @@ fn do_manager_persisted_pre_outbound_edge_forward(intercept_htlc: bool) { expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } +#[test] +fn test_manager_persisted_post_outbound_edge_forward() { + // Test that we will not double-forward an HTLC after restart if it has already been forwarded to + // the outbound edge, which was previously broken. 
+ let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes_1_deserialized; + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; + + // Lock in the HTLC from node_a <> node_b. + let amt_msat = 5000; + let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], amt_msat); + nodes[0].node.send_payment_with_route(route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors(&nodes[0], 1); + let updates = get_htlc_update_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[1], &nodes[0], &updates.commitment_signed, false, false); + + // Add the HTLC to the outbound edge, node_b <> node_c. + nodes[1].node.process_pending_htlc_forwards(); + check_added_monitors(&nodes[1], 1); + + // Disconnect peers and reload the forwarding node_b. 
+ nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + + let node_b_encoded = nodes[1].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[1], chan_id_1).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[1], chan_id_2).encode(); + reload_node!(nodes[1], node_b_encoded, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); + + reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[0])); + let mut args_b_c = ReconnectArgs::new(&nodes[1], &nodes[2]); + args_b_c.send_channel_ready = (true, true); + args_b_c.send_announcement_sigs = (true, true); + args_b_c.pending_htlc_adds = (0, 1); + // While reconnecting, we re-send node_b's outbound update_add and commit the HTLC to the b<>c + // channel. + reconnect_nodes(args_b_c); + + // Ensure node_b won't double-forward the outbound HTLC (this was previously broken). + nodes[1].node.process_pending_htlc_forwards(); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Claim the HTLC backwards to node_a. 
+ expect_and_process_pending_htlcs(&nodes[2], false); + expect_payment_claimable!(nodes[2], payment_hash, payment_secret, amt_msat, None, nodes[2].node.get_our_node_id()); + let path: &[&[_]] = &[&[&nodes[1], &nodes[2]]]; + do_claim_payment_along_route(ClaimAlongRouteArgs::new(&nodes[0], path, payment_preimage)); + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); +} + #[test] fn test_reload_partial_funding_batch() { let chanmon_cfgs = create_chanmon_cfgs(3); diff --git a/lightning/src/ln/splicing_tests.rs b/lightning/src/ln/splicing_tests.rs index a05c0bd92d8..db6680d963c 100644 --- a/lightning/src/ln/splicing_tests.rs +++ b/lightning/src/ln/splicing_tests.rs @@ -29,8 +29,9 @@ use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_channel_signer::SignerOp; +use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; -use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut}; +use bitcoin::{Amount, OutPoint as BitcoinOutPoint, ScriptBuf, Transaction, TxOut, WPubkeyHash}; #[test] fn test_splicing_not_supported_api_error() { @@ -47,11 +48,7 @@ fn test_splicing_not_supported_api_error() { let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); - let bs_contribution = SpliceContribution::SpliceIn { - value: Amount::ZERO, - inputs: Vec::new(), - change_script: None, - }; + let bs_contribution = SpliceContribution::splice_in(Amount::ZERO, Vec::new(), None); let res = nodes[1].node.splice_channel( &channel_id, @@ -113,11 +110,8 @@ fn test_v1_splice_in_negative_insufficient_inputs() { let funding_inputs = create_dual_funding_utxos_with_prev_txs(&nodes[0], &[extra_splice_funding_input_sats]); - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_sats), - inputs: funding_inputs, - change_script: None, - }; + let contribution = + SpliceContribution::splice_in(Amount::from_sat(splice_in_sats), funding_inputs, None); // Initiate splice-in, with 
insufficient input contribution let res = nodes[0].node.splice_channel( @@ -490,12 +484,10 @@ fn do_test_splice_state_reset_on_disconnect(reload: bool) { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -748,12 +740,10 @@ fn test_config_reject_inbound_splices() { let (_, _, channel_id, _) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 50_000_000); - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -811,16 +801,27 @@ fn test_splice_in() { let coinbase_tx1 = provide_anchor_reserves(&nodes); let coinbase_tx2 = provide_anchor_reserves(&nodes); - let initiator_contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(initial_channel_value_sat * 2), - inputs: vec![ + + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = Amount::from_sat(321); + + let initiator_contribution = SpliceContribution::splice_in( + added_value, + vec![ FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), ], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + 
Some(change_script.clone()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); @@ -850,32 +851,194 @@ fn test_splice_out() { let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); - let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], &splice_tx); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); +} + +#[test] +fn test_splice_in_and_out() { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, 
Some(config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let initial_channel_value_sat = 100_000; + let (_, _, channel_id, _) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, initial_channel_value_sat, 0); + + let _ = send_payment(&nodes[0], &[&nodes[1]], 100_000); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net negative value, with fees taken from the contributed inputs and the + // remaining value sent to change + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + let added_value = Amount::from_sat(htlc_limit_msat / 1000); + let removed_value = added_value * 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), + value: removed_value / 2, script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), }, TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), + value: removed_value / 2, script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), }, ], - }; + Some(change_script.clone()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + mine_transaction(&nodes[0], &splice_tx); mine_transaction(&nodes[1], &splice_tx); let htlc_limit_msat = 
nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; - assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + assert!(htlc_limit_msat < added_value.to_sat() * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; - assert!(htlc_limit_msat < initial_channel_value_sat / 2 * 1000); + assert!(htlc_limit_msat < added_value.to_sat() * 1000); + let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Contribute a net positive value, with fees taken from the contributed inputs and the + // remaining value sent to change + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value / 2; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + let fees = if cfg!(feature = "grind_signatures") { + Amount::from_sat(383) + } else { + Amount::from_sat(384) + }; + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script.clone()), + ); + + let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); + let expected_change = Amount::ONE_BTC * 2 - added_value - fees; + assert_eq!( + splice_tx.output.iter().find(|txout| txout.script_pubkey == change_script).unwrap().value, + expected_change, + ); + + mine_transaction(&nodes[0], &splice_tx); + mine_transaction(&nodes[1], 
&splice_tx); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert_eq!(htlc_limit_msat, 0); + + lock_splice_after_blocks(&nodes[0], &nodes[1], ANTI_REORG_DELAY - 1); + + let htlc_limit_msat = nodes[0].node.list_channels()[0].next_outbound_htlc_limit_msat; + assert!(htlc_limit_msat > initial_channel_value_sat / 2 * 1000); let _ = send_payment(&nodes[0], &[&nodes[1]], htlc_limit_msat); + + let coinbase_tx1 = provide_anchor_reserves(&nodes); + let coinbase_tx2 = provide_anchor_reserves(&nodes); + + // Fail adding a net contribution value of zero + let added_value = Amount::from_sat(initial_channel_value_sat * 2); + let removed_value = added_value; + let change_script = ScriptBuf::new_p2wpkh(&WPubkeyHash::all_zeros()); + + let initiator_contribution = SpliceContribution::splice_in_and_out( + added_value, + vec![ + FundingTxInput::new_p2wpkh(coinbase_tx1, 0).unwrap(), + FundingTxInput::new_p2wpkh(coinbase_tx2, 0).unwrap(), + ], + vec![ + TxOut { + value: removed_value / 2, + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: removed_value / 2, + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ], + Some(change_script), + ); + + assert_eq!( + nodes[0].node.splice_channel( + &channel_id, + &nodes[1].node.get_our_node_id(), + initiator_contribution, + FEERATE_FLOOR_SATS_PER_KW, + None, + ), + Err(APIError::APIMisuseError { + err: format!("Channel {} cannot be spliced; contribution cannot be zero", channel_id), + }), + ); } #[cfg(test)] @@ -919,11 +1082,11 @@ fn do_test_splice_commitment_broadcast(splice_status: SpliceStatus, claim_htlcs: let payment_amount = 1_000_000; let (preimage1, payment_hash1, ..) 
= route_payment(&nodes[0], &[&nodes[1]], payment_amount); let splice_in_amount = initial_channel_capacity / 2; - let initiator_contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let initiator_contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx.clone(), 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); let splice_tx = splice_channel(&nodes[0], &nodes[1], channel_id, initiator_contribution); let (preimage2, payment_hash2, ..) = route_payment(&nodes[0], &[&nodes[1]], payment_amount); let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS; @@ -1117,18 +1280,16 @@ fn do_test_splice_reestablish(reload: bool, async_monitor_update: bool) { route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Negotiate the splice up until the nodes exchange `tx_complete`. 
- let initiator_contribution = SpliceContribution::SpliceOut { - outputs: vec![ - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }, - TxOut { - value: Amount::from_sat(initial_channel_value_sat / 4), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }, - ], - }; + let initiator_contribution = SpliceContribution::splice_out(vec![ + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }, + TxOut { + value: Amount::from_sat(initial_channel_value_sat / 4), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }, + ]); let initial_commit_sig_for_acceptor = negotiate_splice_tx(&nodes[0], &nodes[1], channel_id, initiator_contribution); assert_eq!(initial_commit_sig_for_acceptor.htlc_signatures.len(), 1); @@ -1405,12 +1566,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { nodes[1].node.peer_disconnected(node_id_0); let splice_out_sat = initial_channel_value_sat / 4; - let node_0_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let node_0_contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); nodes[0] .node .splice_channel( @@ -1423,12 +1582,10 @@ fn do_test_propose_splice_while_disconnected(reload: bool, use_0conf: bool) { .unwrap(); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - let node_1_contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(splice_out_sat), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let node_1_contribution = 
SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(splice_out_sat), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); nodes[1] .node .splice_channel( @@ -1681,11 +1838,11 @@ fn disconnect_on_unexpected_interactive_tx_message() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Complete interactive-tx construction, but fail by having the acceptor send a duplicate // tx_complete instead of commitment_signed. @@ -1721,11 +1878,11 @@ fn fail_splice_on_interactive_tx_error() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor echo back tx_add_input instead // of sending tx_complete. The failure occurs because the serial id will have the wrong parity. 
@@ -1827,11 +1984,11 @@ fn fail_splice_on_tx_abort() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Fail during interactive-tx construction by having the acceptor send tx_abort instead of // tx_complete. @@ -1881,11 +2038,11 @@ fn fail_splice_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of interactive-tx construction. 
let _ = complete_splice_handshake(initiator, acceptor, channel_id, contribution.clone()); @@ -1932,11 +2089,11 @@ fn fail_quiescent_action_on_channel_close() { let coinbase_tx = provide_anchor_reserves(&nodes); let splice_in_amount = initial_channel_capacity / 2; - let contribution = SpliceContribution::SpliceIn { - value: Amount::from_sat(splice_in_amount), - inputs: vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], - change_script: Some(nodes[0].wallet_source.get_change_script().unwrap()), - }; + let contribution = SpliceContribution::splice_in( + Amount::from_sat(splice_in_amount), + vec![FundingTxInput::new_p2wpkh(coinbase_tx, 0).unwrap()], + Some(nodes[0].wallet_source.get_change_script().unwrap()), + ); // Close the channel before completion of STFU handshake. initiator @@ -2025,23 +2182,19 @@ fn do_test_splice_with_inflight_htlc_forward_and_resolution(expire_scid_pre_forw // Splice both channels, lock them, and connect enough blocks to trigger the legacy SCID pruning // logic while the HTLC is still pending. 
- let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[0].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_0_1 = splice_channel(&nodes[0], &nodes[1], channel_id_0_1, contribution); for node in &nodes { mine_transaction(node, &splice_tx_0_1); } - let contribution = SpliceContribution::SpliceOut { - outputs: vec![TxOut { - value: Amount::from_sat(1_000), - script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), - }], - }; + let contribution = SpliceContribution::splice_out(vec![TxOut { + value: Amount::from_sat(1_000), + script_pubkey: nodes[1].wallet_source.get_change_script().unwrap(), + }]); let splice_tx_1_2 = splice_channel(&nodes[1], &nodes[2], channel_id_1_2, contribution); for node in &nodes { mine_transaction(node, &splice_tx_1_2); diff --git a/lightning/src/offers/flow.rs b/lightning/src/offers/flow.rs index 94a4534c61a..8b03f0ea081 100644 --- a/lightning/src/offers/flow.rs +++ b/lightning/src/offers/flow.rs @@ -52,7 +52,7 @@ use crate::onion_message::async_payments::{ StaticInvoicePersisted, }; use crate::onion_message::messenger::{ - Destination, MessageRouter, MessageSendInstructions, Responder, PADDED_PATH_LENGTH, + Destination, MessageRouter, MessageSendInstructions, Responder, DUMMY_HOPS_PATH_LENGTH, }; use crate::onion_message::offers::OffersMessage; use crate::onion_message::packet::OnionMessageContents; @@ -490,11 +490,12 @@ where Ok(InvreqResponseInstructions::SendInvoice(invoice_request)) } - /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer metadata, - /// returning the corresponding [`PaymentId`] if successful. 
+ /// Verifies a [`Bolt12Invoice`] using the provided [`OffersContext`] or the invoice's payer + /// metadata, returning the corresponding [`PaymentId`] if successful. /// - /// - If an [`OffersContext::OutboundPayment`] with a `nonce` is provided, verification is performed - /// using this to form the payer metadata. + /// - If an [`OffersContext::OutboundPaymentForOffer`] or + /// [`OffersContext::OutboundPaymentForRefund`] with a `nonce` is provided, verification is + /// performed using this to form the payer metadata. /// - If no context is provided and the invoice corresponds to a [`Refund`] without blinded paths, /// verification is performed using the [`Bolt12Invoice::payer_metadata`]. /// - If neither condition is met, verification fails. @@ -508,8 +509,19 @@ where None if invoice.is_for_refund_without_paths() => { invoice.verify_using_metadata(expanded_key, secp_ctx) }, - Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => { - invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + Some(&OffersContext::OutboundPaymentForOffer { payment_id, nonce, .. }) => { + if invoice.is_for_offer() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } + }, + Some(&OffersContext::OutboundPaymentForRefund { payment_id, nonce, .. 
}) => { + if invoice.is_for_refund() { + invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx) + } else { + Err(()) + } }, _ => Err(()), } @@ -675,7 +687,8 @@ where let secp_ctx = &self.secp_ctx; let nonce = Nonce::from_entropy_source(entropy); - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + let context = + MessageContext::Offers(OffersContext::OutboundPaymentForRefund { payment_id, nonce }); // Create the base builder with common properties let mut builder = RefundBuilder::deriving_signing_pubkey( @@ -1098,7 +1111,8 @@ where &self, invoice_request: InvoiceRequest, payment_id: PaymentId, nonce: Nonce, peers: Vec, ) -> Result<(), Bolt12SemanticError> { - let context = MessageContext::Offers(OffersContext::OutboundPayment { payment_id, nonce }); + let context = + MessageContext::Offers(OffersContext::OutboundPaymentForOffer { payment_id, nonce }); let reply_paths = self .create_blinded_paths(peers, context) .map_err(|_| Bolt12SemanticError::MissingPaths)?; @@ -1280,13 +1294,14 @@ where prev_outbound_scid_alias, htlc_id, }); - let num_dummy_hops = PADDED_PATH_LENGTH.saturating_sub(1); + let num_dummy_hops = DUMMY_HOPS_PATH_LENGTH.saturating_sub(1); BlindedMessagePath::new_with_dummy_hops( &[], self.get_our_node_id(), num_dummy_hops, self.receive_auth_key, context, + false, &*entropy, &self.secp_ctx, ) diff --git a/lightning/src/offers/invoice.rs b/lightning/src/offers/invoice.rs index 6dfd6eac508..8d83225f117 100644 --- a/lightning/src/offers/invoice.rs +++ b/lightning/src/offers/invoice.rs @@ -778,6 +778,19 @@ struct InvoiceFields { } macro_rules! invoice_accessors { ($self: ident, $contents: expr) => { + /// Whether the invoice was created in response to a [`Refund`]. + pub fn is_for_refund(&$self) -> bool { + $contents.is_for_refund() + } + + /// Whether the invoice was created in response to an [`InvoiceRequest`] created from an + /// [`Offer`]. 
+ /// + /// [`Offer`]: crate::offers::offer::Offer + pub fn is_for_offer(&$self) -> bool { + $contents.is_for_offer() + } + /// The chains that may be used when paying a requested invoice. /// /// From [`Offer::chains`]; `None` if the invoice was created in response to a [`Refund`]. @@ -1093,6 +1106,20 @@ impl InvoiceContents { } } + fn is_for_refund(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => true, + InvoiceContents::ForOffer { .. } => false, + } + } + + fn is_for_offer(&self) -> bool { + match self { + InvoiceContents::ForRefund { .. } => false, + InvoiceContents::ForOffer { .. } => true, + } + } + fn offer_chains(&self) -> Option> { match self { InvoiceContents::ForOffer { invoice_request, .. } => { diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs index 605a81a4f95..75e2aaf3c5f 100644 --- a/lightning/src/onion_message/functional_tests.rs +++ b/lightning/src/onion_message/functional_tests.rs @@ -436,8 +436,9 @@ fn one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let blinded_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; nodes[0].messenger.send_onion_message(test_msg, instructions).unwrap(); @@ -450,18 +451,15 @@ fn blinded_path_with_dummy_hops() { let nodes = create_nodes(2); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = 
BlindedMessagePath::new_with_dummy_hops( &[], nodes[1].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, + &Secp256k1::new(), ); // Ensure that dummy hops are added to the blinded path. assert_eq!(blinded_path.blinded_hops().len(), 6); @@ -477,19 +475,16 @@ fn two_unblinded_two_blinded() { let nodes = create_nodes(5); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[3].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[4].entropy_source; - let receive_key = nodes[4].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[4].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[4].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[4].entropy_source, + &Secp256k1::new(), ); let path = OnionMessagePath { intermediate_nodes: vec![nodes[1].node_id, nodes[2].node_id], @@ -507,21 +502,18 @@ fn three_blinded_hops() { let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), 
); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; @@ -548,8 +540,9 @@ fn async_response_over_one_blinded_hop() { let context = MessageContext::Custom(Vec::new()); let entropy = &*nodes[1].entropy_source; let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); + let node_id = nodes[1].node_id; let reply_path = - BlindedMessagePath::new(&[], nodes[1].node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], node_id, receive_key, context, false, entropy, &secp_ctx); // 4. Create a responder using the reply path for Alice. let responder = Some(Responder::new(reply_path)); @@ -590,7 +583,7 @@ fn async_response_with_reply_path_succeeds() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice asynchronously responds to Bob, expecting a response back from him. let responder = Responder::new(reply_path); @@ -632,7 +625,7 @@ fn async_response_with_reply_path_fails() { let entropy = &*bob.entropy_source; let receive_key = bob.messenger.node_signer.get_receive_auth_key(); let reply_path = - BlindedMessagePath::new(&[], bob.node_id, receive_key, context, entropy, &secp_ctx); + BlindedMessagePath::new(&[], bob.node_id, receive_key, context, false, entropy, &secp_ctx); // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and // disconnected. Thus, a reply path could no be created for the response. @@ -668,28 +661,26 @@ fn too_big_packet_error() { #[test] fn test_blinded_path_padding_for_full_length_path() { - // Check that for a full blinded path, all encrypted payload are padded to rounded-off length. 
+ // Check that for a full blinded path without compact padding, all encrypted payload are padded + // to rounded-off length. let nodes = create_nodes(4); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. - let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + false, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); assert!(is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); @@ -703,32 +694,72 @@ fn test_blinded_path_padding_for_full_length_path() { } #[test] -fn test_blinded_path_no_padding_for_compact_path() { - // Check that for a compact blinded path, no padding is applied. +fn test_blinded_path_compact_padding() { + // Check that for a blinded path with non-SCID intermediate hops with compact padding, no extra + // padding is applied. let nodes = create_nodes(4); - let secp_ctx = Secp256k1::new(); - // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths. 
+ let intermediate_nodes = [ + MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, + MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, + ]; + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. + let blinded_path = BlindedMessagePath::new_with_dummy_hops( + &intermediate_nodes, + nodes[3].node_id, + TEST_DUMMY_HOP_COUNT, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), + ); + + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_compact_blinded_path_compact_padding, below, differ. + assert_eq!(hops[0].encrypted_payload.len(), 51); +} + +#[test] +fn test_compact_blinded_path_compact_padding() { + // Check that for a blinded path with SCID intermediate hops with compact padding, no extra + // padding is applied. + let nodes = create_nodes(4); + + // Include some short_channel_id, so that MessageRouter uses this to create compact blinded paths let intermediate_nodes = [ MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: Some(24) }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: Some(25) }, ]; - // Update the context to create a larger final receive TLVs, ensuring that - // the hop sizes vary before padding. 
- let context = MessageContext::Custom(vec![0u8; 42]); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); + // Build with a larger context to create a larger final receive TLVs, ensuring that the hop + // sizes vary before padding. let blinded_path = BlindedMessagePath::new_with_dummy_hops( &intermediate_nodes, nodes[3].node_id, TEST_DUMMY_HOP_COUNT, - receive_key, - context, - entropy, - &secp_ctx, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(vec![0u8; 42]), + true, + &*nodes[3].entropy_source, + &Secp256k1::new(), ); - assert!(!is_padded(&blinded_path.blinded_hops(), MESSAGE_PADDING_ROUND_OFF)); + let hops = blinded_path.blinded_hops(); + assert!(!is_padded(&hops, MESSAGE_PADDING_ROUND_OFF)); + assert_eq!(hops.len(), TEST_DUMMY_HOP_COUNT + 3); + for hop in hops.iter().take(TEST_DUMMY_HOP_COUNT + 2) { + assert_eq!(hops[0].encrypted_payload.len(), hop.encrypted_payload.len()); + } + // Check the actual encrypted payload lengths, which may change in the future but serves to + // ensure that this and test_blinded_path_compact_padding, above, differ. 
+ assert_eq!(hops[0].encrypted_payload.len(), 26); } #[test] @@ -743,15 +774,13 @@ fn we_are_intro_node() { MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -764,15 +793,13 @@ fn we_are_intro_node() { // Try with a two-hop blinded path where we are the introduction node. let intermediate_nodes = [MessageForwardNode { node_id: nodes[0].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[1].entropy_source; - let receive_key = nodes[1].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[1].node_id, - receive_key, - context, - entropy, + nodes[1].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[1].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -790,19 +817,16 @@ fn invalid_blinded_path_error() { let nodes = create_nodes(3); let test_msg = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let mut blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - 
receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); blinded_path.clear_blinded_hops(); let destination = Destination::BlindedPath(blinded_path); @@ -828,15 +852,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); nodes[0] @@ -855,15 +877,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[3].entropy_source; - let receive_key = nodes[3].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[3].node_id, - receive_key, - context, - entropy, + nodes[3].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[3].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -871,15 +891,13 @@ fn reply_path() { MessageForwardNode { node_id: nodes[2].node_id, short_channel_id: None }, MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }, ]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let reply_path = 
BlindedMessagePath::new( &intermediate_nodes, nodes[0].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let instructions = MessageSendInstructions::WithSpecifiedReplyPath { destination, reply_path }; @@ -975,15 +993,13 @@ fn requests_peer_connection_for_buffered_messages() { let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1046,15 +1062,13 @@ fn drops_buffered_messages_waiting_for_peer_connection() { let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = &*nodes[0].entropy_source; - let receive_key = nodes[0].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, + nodes[0].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[0].entropy_source, &secp_ctx, ); let destination = Destination::BlindedPath(blinded_path); @@ -1107,19 +1121,16 @@ fn intercept_offline_peer_oms() { } let message = TestCustomMessage::Pong; - let secp_ctx = Secp256k1::new(); let intermediate_nodes = [MessageForwardNode { node_id: nodes[1].node_id, short_channel_id: None }]; - let context = MessageContext::Custom(Vec::new()); - let entropy = 
&*nodes[2].entropy_source; - let receive_key = nodes[2].messenger.node_signer.get_receive_auth_key(); let blinded_path = BlindedMessagePath::new( &intermediate_nodes, nodes[2].node_id, - receive_key, - context, - entropy, - &secp_ctx, + nodes[2].messenger.node_signer.get_receive_auth_key(), + MessageContext::Custom(Vec::new()), + false, + &*nodes[2].entropy_source, + &Secp256k1::new(), ); let destination = Destination::BlindedPath(blinded_path); let instructions = MessageSendInstructions::WithoutReplyPath { destination }; diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 9a2c06bb72f..533853229b1 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -272,7 +272,7 @@ where /// ]; /// let context = MessageContext::Custom(Vec::new()); /// let receive_key = keys_manager.get_receive_auth_key(); -/// let blinded_path = BlindedMessagePath::new(&hops, your_node_id, receive_key, context, &keys_manager, &secp_ctx); +/// let blinded_path = BlindedMessagePath::new(&hops, your_node_id, receive_key, context, false, &keys_manager, &secp_ctx); /// /// // Send a custom onion message to a blinded path. /// let destination = Destination::BlindedPath(blinded_path); @@ -524,9 +524,11 @@ pub trait MessageRouter { /// A [`MessageRouter`] that can only route to a directly connected [`Destination`]. /// -/// [`DefaultMessageRouter`] constructs compact [`BlindedMessagePath`]s on a best-effort basis. -/// That is, if appropriate SCID information is available for the intermediate peers, it will -/// default to creating compact paths. +/// [`DefaultMessageRouter`] tries to construct compact or private [`BlindedMessagePath`]s based on +/// the [`MessageContext`] given to [`MessageRouter::create_blinded_paths`]. 
That is, if the +/// provided context implies the path may be used in a BOLT 12 object which might appear in a QR +/// code, it reduces the amount of padding and dummy hops and prefers building compact paths when +/// short channel IDs (SCIDs) are available for intermediate peers. /// /// # Compact Blinded Paths /// @@ -545,7 +547,8 @@ pub trait MessageRouter { /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. pub struct DefaultMessageRouter>, L: Deref, ES: Deref> where L::Target: Logger, @@ -553,15 +556,19 @@ where { network_graph: G, entropy_source: ES, + compact_paths: bool, } -// Target total length (in hops) for non-compact blinded paths. -// We pad with dummy hops until the path reaches this length, -// obscuring the recipient's true position. +// Target total length (in hops) for blinded paths used outside of QR codes. // -// Compact paths are optimized for minimal size, so we avoid -// adding dummy hops to them. -pub(crate) const PADDED_PATH_LENGTH: usize = 4; +// We add dummy hops until the path reaches this length (including the recipient). +pub(crate) const DUMMY_HOPS_PATH_LENGTH: usize = 4; + +// Target total length (in hops) for blinded paths included in objects which may appear in a QR +// code. +// +// We add dummy hops until the path reaches this length (including the recipient). 
+pub(crate) const QR_CODED_DUMMY_HOPS_PATH_LENGTH: usize = 2; impl>, L: Deref, ES: Deref> DefaultMessageRouter where @@ -569,17 +576,33 @@ where ES::Target: EntropySource, { /// Creates a [`DefaultMessageRouter`] using the given [`NetworkGraph`]. + /// + /// Compact blinded paths are enabled by default. Use [`Self::with_compact_paths`] to + /// configure this behavior. pub fn new(network_graph: G, entropy_source: ES) -> Self { - Self { network_graph, entropy_source } + Self { network_graph, entropy_source, compact_paths: true } + } + + /// Creates a [`DefaultMessageRouter`] with configurable compact blinded paths behavior. + /// + /// When `compact_paths` is `true`, blinded paths will use short channel IDs (SCIDs) instead + /// of full node pubkeys when possible, resulting in smaller serialization suitable for + /// space-constrained formats like QR codes. However, compact paths may fail to route if + /// the corresponding channel is closed or modified. + /// + /// When `compact_paths` is `false`, blinded paths will always use full node IDs, which + /// are more stable for long-lived offers but result in larger encoded data. + pub fn with_compact_paths(network_graph: G, entropy_source: ES, compact_paths: bool) -> Self { + Self { network_graph, entropy_source, compact_paths } } pub(crate) fn create_blinded_paths_from_iter< - I: ExactSizeIterator, + I: ExactSizeIterator + Clone, T: secp256k1::Signing + secp256k1::Verification, >( network_graph: &G, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, context: MessageContext, peers: I, entropy_source: &ES, secp_ctx: &Secp256k1, - compact_paths: bool, + never_compact_path: bool, ) -> Result, ()> { // Limit the number of blinded paths that are computed. 
const MAX_PATHS: usize = 3; @@ -592,6 +615,31 @@ where let is_recipient_announced = network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); + let (size_constrained, path_len_incl_dummys) = match &context { + MessageContext::Offers(OffersContext::InvoiceRequest { .. }) + | MessageContext::Offers(OffersContext::OutboundPaymentForRefund { .. }) => { + // When including blinded paths within BOLT 12 objects that appear in QR codes, we + // sadly need to be conservative about size, especially if the QR code ultimately + // also includes an on-chain address. + (true, QR_CODED_DUMMY_HOPS_PATH_LENGTH) + }, + MessageContext::Offers(OffersContext::StaticInvoiceRequested { .. }) => { + // Async Payments aggressively embeds the entire `InvoiceRequest` in the payment + // onion. In a future version it should likely move to embedding only the + // `InvoiceRequest`-specific fields instead, but until then we have to be + // incredibly strict in the size of the blinded path we include in a static payment + // `Offer`. + (true, 0) + }, + _ => { + // If there's no need to be small, add additional dummy hops and never use + // SCID-based next-hops as they carry additional expiry risk. + (false, DUMMY_HOPS_PATH_LENGTH) + }, + }; + + let compact_paths = !never_compact_path && size_constrained; + let has_one_peer = peers.len() == 1; let mut peer_info = peers .map(|peer| MessageForwardNode { @@ -619,12 +667,8 @@ where }); let build_path = |intermediate_hops: &[MessageForwardNode]| { - let dummy_hops_count = if compact_paths { - 0 - } else { - // Add one for the final recipient TLV - PADDED_PATH_LENGTH.saturating_sub(intermediate_hops.len() + 1) - }; + // Calculate the dummy hops given the total hop count target (including the recipient). 
+ let dummy_hops_count = path_len_incl_dummys.saturating_sub(intermediate_hops.len() + 1); BlindedMessagePath::new_with_dummy_hops( intermediate_hops, @@ -632,6 +676,7 @@ where dummy_hops_count, local_node_receive_key, context.clone(), + size_constrained, &**entropy_source, secp_ctx, ) @@ -651,12 +696,6 @@ where } } - // Sanity check: Ones the paths are created for the non-compact case, ensure - // each of them are of the length `PADDED_PATH_LENGTH`. - if !compact_paths { - debug_assert!(paths.iter().all(|path| path.blinded_hops().len() == PADDED_PATH_LENGTH)); - } - if compact_paths { for path in &mut paths { path.use_compact_introduction_node(&network_graph); @@ -740,13 +779,18 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - true, + !self.compact_paths, ) } } /// This message router is similar to [`DefaultMessageRouter`], but it always creates -/// full-length blinded paths, using the peer's [`NodeId`]. +/// non-compact blinded paths, using the peer's [`NodeId`]. It uses the same heuristics as +/// [`DefaultMessageRouter`] for deciding when to add additional dummy hops to the generated blinded +/// paths. +/// +/// This may be useful in cases where you want a long-lived blinded path and anticipate channel(s) +/// may close, but connections to specific peers will remain stable. /// /// This message router can only route to a directly connected [`Destination`]. /// @@ -755,7 +799,8 @@ where /// Creating [`BlindedMessagePath`]s may affect privacy since, if a suitable path cannot be found, /// it will create a one-hop path using the recipient as the introduction node if it is an announced /// node. Otherwise, there is no way to find a path to the introduction node in order to send a -/// message, and thus an `Err` is returned. +/// message, and thus an `Err` is returned. The impact of this may be somewhat muted when +/// additional dummy hops are added to the blinded path, but this protection is not complete. 
pub struct NodeIdMessageRouter>, L: Deref, ES: Deref> where L::Target: Logger, @@ -790,8 +835,11 @@ where fn create_blinded_paths( &self, recipient: PublicKey, local_node_receive_key: ReceiveAuthKey, - context: MessageContext, peers: Vec, secp_ctx: &Secp256k1, + context: MessageContext, mut peers: Vec, secp_ctx: &Secp256k1, ) -> Result, ()> { + for peer in peers.iter_mut() { + peer.short_channel_id = None; + } DefaultMessageRouter::create_blinded_paths_from_iter( &self.network_graph, recipient, @@ -800,7 +848,7 @@ where peers.into_iter(), &self.entropy_source, secp_ctx, - false, + true, ) } } diff --git a/lightning/src/routing/gossip.rs b/lightning/src/routing/gossip.rs index ae317ad1ac3..534bebe7618 100644 --- a/lightning/src/routing/gossip.rs +++ b/lightning/src/routing/gossip.rs @@ -43,6 +43,7 @@ use crate::util::indexed_map::{ use crate::util::logger::{Level, Logger}; use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK}; use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer}; +use crate::util::wakers::Future; use crate::io; use crate::io_extras::{copy, sink}; @@ -327,7 +328,10 @@ where L::Target: Logger, { network_graph: G, - utxo_lookup: RwLock>, + #[cfg(any(feature = "_test_utils", test))] + pub(super) utxo_lookup: Option, + #[cfg(not(any(feature = "_test_utils", test)))] + utxo_lookup: Option, full_syncs_requested: AtomicUsize, pending_events: Mutex>, logger: L, @@ -340,25 +344,19 @@ where { /// Creates a new tracker of the actual state of the network of channels and nodes, /// assuming an existing [`NetworkGraph`]. + /// /// UTXO lookup is used to make sure announced channels exist on-chain, channel data is /// correct, and the announcement is signed with channel owners' keys. 
pub fn new(network_graph: G, utxo_lookup: Option, logger: L) -> Self { P2PGossipSync { network_graph, full_syncs_requested: AtomicUsize::new(0), - utxo_lookup: RwLock::new(utxo_lookup), + utxo_lookup, pending_events: Mutex::new(vec![]), logger, } } - /// Adds a provider used to check new announcements. Does not affect - /// existing announcements unless they are updated. - /// Add, update or remove the provider would replace the current one. - pub fn add_utxo_lookup(&self, utxo_lookup: Option) { - *self.utxo_lookup.write().unwrap() = utxo_lookup; - } - /// Gets a reference to the underlying [`NetworkGraph`] which was provided in /// [`P2PGossipSync::new`]. /// @@ -367,6 +365,17 @@ where &self.network_graph } + /// Gets a [`Future`] which will resolve the next time an async validation of gossip data + /// completes. + /// + /// If the [`UtxoLookup`] provided in [`P2PGossipSync::new`] does not return + /// [`UtxoResult::Async`] values, the returned [`Future`] will never resolve + /// + /// [`UtxoResult::Async`]: crate::routing::utxo::UtxoResult::Async + pub fn validation_completion_future(&self) -> Future { + self.network_graph.pending_checks.completion_notifier.get_future() + } + /// Returns true when a full routing table sync should be performed with a peer. fn should_request_full_sync(&self) -> bool { const FULL_SYNCS_TO_REQUEST: usize = 5; @@ -378,39 +387,42 @@ where } } - /// Used to broadcast forward gossip messages which were validated async. - /// - /// Note that this will ignore events other than `Broadcast*` or messages with too much excess - /// data. 
- pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) { - match &mut ev { - MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { - return; - } - if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0) - > MAX_EXCESS_BYTES_FOR_RELAY - { - *update_msg = None; - } - }, - MessageSendEvent::BroadcastChannelUpdate { msg, .. } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { - return; - } - }, - MessageSendEvent::BroadcastNodeAnnouncement { msg } => { - if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY - || msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY - || msg.contents.excess_data.len() + msg.contents.excess_address_data.len() + /// Walks the list of pending UTXO validations and removes completed ones, adding any messages + /// we should forward as a result to [`Self::pending_events`]. + fn process_completed_checks(&self) { + let msgs = self.network_graph.pending_checks.check_resolved_futures(&*self.network_graph); + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.reserve(msgs.len()); + for mut message in msgs { + match &mut message { + MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { + continue; + } + if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0) > MAX_EXCESS_BYTES_FOR_RELAY - { - return; - } - }, - _ => return, + { + *update_msg = None; + } + }, + MessageSendEvent::BroadcastChannelUpdate { msg, .. 
} => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY { + continue; + } + }, + MessageSendEvent::BroadcastNodeAnnouncement { msg } => { + if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY + || msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY + || msg.contents.excess_data.len() + msg.contents.excess_address_data.len() + > MAX_EXCESS_BYTES_FOR_RELAY + { + continue; + } + }, + _ => continue, + } + pending_events.push(message); } - self.pending_events.lock().unwrap().push(ev); } } @@ -549,8 +561,7 @@ where fn handle_channel_announcement( &self, _their_node_id: Option, msg: &msgs::ChannelAnnouncement, ) -> Result { - self.network_graph - .update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?; + self.network_graph.update_channel_from_announcement(msg, &self.utxo_lookup)?; Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY) } @@ -884,6 +895,7 @@ where } fn get_and_clear_pending_msg_events(&self) -> Vec { + self.process_completed_checks(); let mut ret = Vec::new(); let mut pending_events = self.pending_events.lock().unwrap(); core::mem::swap(&mut ret, &mut pending_events); @@ -1670,9 +1682,17 @@ where fn read(reader: &mut R, logger: L) -> Result, DecodeError> { let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION); + const MAX_CHAN_COUNT_LIMIT: usize = 100_000_000; + const MAX_NODE_COUNT_LIMIT: usize = 10_000_000; + let chain_hash: ChainHash = Readable::read(reader)?; let channels_count: u64 = Readable::read(reader)?; - let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE); + // Pre-allocate 115% of the known channel count to avoid unnecessary reallocations. 
+		let channels_map_capacity = (channels_count as u128 * 115 / 100)
+			.try_into()
+			.map(|v: usize| v.min(MAX_CHAN_COUNT_LIMIT))
+			.map_err(|_| DecodeError::InvalidValue)?;
+		let mut channels = IndexedMap::with_capacity(channels_map_capacity);
 		for _ in 0..channels_count {
 			let chan_id: u64 = Readable::read(reader)?;
 			let chan_info: ChannelInfo = Readable::read(reader)?;
@@ -1684,7 +1704,12 @@ where
 		if nodes_count > u32::max_value() as u64 / 2 {
 			return Err(DecodeError::InvalidValue);
 		}
-		let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE);
+		// Pre-allocate 115% of the known node count to avoid unnecessary reallocations.
+		let nodes_map_capacity: usize = (nodes_count as u128 * 115 / 100)
+			.try_into()
+			.map(|v: usize| v.min(MAX_NODE_COUNT_LIMIT))
+			.map_err(|_| DecodeError::InvalidValue)?;
+		let mut nodes = IndexedMap::with_capacity(nodes_map_capacity);
 		for i in 0..nodes_count {
 			let node_id = Readable::read(reader)?;
 			let mut node_info: NodeInfo = Readable::read(reader)?;
@@ -1760,13 +1785,15 @@ where
 	}
 }
 
-// In Jan, 2025 there were about 49K channels.
-// We over-allocate by a bit because 20% more is better than the double we get if we're slightly
-// too low
-const CHAN_COUNT_ESTIMATE: usize = 60_000;
-// In Jan, 2025 there were about 15K nodes
-// We over-allocate by a bit because 33% more is better than the double we get if we're slightly
-// too low
+/// In Jan, 2026 there were about 54K channels.
+///
+/// We over-allocate by a bit because ~15% more is better than the double we get if we're slightly
+/// too low.
+const CHAN_COUNT_ESTIMATE: usize = 63_000;
+/// In Jan, 2026 there were about 17K nodes
+///
+/// We over-allocate by a bit because 15% more is better than the double we get if we're slightly
+/// too low.
 const NODE_COUNT_ESTIMATE: usize = 20_000;
 
 impl<L: Deref> NetworkGraph<L>
@@ -1775,12 +1802,18 @@ where
 {
 	/// Creates a new, empty, network graph.
pub fn new(network: Network, logger: L) -> NetworkGraph { + let (node_map_cap, chan_map_cap) = if matches!(network, Network::Bitcoin) { + (NODE_COUNT_ESTIMATE, CHAN_COUNT_ESTIMATE) + } else { + (0, 0) + }; + Self { secp_ctx: Secp256k1::verification_only(), chain_hash: ChainHash::using_genesis_block(network), logger, - channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)), - nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)), + channels: RwLock::new(IndexedMap::with_capacity(chan_map_cap)), + nodes: RwLock::new(IndexedMap::with_capacity(node_map_cap)), next_node_counter: AtomicUsize::new(0), removed_node_counters: Mutex::new(Vec::new()), last_rapid_gossip_sync_timestamp: Mutex::new(None), diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index c06e5174263..2dd48ca8058 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -13,8 +13,8 @@ use bitcoin::secp256k1::{self, PublicKey, Secp256k1}; use lightning_invoice::Bolt11Invoice; use crate::blinded_path::payment::{ - BlindedPaymentPath, ForwardTlvs, PaymentConstraints, PaymentForwardNode, PaymentRelay, - ReceiveTlvs, + BlindedPaymentPath, DummyTlvs, ForwardTlvs, PaymentConstraints, PaymentForwardNode, + PaymentRelay, ReceiveTlvs, }; use crate::blinded_path::{BlindedHop, Direction, IntroductionNode}; use crate::crypto::chacha20::ChaCha20; @@ -74,6 +74,9 @@ pub struct DefaultRouter< score_params: SP, } +/// The number of dummy hops included in [`BlindedPaymentPath`]s created by [`DefaultRouter`]. 
+pub const DEFAULT_PAYMENT_DUMMY_HOPS: usize = 3; + impl< G: Deref>, L: Deref, @@ -198,9 +201,9 @@ where }) }) .map(|forward_node| { - BlindedPaymentPath::new( - &[forward_node], recipient, local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, - &*self.entropy_source, secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[forward_node], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs.clone(), u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx ) }) .take(MAX_PAYMENT_PATHS) @@ -210,9 +213,9 @@ where Ok(paths) if !paths.is_empty() => Ok(paths), _ => { if network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)) { - BlindedPaymentPath::new( - &[], recipient, local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, - secp_ctx + BlindedPaymentPath::new_with_dummy_hops( + &[], recipient, &[DummyTlvs::default(); DEFAULT_PAYMENT_DUMMY_HOPS], + local_node_receive_key, tlvs, u64::MAX, MIN_FINAL_CLTV_EXPIRY_DELTA, &*self.entropy_source, secp_ctx ).map(|path| vec![path]) } else { Err(()) @@ -3943,10 +3946,7 @@ mod tests { ChannelUsage, FixedPenaltyScorer, ProbabilisticScorer, ProbabilisticScoringDecayParameters, ProbabilisticScoringFeeParameters, ScoreLookUp, }; - use crate::routing::test_utils::{ - add_channel, add_or_update_node, build_graph, build_line_graph, get_nodes, - id_to_feature_flags, update_channel, - }; + use crate::routing::test_utils::*; use crate::routing::utxo::UtxoResult; use crate::types::features::{BlindedHopFeatures, ChannelFeatures, InitFeatures, NodeFeatures}; use crate::util::config::UserConfig; @@ -5368,7 +5368,7 @@ mod tests { fn available_amount_while_routing_test() { // Tests whether we choose the correct available channel amount while routing. 
- let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph(); + let (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) = build_graph_with_gossip_validation(); let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx); let scorer = ln_test_utils::TestScorer::new(); let random_seed_bytes = [42; 32]; @@ -5588,11 +5588,10 @@ mod tests { .push_opcode(opcodes::all::OP_PUSHNUM_2) .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_p2wsh(); + *chain_monitor.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(15), script_pubkey: good_script.clone() })); - gossip_sync.add_utxo_lookup(Some(chain_monitor)); - - add_channel(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333); + add_channel_skipping_utxo_update(&gossip_sync, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333); update_channel(&gossip_sync, &secp_ctx, &privkeys[0], UnsignedChannelUpdate { chain_hash: ChainHash::using_genesis_block(Network::Testnet), short_channel_id: 333, diff --git a/lightning/src/routing/test_utils.rs b/lightning/src/routing/test_utils.rs index c5c35c9ce77..a433fa30c5b 100644 --- a/lightning/src/routing/test_utils.rs +++ b/lightning/src/routing/test_utils.rs @@ -10,7 +10,9 @@ // licenses. 
use crate::routing::gossip::{NetworkGraph, NodeAlias, P2PGossipSync}; +use crate::routing::utxo::UtxoResult; use crate::types::features::{ChannelFeatures, NodeFeatures}; +use crate::ln::chan_utils::make_funding_redeemscript; use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, MAX_VALUE_MSAT, NodeAnnouncement, RoutingMessageHandler, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement}; use crate::util::test_utils; use crate::util::ser::Writeable; @@ -22,6 +24,7 @@ use bitcoin::hex::FromHex; use bitcoin::network::Network; use bitcoin::secp256k1::{PublicKey,SecretKey}; use bitcoin::secp256k1::{Secp256k1, All}; +use bitcoin::{Amount, TxOut}; #[allow(unused)] use crate::prelude::*; @@ -58,19 +61,34 @@ pub(crate) fn channel_announcement( } // Using the same keys for LN and BTC ids -pub(crate) fn add_channel( +pub(crate) fn add_channel_skipping_utxo_update( gossip_sync: &P2PGossipSync>>, Arc, Arc>, - secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64 + secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64, ) { let valid_announcement = channel_announcement(node_1_privkey, node_2_privkey, features, short_channel_id, secp_ctx); - let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey); + + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey); match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) { Ok(res) => assert!(res), - _ => panic!() + Err(e) => panic!("{:?}", e), }; } +pub(crate) fn add_channel( + gossip_sync: &P2PGossipSync>>, Arc, Arc>, + secp_ctx: &Secp256k1, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64, +) { + gossip_sync.utxo_lookup.as_ref().map(|checker| { + let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_1_privkey); + let 
node_2_pubkey = PublicKey::from_secret_key(&secp_ctx, &node_2_privkey); + let script_pubkey = make_funding_redeemscript(&node_1_pubkey, &node_2_pubkey).to_p2wsh(); + *checker.utxo_ret.lock().unwrap() = + UtxoResult::Sync(Ok(TxOut { value: Amount::from_sat(21_000_000_0000_0000), script_pubkey })); + }); + add_channel_skipping_utxo_update(gossip_sync, secp_ctx, node_1_privkey, node_2_privkey, features, short_channel_id); +} + pub(crate) fn add_or_update_node( gossip_sync: &P2PGossipSync>>, Arc, Arc>, secp_ctx: &Secp256k1, node_privkey: &SecretKey, features: NodeFeatures, timestamp: u32 @@ -197,18 +215,43 @@ pub(super) fn build_line_graph() -> ( (secp_ctx, network_graph, gossip_sync, chain_monitor, logger) } +pub(super) fn build_graph_with_gossip_validation() -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, +) { + do_build_graph(true) +} + pub(super) fn build_graph() -> ( Secp256k1, sync::Arc>>, P2PGossipSync>>, sync::Arc, sync::Arc>, sync::Arc, sync::Arc, +) { + do_build_graph(false) +} + +fn do_build_graph(with_validation: bool) -> ( + Secp256k1, + sync::Arc>>, + P2PGossipSync>>, sync::Arc, sync::Arc>, + sync::Arc, + sync::Arc, ) { let secp_ctx = Secp256k1::new(); let logger = Arc::new(test_utils::TestLogger::new()); let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet)); let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, Arc::clone(&logger))); - let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), None, Arc::clone(&logger)); + let checker = if with_validation { + Some(Arc::clone(&chain_monitor)) + } else { + None + }; + let gossip_sync = P2PGossipSync::new(Arc::clone(&network_graph), checker, Arc::clone(&logger)); // Build network from our_id to node6: // // -1(1)2- node0 -1(3)2- diff --git a/lightning/src/routing/utxo.rs b/lightning/src/routing/utxo.rs index 4299dffb90f..f46160f1f14 100644 --- a/lightning/src/routing/utxo.rs +++ b/lightning/src/routing/utxo.rs 
@@ -21,8 +21,9 @@ use bitcoin::hex::DisplayHex; use crate::ln::chan_utils::make_funding_redeemscript_from_slices; use crate::ln::msgs::{self, ErrorAction, LightningError, MessageSendEvent}; -use crate::routing::gossip::{NetworkGraph, NodeId, P2PGossipSync}; +use crate::routing::gossip::{NetworkGraph, NodeId}; use crate::util::logger::{Level, Logger}; +use crate::util::wakers::Notifier; use crate::prelude::*; @@ -64,8 +65,14 @@ pub trait UtxoLookup { /// Returns an error if `chain_hash` is for a different chain or if such a transaction output is /// unknown. /// + /// An `async_completion_notifier` is provided which should be [`Notifier::notify`]ed upon + /// resolution of the [`UtxoFuture`] in case this method returns [`UtxoResult::Async`]. + /// /// [`short_channel_id`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#definition-of-short_channel_id - fn get_utxo(&self, chain_hash: &ChainHash, short_channel_id: u64) -> UtxoResult; + fn get_utxo( + &self, chain_hash: &ChainHash, short_channel_id: u64, + async_completion_notifier: Arc, + ) -> UtxoResult; } enum ChannelAnnouncement { @@ -108,6 +115,7 @@ impl ChannelUpdate { } struct UtxoMessages { + notifier: Arc, complete: Option>, channel_announce: Option, latest_node_announce_a: Option, @@ -128,166 +136,32 @@ pub struct UtxoFuture { /// once we have a concrete resolution of a request. pub(crate) struct UtxoResolver(Result); impl UtxoLookup for UtxoResolver { - fn get_utxo(&self, _chain_hash: &ChainHash, _short_channel_id: u64) -> UtxoResult { + fn get_utxo(&self, _hash: &ChainHash, _scid: u64, _notifier: Arc) -> UtxoResult { UtxoResult::Sync(self.0.clone()) } } impl UtxoFuture { /// Builds a new future for later resolution. 
- #[rustfmt::skip] - pub fn new() -> Self { - Self { state: Arc::new(Mutex::new(UtxoMessages { - complete: None, - channel_announce: None, - latest_node_announce_a: None, - latest_node_announce_b: None, - latest_channel_update_a: None, - latest_channel_update_b: None, - }))} - } - - /// Resolves this future against the given `graph` and with the given `result`. - /// - /// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling - /// forwarding the validated gossip message onwards to peers. - /// - /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order - /// to allow us to interact with peers again, you should call [`PeerManager::process_events`] - /// after this. - /// - /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high - /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events - pub fn resolve_without_forwarding( - &self, graph: &NetworkGraph, result: Result, - ) where - L::Target: Logger, - { - self.do_resolve(graph, result); - } - - /// Resolves this future against the given `graph` and with the given `result`. - /// - /// The given `gossip` is used to broadcast any validated messages onwards to all peers which - /// have available buffer space. - /// - /// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order - /// to allow us to interact with peers again, you should call [`PeerManager::process_events`] - /// after this. 
- /// - /// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high - /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events - pub fn resolve< - L: Deref, - G: Deref>, - U: Deref, - GS: Deref>, - >( - &self, graph: &NetworkGraph, gossip: GS, result: Result, - ) where - L::Target: Logger, - U::Target: UtxoLookup, - { - let mut res = self.do_resolve(graph, result); - for msg_opt in res.iter_mut() { - if let Some(msg) = msg_opt.take() { - gossip.forward_gossip_msg(msg); - } + pub fn new(notifier: Arc) -> Self { + Self { + state: Arc::new(Mutex::new(UtxoMessages { + notifier, + complete: None, + channel_announce: None, + latest_node_announce_a: None, + latest_node_announce_b: None, + latest_channel_update_a: None, + latest_channel_update_b: None, + })), } } - #[rustfmt::skip] - fn do_resolve(&self, graph: &NetworkGraph, result: Result) - -> [Option; 5] where L::Target: Logger { - let (announcement, node_a, node_b, update_a, update_b) = { - let mut pending_checks = graph.pending_checks.internal.lock().unwrap(); - let mut async_messages = self.state.lock().unwrap(); - - if async_messages.channel_announce.is_none() { - // We raced returning to `check_channel_announcement` which hasn't updated - // `channel_announce` yet. That's okay, we can set the `complete` field which it will - // check once it gets control again. 
- async_messages.complete = Some(result); - return [None, None, None, None, None]; - } - - let announcement_msg = match async_messages.channel_announce.as_ref().unwrap() { - ChannelAnnouncement::Full(signed_msg) => &signed_msg.contents, - ChannelAnnouncement::Unsigned(msg) => &msg, - }; - - pending_checks.lookup_completed(announcement_msg, &Arc::downgrade(&self.state)); - - (async_messages.channel_announce.take().unwrap(), - async_messages.latest_node_announce_a.take(), - async_messages.latest_node_announce_b.take(), - async_messages.latest_channel_update_a.take(), - async_messages.latest_channel_update_b.take()) - }; - - let mut res = [None, None, None, None, None]; - let mut res_idx = 0; - - // Now that we've updated our internal state, pass the pending messages back through the - // network graph with a different `UtxoLookup` which will resolve immediately. - // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do - // with them. - let resolver = UtxoResolver(result); - let (node_id_1, node_id_2) = match &announcement { - ChannelAnnouncement::Full(signed_msg) => (signed_msg.contents.node_id_1, signed_msg.contents.node_id_2), - ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2), - }; - match announcement { - ChannelAnnouncement::Full(signed_msg) => { - if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastChannelAnnouncement { - msg: signed_msg, update_msg: None, - }); - res_idx += 1; - } - }, - ChannelAnnouncement::Unsigned(msg) => { - let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver)); - }, - } - - for announce in core::iter::once(node_a).chain(core::iter::once(node_b)) { - match announce { - Some(NodeAnnouncement::Full(signed_msg)) => { - if graph.update_node_from_announcement(&signed_msg).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastNodeAnnouncement { - msg: signed_msg, - }); - res_idx 
+= 1; - } - }, - Some(NodeAnnouncement::Unsigned(msg)) => { - let _ = graph.update_node_from_unsigned_announcement(&msg); - }, - None => {}, - } - } - - for update in core::iter::once(update_a).chain(core::iter::once(update_b)) { - match update { - Some(ChannelUpdate::Full(signed_msg)) => { - if graph.update_channel(&signed_msg).is_ok() { - res[res_idx] = Some(MessageSendEvent::BroadcastChannelUpdate { - msg: signed_msg, - node_id_1, - node_id_2, - }); - res_idx += 1; - } - }, - Some(ChannelUpdate::Unsigned(msg)) => { - let _ = graph.update_channel_unsigned(&msg); - }, - None => {}, - } - } - - res + /// Resolves this future with the given result. + pub fn resolve(&self, result: Result) { + let mut state = self.state.lock().unwrap(); + state.complete = Some(result); + state.notifier.notify(); } } @@ -296,39 +170,21 @@ struct PendingChecksContext { nodes: HashMap>>>, } -impl PendingChecksContext { - #[rustfmt::skip] - fn lookup_completed(&mut self, - msg: &msgs::UnsignedChannelAnnouncement, completed_state: &Weak> - ) { - if let hash_map::Entry::Occupied(e) = self.channels.entry(msg.short_channel_id) { - if Weak::ptr_eq(e.get(), &completed_state) { - e.remove(); - } - } - - if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_1) { - e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state)); - if e.get().is_empty() { e.remove(); } - } - if let hash_map::Entry::Occupied(mut e) = self.nodes.entry(msg.node_id_2) { - e.get_mut().retain(|elem| !Weak::ptr_eq(&elem, &completed_state)); - if e.get().is_empty() { e.remove(); } - } - } -} - /// A set of messages which are pending UTXO lookups for processing. 
pub(super) struct PendingChecks { internal: Mutex, + pub(super) completion_notifier: Arc, } impl PendingChecks { - #[rustfmt::skip] pub(super) fn new() -> Self { - PendingChecks { internal: Mutex::new(PendingChecksContext { - channels: new_hash_map(), nodes: new_hash_map(), - }) } + PendingChecks { + internal: Mutex::new(PendingChecksContext { + channels: new_hash_map(), + nodes: new_hash_map(), + }), + completion_notifier: Arc::new(Notifier::new()), + } } /// Checks if there is a pending `channel_update` UTXO validation for the given channel, @@ -519,7 +375,8 @@ impl PendingChecks { Ok(None) }, &Some(ref utxo_lookup) => { - match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id) { + let notifier = Arc::clone(&self.completion_notifier); + match utxo_lookup.get_utxo(&msg.chain_hash, msg.short_channel_id, notifier) { UtxoResult::Sync(res) => handle_result(res), UtxoResult::Async(future) => { let mut pending_checks = self.internal.lock().unwrap(); @@ -581,6 +438,142 @@ impl PendingChecks { false } } + + fn resolve_single_future( + &self, graph: &NetworkGraph, entry: Arc>, + new_messages: &mut Vec, + ) where + L::Target: Logger, + { + let (announcement, result, announce_a, announce_b, update_a, update_b); + { + let mut state = entry.lock().unwrap(); + announcement = if let Some(announcement) = state.channel_announce.take() { + announcement + } else { + // We raced returning to `check_channel_announcement` which hasn't updated + // `channel_announce` yet. That's okay, we can set the `complete` field which it will + // check once it gets control again. 
+ return; + }; + + result = if let Some(result) = state.complete.take() { + result + } else { + debug_assert!(false, "Future should have been resolved"); + return; + }; + + announce_a = state.latest_node_announce_a.take(); + announce_b = state.latest_node_announce_b.take(); + update_a = state.latest_channel_update_a.take(); + update_b = state.latest_channel_update_b.take(); + } + + // Now that we've updated our internal state, pass the pending messages back through the + // network graph with a different `UtxoLookup` which will resolve immediately. + // Note that we ignore errors as we don't disconnect peers anyway, so there's nothing to do + // with them. + let resolver = UtxoResolver(result); + let (node_id_1, node_id_2) = match &announcement { + ChannelAnnouncement::Full(signed_msg) => { + (signed_msg.contents.node_id_1, signed_msg.contents.node_id_2) + }, + ChannelAnnouncement::Unsigned(msg) => (msg.node_id_1, msg.node_id_2), + }; + match announcement { + ChannelAnnouncement::Full(signed_msg) => { + if graph.update_channel_from_announcement(&signed_msg, &Some(&resolver)).is_ok() { + new_messages.push(MessageSendEvent::BroadcastChannelAnnouncement { + msg: signed_msg, + update_msg: None, + }); + } + }, + ChannelAnnouncement::Unsigned(msg) => { + let _ = graph.update_channel_from_unsigned_announcement(&msg, &Some(&resolver)); + }, + } + + for announce in [announce_a, announce_b] { + match announce { + Some(NodeAnnouncement::Full(signed_msg)) => { + if graph.update_node_from_announcement(&signed_msg).is_ok() { + new_messages + .push(MessageSendEvent::BroadcastNodeAnnouncement { msg: signed_msg }); + } + }, + Some(NodeAnnouncement::Unsigned(msg)) => { + let _ = graph.update_node_from_unsigned_announcement(&msg); + }, + None => {}, + } + } + + for update in [update_a, update_b] { + match update { + Some(ChannelUpdate::Full(signed_msg)) => { + if graph.update_channel(&signed_msg).is_ok() { + new_messages.push(MessageSendEvent::BroadcastChannelUpdate { + msg: 
signed_msg, + node_id_1, + node_id_2, + }); + } + }, + Some(ChannelUpdate::Unsigned(msg)) => { + let _ = graph.update_channel_unsigned(&msg); + }, + None => {}, + } + } + } + + pub(super) fn check_resolved_futures( + &self, graph: &NetworkGraph, + ) -> Vec + where + L::Target: Logger, + { + let mut completed_states = Vec::new(); + { + let mut lck = self.internal.lock().unwrap(); + lck.channels.retain(|_, state| { + if let Some(state) = state.upgrade() { + if state.lock().unwrap().complete.is_some() { + completed_states.push(state); + false + } else { + true + } + } else { + // The UtxoFuture has been dropped, drop the pending-lookup state. + false + } + }); + lck.nodes.retain(|_, lookups| { + lookups.retain(|state| { + if let Some(state) = state.upgrade() { + if state.lock().unwrap().complete.is_some() { + completed_states.push(state); + false + } else { + true + } + } else { + // The UtxoFuture has been dropped, drop the pending-lookup state. + false + } + }); + !lookups.is_empty() + }); + } + let mut res = Vec::with_capacity(completed_states.len() * 5); + for state in completed_states { + self.resolve_single_future(graph, state, &mut res); + } + res + } } #[cfg(test)] @@ -636,9 +629,12 @@ mod tests { // `get_utxo` call can read it still resolve properly. let (valid_announcement, chain_source, network_graph, good_script, ..) 
= get_test_objects(); - let future = UtxoFuture::new(); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap(); @@ -652,7 +648,8 @@ mod tests { let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, node_b_announce, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -660,8 +657,9 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); + future.resolve(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).unwrap(); @@ -681,7 +679,8 @@ mod tests { // Test an async lookup which returns an incorrect script let (valid_announcement, chain_source, network_graph, ..) 
= get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -689,8 +688,10 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: bitcoin::ScriptBuf::new() })); + let value = Amount::from_sat(1_000_000); + future.resolve(Ok(TxOut { value, script_pubkey: bitcoin::ScriptBuf::new() })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -700,7 +701,8 @@ mod tests { // Test an async lookup which returns an error let (valid_announcement, chain_source, network_graph, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -708,7 +710,9 @@ mod tests { "Channel being checked async"); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); - future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + future.resolve(Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels().get(&valid_announcement.contents.short_channel_id).is_none()); } @@ -720,7 +724,8 @@ mod tests { let (valid_announcement, chain_source, network_graph, good_script, node_a_announce, node_b_announce, chan_update_a, chan_update_b, ..) 
= get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -740,8 +745,11 @@ mod tests { assert_eq!(network_graph.update_channel(&chan_update_b).unwrap_err().err, "Awaiting channel_announcement validation to accept channel_update"); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(!notifier.notify_pending()); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap().one_to_two.is_some()); @@ -762,7 +770,8 @@ mod tests { let (valid_announcement, chain_source, network_graph, good_script, _, _, chan_update_a, chan_update_b, chan_update_c, ..) 
= get_test_objects(); - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -777,8 +786,10 @@ mod tests { assert_eq!(network_graph.update_channel(&chan_update_c).unwrap_err().err, "Awaiting channel_announcement validation to accept channel_update"); - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(!notifier.notify_pending()); + future.resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert_eq!(chan_update_a.contents.timestamp, chan_update_b.contents.timestamp); let graph_lock = network_graph.read_only(); @@ -797,7 +808,8 @@ mod tests { // only if the channel_announcement message is identical. let (valid_announcement, chain_source, network_graph, good_script, ..) = get_test_objects(); - let future = UtxoFuture::new(); + let notifier_a = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier_a)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); assert_eq!( @@ -806,7 +818,8 @@ mod tests { assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 1); // If we make a second request with the same message, the call count doesn't increase... 
- let future_b = UtxoFuture::new(); + let notifier_b = Arc::new(Notifier::new()); + let future_b = UtxoFuture::new(Arc::clone(¬ifier_b)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future_b.clone()); assert_eq!( network_graph.update_channel_from_announcement(&valid_announcement, &Some(&chain_source)).unwrap_err().err, @@ -825,8 +838,11 @@ mod tests { assert_eq!(chain_source.get_utxo_call_count.load(Ordering::Relaxed), 2); // Still, if we resolve the original future, the original channel will be accepted. - future.resolve_without_forwarding(&network_graph, - Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + future + .resolve(Ok(TxOut { value: Amount::from_sat(1_000_000), script_pubkey: good_script })); + assert!(notifier_a.notify_pending()); + assert!(!notifier_b.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.read_only().channels() .get(&valid_announcement.contents.short_channel_id).unwrap() .announcement_message.as_ref().unwrap() @@ -842,7 +858,8 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. - let future = UtxoFuture::new(); + let notifier = Arc::new(Notifier::new()); + let future = UtxoFuture::new(Arc::clone(¬ifier)); *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(future.clone()); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); @@ -861,7 +878,9 @@ mod tests { assert!(network_graph.pending_checks.too_many_checks_pending()); // Once the future completes the "too many checks" flag should reset. 
- future.resolve_without_forwarding(&network_graph, Err(UtxoLookupError::UnknownTx)); + future.resolve(Err(UtxoLookupError::UnknownTx)); + assert!(notifier.notify_pending()); + network_graph.pending_checks.check_resolved_futures(&network_graph); assert!(!network_graph.pending_checks.too_many_checks_pending()); } @@ -874,7 +893,8 @@ mod tests { let (chain_source, network_graph) = get_network(); // We cheat and use a single future for all the lookups to complete them all at once. - *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new()); + let notifier = Arc::new(Notifier::new()); + *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Async(UtxoFuture::new(notifier)); let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap(); let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap(); diff --git a/lightning/src/util/async_poll.rs b/lightning/src/util/async_poll.rs index 9c2ca4c247f..57df5b26cb0 100644 --- a/lightning/src/util/async_poll.rs +++ b/lightning/src/util/async_poll.rs @@ -15,26 +15,100 @@ use core::marker::Unpin; use core::pin::Pin; use core::task::{Context, Poll, RawWaker, RawWakerVTable, Waker}; -pub(crate) enum ResultFuture>, E: Unpin> { +pub(crate) enum ResultFuture + Unpin, O> { Pending(F), - Ready(Result<(), E>), + Ready(O), } -pub(crate) struct MultiResultFuturePoller> + Unpin, E: Unpin> { - futures_state: Vec>, +pub(crate) struct TwoFutureJoiner< + AO, + BO, + AF: Future + Unpin, + BF: Future + Unpin, +> { + a: Option>, + b: Option>, } -impl> + Unpin, E: Unpin> MultiResultFuturePoller { - pub fn new(futures_state: Vec>) -> Self { +impl + Unpin, BF: Future + Unpin> + TwoFutureJoiner +{ + pub fn new(future_a: AF, future_b: BF) -> Self { + Self { a: Some(ResultFuture::Pending(future_a)), b: Some(ResultFuture::Pending(future_b)) } + } +} + +impl + Unpin, BF: Future + Unpin> Future + for TwoFutureJoiner +{ + type Output = (AO, BO); + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<(AO, BO)> { + let mut 
have_pending_futures = false; + // SAFETY: While we are pinned, we can't get direct access to our internal state because we + // aren't `Unpin`. However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. + let state = unsafe { &mut self.get_unchecked_mut() }; + macro_rules! poll_future { + ($future: ident) => { + match state.$future { + Some(ResultFuture::Pending(ref mut fut)) => match Pin::new(fut).poll(cx) { + Poll::Ready(res) => { + state.$future = Some(ResultFuture::Ready(res)); + }, + Poll::Pending => { + have_pending_futures = true; + }, + }, + Some(ResultFuture::Ready(_)) => {}, + None => { + debug_assert!(false, "Future polled after Ready"); + return Poll::Pending; + }, + } + }; + } + poll_future!(a); + poll_future!(b); + + if have_pending_futures { + Poll::Pending + } else { + Poll::Ready(( + match state.a.take() { + Some(ResultFuture::Ready(a)) => a, + _ => unreachable!(), + }, + match state.b.take() { + Some(ResultFuture::Ready(b)) => b, + _ => unreachable!(), + }, + )) + } + } +} + +pub(crate) struct MultiResultFuturePoller + Unpin, O> { + futures_state: Vec>, +} + +impl + Unpin, O> MultiResultFuturePoller { + pub fn new(futures_state: Vec>) -> Self { Self { futures_state } } } -impl> + Unpin, E: Unpin> Future for MultiResultFuturePoller { - type Output = Vec>; - fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll>> { +impl + Unpin, O> Future for MultiResultFuturePoller { + type Output = Vec; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let mut have_pending_futures = false; - let futures_state = &mut self.get_mut().futures_state; + // SAFETY: While we are pinned, we can't get direct access to `futures_state` because we + // aren't `Unpin`. 
However, we don't actually need the `Pin` - we only use it below on the + // `Future` in the `ResultFuture::Pending` case, and the `Future` is bound by `Unpin`. + // Thus, the `Pin` is not actually used, and its safe to bypass it and access the inner + // reference directly. + let futures_state = unsafe { &mut self.get_unchecked_mut().futures_state }; for state in futures_state.iter_mut() { match state { ResultFuture::Pending(ref mut fut) => match Pin::new(fut).poll(cx) { diff --git a/lightning/src/util/hash_tables.rs b/lightning/src/util/hash_tables.rs index 00341d57b45..b6555975191 100644 --- a/lightning/src/util/hash_tables.rs +++ b/lightning/src/util/hash_tables.rs @@ -6,10 +6,75 @@ pub use hashbrown::hash_map; mod hashbrown_tables { - #[cfg(feature = "std")] + #[cfg(all(feature = "std", not(test)))] mod hasher { pub use std::collections::hash_map::RandomState; } + #[cfg(all(feature = "std", test))] + mod hasher { + #![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std. + use core::hash::{BuildHasher, Hasher}; + + /// A [`BuildHasher`] for tests that supports deterministic behavior via environment variable. + /// + /// When `LDK_TEST_DETERMINISTIC_HASHES` is set, uses fixed keys for deterministic iteration. + /// Otherwise, delegates to std's RandomState for random hashing. + #[derive(Clone)] + pub enum RandomState { + Std(std::collections::hash_map::RandomState), + Deterministic, + } + + impl RandomState { + pub fn new() -> RandomState { + if std::env::var("LDK_TEST_DETERMINISTIC_HASHES").map(|v| v == "1").unwrap_or(false) + { + RandomState::Deterministic + } else { + RandomState::Std(std::collections::hash_map::RandomState::new()) + } + } + } + + impl Default for RandomState { + fn default() -> RandomState { + RandomState::new() + } + } + + /// A hasher wrapper that delegates to either std's DefaultHasher or a deterministic SipHasher. 
+ pub enum RandomStateHasher { + Std(std::collections::hash_map::DefaultHasher), + Deterministic(core::hash::SipHasher), + } + + impl Hasher for RandomStateHasher { + fn finish(&self) -> u64 { + match self { + RandomStateHasher::Std(h) => h.finish(), + RandomStateHasher::Deterministic(h) => h.finish(), + } + } + fn write(&mut self, bytes: &[u8]) { + match self { + RandomStateHasher::Std(h) => h.write(bytes), + RandomStateHasher::Deterministic(h) => h.write(bytes), + } + } + } + + impl BuildHasher for RandomState { + type Hasher = RandomStateHasher; + fn build_hasher(&self) -> RandomStateHasher { + match self { + RandomState::Std(s) => RandomStateHasher::Std(s.build_hasher()), + RandomState::Deterministic => { + RandomStateHasher::Deterministic(core::hash::SipHasher::new_with_keys(0, 0)) + }, + } + } + } + } #[cfg(not(feature = "std"))] mod hasher { #![allow(deprecated)] // hash::SipHasher was deprecated in favor of something only in std. diff --git a/lightning/src/util/native_async.rs b/lightning/src/util/native_async.rs index 886146e976d..0c380f2b1d1 100644 --- a/lightning/src/util/native_async.rs +++ b/lightning/src/util/native_async.rs @@ -8,23 +8,44 @@ //! environment. #[cfg(all(test, feature = "std"))] -use crate::sync::Mutex; +use crate::sync::{Arc, Mutex}; use crate::util::async_poll::{MaybeSend, MaybeSync}; +#[cfg(all(test, not(feature = "std")))] +use alloc::rc::Rc; + #[cfg(all(test, not(feature = "std")))] use core::cell::RefCell; +#[cfg(test)] +use core::convert::Infallible; use core::future::Future; #[cfg(test)] use core::pin::Pin; +#[cfg(test)] +use core::task::{Context, Poll}; -/// A generic trait which is able to spawn futures in the background. +/// A generic trait which is able to spawn futures to be polled in the background. +/// +/// When the spawned future completes, the returned [`Self::SpawnedFutureResult`] should resolve +/// with the output of the spawned future. 
+/// +/// Spawned futures must be polled independently in the background even if the returned +/// [`Self::SpawnedFutureResult`] is dropped without being polled. This matches the semantics of +/// `tokio::spawn`. /// /// This is not exported to bindings users as async is only supported in Rust. pub trait FutureSpawner: MaybeSend + MaybeSync + 'static { + /// The error type of [`Self::SpawnedFutureResult`]. This can be used to indicate that the + /// spawned future was cancelled or panicked. + type E; + /// The result of [`Self::spawn`], a future which completes when the spawned future completes. + type SpawnedFutureResult: Future> + Unpin; /// Spawns the given future as a background task. /// /// This method MUST NOT block on the given future immediately. - fn spawn + MaybeSend + 'static>(&self, future: T); + fn spawn + MaybeSend + 'static>( + &self, future: T, + ) -> Self::SpawnedFutureResult; } #[cfg(test)] @@ -39,6 +60,77 @@ pub(crate) struct FutureQueue(Mutex>>>); #[cfg(all(test, not(feature = "std")))] pub(crate) struct FutureQueue(RefCell>>>); +/// A simple future which can be completed later. Used to implement [`FutureQueue`]. 
+#[cfg(all(test, feature = "std"))] +pub struct FutureQueueCompletion(Arc>>); +#[cfg(all(test, not(feature = "std")))] +pub struct FutureQueueCompletion(Rc>>); + +#[cfg(all(test, feature = "std"))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Arc::new(Mutex::new(None))) + } + + fn complete(&self, o: O) { + *self.0.lock().unwrap() = Some(o); + } +} + +#[cfg(all(test, feature = "std"))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + #[cfg(all(test, feature = "std"))] + { + Self(Arc::clone(&self.0)) + } + #[cfg(all(test, not(feature = "std")))] + { + Self(Rc::clone(&self.0)) + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl FutureQueueCompletion { + fn new() -> Self { + Self(Rc::new(RefCell::new(None))) + } + + fn complete(&self, o: O) { + *self.0.borrow_mut() = Some(o); + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Clone for FutureQueueCompletion { + fn clone(&self) -> Self { + Self(self.0.clone()) + } +} + +#[cfg(all(test, feature = "std"))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.lock().unwrap().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + +#[cfg(all(test, not(feature = "std")))] +impl Future for FutureQueueCompletion { + type Output = Result; + fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + match Pin::into_inner(self).0.borrow_mut().take() { + None => Poll::Pending, + Some(o) => Poll::Ready(Ok(o)), + } + } +} + #[cfg(test)] impl FutureQueue { pub(crate) fn new() -> Self { @@ -74,7 +166,6 @@ impl FutureQueue { futures = self.0.borrow_mut(); } futures.retain_mut(|fut| { - use core::task::{Context, Poll}; let waker = crate::util::async_poll::dummy_waker(); match fut.as_mut().poll(&mut Context::from_waker(&waker)) { Poll::Ready(()) => false, @@ -86,7 +177,16 @@ impl FutureQueue { #[cfg(test)] impl FutureSpawner for FutureQueue { - fn spawn + 
MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -95,6 +195,7 @@ impl FutureSpawner for FutureQueue { { self.0.borrow_mut().push(Box::pin(future)); } + completion } } @@ -102,7 +203,16 @@ impl FutureSpawner for FutureQueue { impl + MaybeSend + MaybeSync + 'static> FutureSpawner for D { - fn spawn + MaybeSend + 'static>(&self, future: T) { + type E = Infallible; + type SpawnedFutureResult = FutureQueueCompletion; + fn spawn + MaybeSend + 'static>( + &self, f: F, + ) -> FutureQueueCompletion { + let completion = FutureQueueCompletion::new(); + let compl_ref = completion.clone(); + let future = async move { + compl_ref.complete(f.await); + }; #[cfg(feature = "std")] { self.0.lock().unwrap().push(Box::pin(future)); @@ -111,5 +221,6 @@ impl + MaybeSend + MaybeSync + 'static { self.0.borrow_mut().push(Box::pin(future)); } + completion } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index e15209676e3..2e1e8805d0a 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -16,6 +16,7 @@ use alloc::sync::Arc; use bitcoin::hashes::hex::FromHex; use bitcoin::{BlockHash, Txid}; +use core::convert::Infallible; use core::future::Future; use core::mem; use core::ops::Deref; @@ -34,7 +35,9 @@ use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider}; use crate::sync::Mutex; -use crate::util::async_poll::{dummy_waker, MaybeSend, MaybeSync}; +use crate::util::async_poll::{ + dummy_waker, MaybeSend, MaybeSync, MultiResultFuturePoller, ResultFuture, TwoFutureJoiner, +}; use 
crate::util::logger::Logger; use crate::util::native_async::FutureSpawner; use crate::util::ser::{Readable, ReadableArgs, Writeable}; @@ -489,7 +492,11 @@ where struct PanicingSpawner; impl FutureSpawner for PanicingSpawner { - fn spawn + MaybeSend + 'static>(&self, _: T) { + type E = Infallible; + type SpawnedFutureResult = Box> + Unpin>; + fn spawn + MaybeSend + 'static>( + &self, _: T, + ) -> Self::SpawnedFutureResult { unreachable!(); } } @@ -569,15 +576,6 @@ fn poll_sync_future(future: F) -> F::Output { /// list channel monitors themselves and load channels individually using /// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`]. /// -/// ## EXTREMELY IMPORTANT -/// -/// It is extremely important that your [`KVStoreSync::read`] implementation uses the -/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in -/// that circumstance (not when there is really a permissions error, for example). This is because -/// neither channel monitor reading function lists updates. Instead, either reads the monitor, and -/// using its stored `update_id`, synthesizes update storage keys, and tries them in sequence until -/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`]. -/// /// # Pruning stale channel updates /// /// Stale updates are pruned when the consolidation threshold is reached according to `maximum_pending_updates`. @@ -651,10 +649,6 @@ where } /// Reads all stored channel monitors, along with any stored updates for them. - /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. pub fn read_all_channel_monitors_with_updates( &self, ) -> Result< @@ -666,10 +660,6 @@ where /// Read a single channel monitor, along with any stored updates for it. 
/// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. - /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. For example, given: /// @@ -863,9 +853,13 @@ where /// Reads all stored channel monitors, along with any stored updates for them. /// - /// It is extremely important that your [`KVStore::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. + /// While the reads themselves are performed in parallel, deserializing the + /// [`ChannelMonitor`]s is not. For large [`ChannelMonitor`]s actively used for forwarding, + /// this may substantially limit the parallelism of this method. + /// + /// If you can move this object into an `Arc`, consider using + /// [`Self::read_all_channel_monitors_with_updates_parallel`] to parallelize the CPU-bound + /// deserialization as well. 
pub async fn read_all_channel_monitors_with_updates( &self, ) -> Result< @@ -875,22 +869,70 @@ where let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; let monitor_list = self.0.kv_store.list(primary, secondary).await?; - let mut res = Vec::with_capacity(monitor_list.len()); + let mut futures = Vec::with_capacity(monitor_list.len()); for monitor_key in monitor_list { - let result = - self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await?; - if let Some(read_res) = result { + futures.push(ResultFuture::Pending(Box::pin(async move { + self.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + if let Some(read_res) = result? { res.push(read_res); } } Ok(res) } - /// Read a single channel monitor, along with any stored updates for it. + /// Reads all stored channel monitors, along with any stored updates for them, in parallel. /// - /// It is extremely important that your [`KVStoreSync::read`] implementation uses the - /// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the - /// documentation for [`MonitorUpdatingPersister`]. + /// Because deserializing large [`ChannelMonitor`]s from forwarding nodes is often CPU-bound, + /// this version of [`Self::read_all_channel_monitors_with_updates`] uses the [`FutureSpawner`] + /// to parallelize deserialization as well as the IO operations. + /// + /// Because [`FutureSpawner`] requires that the spawned future be `'static` (matching `tokio` + /// and other multi-threaded runtime requirements), this method requires that `self` be an + /// `Arc` that can live for `'static` and be sent and accessed across threads. 
+ pub async fn read_all_channel_monitors_with_updates_parallel( + self: &Arc, + ) -> Result< + Vec<(BlockHash, ChannelMonitor<::EcdsaSigner>)>, + io::Error, + > + where + K: MaybeSend + MaybeSync + 'static, + L: MaybeSend + MaybeSync + 'static, + ES: MaybeSend + MaybeSync + 'static, + SP: MaybeSend + MaybeSync + 'static, + BI: MaybeSend + MaybeSync + 'static, + FE: MaybeSend + MaybeSync + 'static, + ::EcdsaSigner: MaybeSend, + { + let primary = CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE; + let secondary = CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE; + let monitor_list = self.0.kv_store.list(primary, secondary).await?; + let mut futures = Vec::with_capacity(monitor_list.len()); + for monitor_key in monitor_list { + let us = Arc::clone(&self); + futures.push(ResultFuture::Pending(self.0.future_spawner.spawn(async move { + us.0.maybe_read_channel_monitor_with_updates(monitor_key.as_str()).await + }))); + } + let future_results = MultiResultFuturePoller::new(futures).await; + let mut res = Vec::with_capacity(future_results.len()); + for result in future_results { + match result { + Err(_) => return Err(io::Error::new(io::ErrorKind::Other, "Future was cancelled")), + Ok(Err(e)) => return Err(e), + Ok(Ok(Some(read_res))) => res.push(read_res), + Ok(Ok(None)) => {}, + } + } + Ok(res) + } + + /// Read a single channel monitor, along with any stored updates for it. /// /// For `monitor_key`, channel storage keys can be the channel's funding [`OutPoint`], with an /// underscore `_` between txid and index for v1 channels. 
For example, given: @@ -952,7 +994,7 @@ where let future = inner.persist_new_channel(monitor_name, monitor); let channel_id = monitor.channel_id(); let completion = (monitor.channel_id(), monitor.get_latest_update_id()); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => { inner.async_completed_updates.lock().unwrap().push(completion); @@ -984,7 +1026,7 @@ where None }; let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { match future.await { Ok(()) => if let Some(completion) = completion { inner.async_completed_updates.lock().unwrap().push(completion); @@ -1002,7 +1044,7 @@ where pub(crate) fn spawn_async_archive_persisted_channel(&self, monitor_name: MonitorName) { let inner = Arc::clone(&self.0); - self.0.future_spawner.spawn(async move { + let _runs_free = self.0.future_spawner.spawn(async move { inner.archive_persisted_channel(monitor_name).await; }); } @@ -1050,28 +1092,29 @@ where io::Error, > { let monitor_name = MonitorName::from_str(monitor_key)?; - let read_res = self.maybe_read_monitor(&monitor_name, monitor_key).await?; - let (block_hash, monitor) = match read_res { + let read_future = pin!(self.maybe_read_monitor(&monitor_name, monitor_key)); + let list_future = pin!(self + .kv_store + .list(CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE, monitor_key)); + let (read_res, list_res) = TwoFutureJoiner::new(read_future, list_future).await; + let (block_hash, monitor) = match read_res? 
{ Some(res) => res, None => return Ok(None), }; - let mut current_update_id = monitor.get_latest_update_id(); - // TODO: Parallelize this loop by speculatively reading a batch of updates - loop { - current_update_id = match current_update_id.checked_add(1) { - Some(next_update_id) => next_update_id, - None => break, - }; - let update_name = UpdateName::from(current_update_id); - let update = match self.read_monitor_update(monitor_key, &update_name).await { - Ok(update) => update, - Err(err) if err.kind() == io::ErrorKind::NotFound => { - // We can't find any more updates, so we are done. - break; - }, - Err(err) => return Err(err), - }; - + let current_update_id = monitor.get_latest_update_id(); + let updates: Result, _> = + list_res?.into_iter().map(|name| UpdateName::new(name)).collect(); + let mut updates = updates?; + updates.sort_unstable(); + let updates_to_load = updates.iter().filter(|update| update.0 > current_update_id); + let mut update_futures = Vec::with_capacity(updates_to_load.clone().count()); + for update_name in updates_to_load { + update_futures.push(ResultFuture::Pending(Box::pin(async move { + (update_name, self.read_monitor_update(monitor_key, update_name).await) + }))); + } + for (update_name, update_res) in MultiResultFuturePoller::new(update_futures).await { + let update = update_res?; monitor .update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger) .map_err(|e| { @@ -1458,7 +1501,7 @@ impl core::fmt::Display for MonitorName { /// let monitor_name = "some_monitor_name"; /// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str()); /// ``` -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct UpdateName(pub u64, String); impl UpdateName { diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 647e7c77a6c..86b24e1b849 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -852,6 +852,9 @@ 
macro_rules! _init_tlv_based_struct_field { ($field: ident, (required_vec, encoding: ($fieldty: ty, $encoding: ident))) => { $crate::_init_tlv_based_struct_field!($field, required) }; + ($field: ident, (option, encoding: ($fieldty: ty, $encoding: ident))) => { + $crate::_init_tlv_based_struct_field!($field, option) + }; ($field: ident, optional_vec) => { $field.unwrap() }; @@ -1924,4 +1927,31 @@ mod tests { LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); assert_eq!(decoded, instance); } + + #[test] + fn test_option_with_encoding() { + // Ensure that serializing an option with a specified encoding will survive a ser round + // trip for Some and None options. + #[derive(PartialEq, Eq, Debug)] + struct MyCustomStruct { + tlv_field: Option, + } + + impl_writeable_msg!(MyCustomStruct, {}, { + (1, tlv_field, (option, encoding: (u64, HighZeroBytesDroppedBigSize))), + }); + + for tlv_field in [None, Some(0u64), Some(255u64)] { + let instance = MyCustomStruct { tlv_field }; + let encoded = instance.encode(); + let decoded: MyCustomStruct = + LengthReadable::read_from_fixed_length_buffer(&mut &encoded[..]).unwrap(); + assert_eq!( + decoded, + MyCustomStruct { tlv_field }, + "option custom encoding failed for: {:?}", + tlv_field + ); + } + } } diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 50514e0a894..34f5d5fe36e 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -61,6 +61,7 @@ use crate::util::mut_global::MutGlobal; use crate::util::persist::{KVStore, KVStoreSync, MonitorName}; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; use crate::util::test_channel_signer::{EnforcementState, TestChannelSigner}; +use crate::util::wakers::Notifier; use bitcoin::amount::Amount; use bitcoin::block::Block; @@ -2101,7 +2102,7 @@ impl TestChainSource { } impl UtxoLookup for TestChainSource { - fn get_utxo(&self, chain_hash: &ChainHash, _short_channel_id: u64) -> 
UtxoResult { + fn get_utxo(&self, chain_hash: &ChainHash, _scid: u64, _notifier: Arc) -> UtxoResult { self.get_utxo_call_count.fetch_add(1, Ordering::Relaxed); if self.chain_hash != *chain_hash { return UtxoResult::Sync(Err(UtxoLookupError::UnknownChain)); diff --git a/lightning/src/util/wakers.rs b/lightning/src/util/wakers.rs index a84d90960d8..17edadfd822 100644 --- a/lightning/src/util/wakers.rs +++ b/lightning/src/util/wakers.rs @@ -253,37 +253,13 @@ impl Sleeper { pub fn from_single_future(future: &Future) -> Self { Self { notifiers: vec![Arc::clone(&future.state)] } } - /// Constructs a new sleeper from two futures, allowing blocking on both at once. - pub fn from_two_futures(fut_a: &Future, fut_b: &Future) -> Self { - Self { notifiers: vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state)] } - } - /// Constructs a new sleeper from three futures, allowing blocking on all three at once. - /// - // Note that this is the common case - a ChannelManager, a ChainMonitor, and an - // OnionMessenger. - pub fn from_three_futures(fut_a: &Future, fut_b: &Future, fut_c: &Future) -> Self { - let notifiers = - vec![Arc::clone(&fut_a.state), Arc::clone(&fut_b.state), Arc::clone(&fut_c.state)]; - Self { notifiers } - } - /// Constructs a new sleeper from four futures, allowing blocking on all four at once. - /// - // Note that this is another common case - a ChannelManager, a ChainMonitor, an - // OnionMessenger, and a LiquidityManager. - pub fn from_four_futures( - fut_a: &Future, fut_b: &Future, fut_c: &Future, fut_d: &Future, - ) -> Self { - let notifiers = vec![ - Arc::clone(&fut_a.state), - Arc::clone(&fut_b.state), - Arc::clone(&fut_c.state), - Arc::clone(&fut_d.state), - ]; - Self { notifiers } + /// Constructs an iterator of futures, allowing blocking on all at once. 
+ pub fn from_futures>(futures: I) -> Self { + Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() } } /// Constructs a new sleeper on many futures, allowing blocking on all at once. pub fn new(futures: Vec) -> Self { - Self { notifiers: futures.into_iter().map(|f| Arc::clone(&f.state)).collect() } + Self::from_futures(futures) } /// Prepares to go into a wait loop body, creating a condition variable which we can block on /// and an `Arc>>` which gets set to the waking `Future`'s state prior to the @@ -506,15 +482,13 @@ mod tests { // Wait on the other thread to finish its sleep, note that the leak only happened if we // actually have to sleep here, not if we immediately return. - Sleeper::from_two_futures(&future_a, &future_b).wait(); + Sleeper::from_futures([future_a, future_b]).wait(); join_handle.join().unwrap(); // then drop the notifiers and make sure the future states are gone. mem::drop(notifier_a); mem::drop(notifier_b); - mem::drop(future_a); - mem::drop(future_b); assert!(future_state_a.upgrade().is_none() && future_state_b.upgrade().is_none()); } @@ -736,18 +710,18 @@ mod tests { // Set both notifiers as woken without sleeping yet. notifier_a.notify(); notifier_b.notify(); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // One future has woken us up, but the other should still have a pending notification. - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // However once we've slept twice, we should no longer have any pending notifications - assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()) + assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]) .wait_timeout(Duration::from_millis(10))); // Test ordering somewhat more. 
notifier_a.notify(); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); } #[test] @@ -765,7 +739,7 @@ mod tests { // After sleeping one future (not guaranteed which one, however) will have its notification // bit cleared. - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); // By registering a callback on the futures for both notifiers, one will complete // immediately, but one will remain tied to the notifier, and will complete once the @@ -788,8 +762,8 @@ mod tests { notifier_b.notify(); assert!(callback_a.load(Ordering::SeqCst) && callback_b.load(Ordering::SeqCst)); - Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()).wait(); - assert!(!Sleeper::from_two_futures(¬ifier_a.get_future(), ¬ifier_b.get_future()) + Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]).wait(); + assert!(!Sleeper::from_futures([notifier_a.get_future(), notifier_b.get_future()]) .wait_timeout(Duration::from_millis(10))); } diff --git a/pending_changelog/4213.txt b/pending_changelog/4213.txt new file mode 100644 index 00000000000..791edd47804 --- /dev/null +++ b/pending_changelog/4213.txt @@ -0,0 +1,5 @@ +Backwards compat +================ + + * Outbound payments which are awaiting a response to a BOLT 12 invoice request + will not be able to complete after upgrading to 0.3 (#4213).