From 6bb663f8f21e7bb0c54137dca40b0d73f42efe1e Mon Sep 17 00:00:00 2001 From: Jurvis Tan <5944973+jurvis@users.noreply.github.com> Date: Sat, 7 May 2022 16:09:37 -0700 Subject: [PATCH] Send PaymentForwardedFailed for forward failures --- fuzz/src/chanmon_consistency.rs | 1 + lightning/src/ln/chanmon_update_fail_tests.rs | 14 +-- lightning/src/ln/channel.rs | 5 +- lightning/src/ln/channelmanager.rs | 117 ++++++++++++------ lightning/src/ln/functional_test_utils.rs | 72 +++++++++-- lightning/src/ln/functional_tests.rs | 72 ++++++----- lightning/src/ln/monitor_tests.rs | 2 +- lightning/src/ln/onion_route_tests.rs | 14 +-- lightning/src/ln/payment_tests.rs | 12 +- lightning/src/ln/priv_short_conf_tests.rs | 2 +- lightning/src/ln/reorg_tests.rs | 2 +- 11 files changed, 212 insertions(+), 101 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 8472f8fa627..5a528dae073 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -853,6 +853,7 @@ pub fn do_test(data: &[u8], underlying_out: Out) { events::Event::PendingHTLCsForwardable { .. } => { nodes[$node].process_pending_htlc_forwards(); }, + events::Event::PaymentForwardedFailed { .. } => {}, _ => if out.may_fail.load(atomic::Ordering::Acquire) { return; } else { diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 58fe30ba261..0ad8f505f9d 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -828,7 +828,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -909,7 +909,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1689,7 +1689,7 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1710,7 +1710,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], 
PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -2094,7 +2094,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent_without_paths!(nodes[0], payment_preimage); @@ -2456,7 +2456,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f }; if second_fails { assert!(nodes[2].node.fail_htlc_backwards(&payment_hash)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { @@ -2490,7 +2490,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if second_fails { reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); } else { reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index f8eacfb8e12..7a0c33f5f7d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5608,7 +5608,7 @@ impl Channel { /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). /// Also returns the list of payment_hashes for channels which we can safely fail backwards /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) { + pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, HTLCDestination)>) { // Note that we MUST only generate a monitor update that indicates force-closure - we're // called during initialization prior to the chain_monitor in the encompassing ChannelManager // being fully configured in some cases. Thus, its likely any monitor events we generate will @@ -5618,10 +5618,11 @@ impl Channel { // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and // return them to fail the payment. let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); + let counterparty_node_id = self.get_counterparty_node_id(); for htlc_update in self.holding_cell_htlc_updates.drain(..) { match htlc_update { HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. 
} => { - dropped_outbound_htlcs.push((source, payment_hash)); + dropped_outbound_htlcs.push((source, payment_hash, HTLCDestination::OpenChannel { node_id: counterparty_node_id, channel_id: self.channel_id })); }, _ => {} } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 3aab92ad0f7..556192b36ee 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -36,12 +36,12 @@ use bitcoin::secp256k1; use chain; use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock}; use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; -use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; +use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID, HTLCUpdate}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to // construct one themselves. use ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret}; -use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch}; +use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, HTLCDestination, UpdateFulfillCommitFetch}; use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures}; use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters}; use ln::msgs; @@ -274,7 +274,7 @@ enum ClaimFundsFromHop { DuplicateClaim, } -type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>); +type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, HTLCDestination)>); /// Error type returned across the channel_state mutex boundary. When an Err is generated for a /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel @@ -1823,7 +1823,7 @@ impl ChannelMana }; for htlc_source in failed_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, HTLCDestination::OpenChannel { node_id: counterparty_node_id, channel_id: *channel_id }); } let _ = handle_error!(self, result, counterparty_node_id); @@ -1879,7 +1879,7 @@ impl ChannelMana let (monitor_update_option, mut failed_htlcs) = shutdown_res; log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, htlc_source.2); } if let Some((funding_txo, monitor_update)) = monitor_update_option { // There isn't anything we can do if we get an update failure - we're already @@ -2912,7 +2912,8 @@ impl ChannelMana phantom_shared_secret: $phantom_ss, }); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code: $err_code, data: $err_data } + HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }, + HTLCDestination::Unknown { previous_hop_scid: prev_short_channel_id } )); continue; } @@ -2991,7 +2992,8 @@ impl ChannelMana } let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data } + HTLCFailReason::Reason { failure_code, data }, + HTLCDestination::OpenChannel { node_id: chan.get().get_counterparty_node_id(), channel_id: forward_chan_id } )); continue; }, @@ -3133,7 +3135,8 @@ impl ChannelMana incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, }), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data } + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::Unknown { previous_hop_scid: $htlc.prev_hop.short_channel_id } )); } } @@ -3261,8 +3264,8 @@ impl ChannelMana } } - for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination); } self.forward_htlcs(&mut phantom_receives); @@ -3481,7 +3484,7 @@ impl ChannelMana } for htlc_source in timed_out_mpp_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, HTLCDestination::Unknown { previous_hop_scid: htlc_source.0.short_channel_id } ); } for (err, counterparty_node_id) in handle_errors.drain(..) 
{ @@ -3508,8 +3511,9 @@ impl ChannelMana htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), - HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }); + HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::Unknown { previous_hop_scid: htlc.prev_hop.short_channel_id }); } true } else { false } @@ -3566,7 +3570,7 @@ impl ChannelMana fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { match htlc_src { - HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => { + HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, .. }) => { let (failure_code, onion_failure_data) = match self.channel_state.lock().unwrap().by_id.entry(channel_id) { hash_map::Entry::Occupied(chan_entry) => { @@ -3575,8 +3579,16 @@ impl ChannelMana hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) }; let channel_state = self.channel_state.lock().unwrap(); - self.fail_htlc_backwards_internal(channel_state, - htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); + + match channel_state.by_id.get(&channel_id) { + Some(channel) => { + let node_id = channel.get_counterparty_node_id().clone(); + self.fail_htlc_backwards_internal(channel_state, + htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, HTLCDestination::OpenChannel { node_id, channel_id }) + }, + None => self.fail_htlc_backwards_internal(channel_state, + htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, HTLCDestination::Unknown { previous_hop_scid: short_channel_id } ) + } }, HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => { let mut session_priv_bytes = [0; 32]; @@ -3629,7 +3641,7 @@ impl ChannelMana /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, receiver: HTLCDestination) { //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. We should make this async and move it into the forward HTLCs @@ -3736,7 +3748,7 @@ impl ChannelMana pending_events.push(path_failure); if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, ..
}) => { + HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => { let err_packet = match onion_error { HTLCFailReason::Reason { failure_code, data } => { log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); @@ -3774,6 +3786,11 @@ impl ChannelMana time_forwardable: time }); } + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push(events::Event::PaymentForwardedFailed { + source_channel_id: outpoint.to_channel_id(), + destination: receiver + }); }, } } @@ -3831,8 +3848,9 @@ impl ChannelMana htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), - HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); + HTLCSource::PreviousHopData(htlc.prev_hop.clone()), &payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, + HTLCDestination::Unknown { previous_hop_scid: htlc.prev_hop.short_channel_id } ); } else { match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { @@ -4079,6 +4097,7 @@ impl ChannelMana return; } + let counterparty_node_id = channel.get().get_counterparty_node_id(); let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height()); let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() { // We only send a channel_update in the case where we are just now sending a @@ -4097,12 +4116,14 @@ impl ChannelMana if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - (updates.failed_htlcs, updates.finalized_claimed_htlcs) + + let failed_htlcs_with_chan_id: Vec<(HTLCSource, PaymentHash, HTLCFailReason, HTLCDestination)> = updates.failed_htlcs.into_iter().map(|h| (h.0, h.1, h.2, HTLCDestination::OpenChannel { node_id: counterparty_node_id, channel_id: funding_txo.to_channel_id() })).collect(); + (failed_htlcs_with_chan_id, updates.finalized_claimed_htlcs) }; post_handle_chan_restoration!(self, chan_restoration_res); self.finalize_claims(finalized_claims); for failure in pending_failures.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, failure.3); } } @@ -4346,7 +4367,7 @@ impl ChannelMana } fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> { - let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>; + let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash, [u8; 32])>; let result: Result<(), _> = loop { let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -4364,7 +4385,7 @@ impl ChannelMana } let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry); - dropped_htlcs = htlcs; + dropped_htlcs = htlcs.into_iter().map(|h| (h.0, h.1, msg.channel_id)).collect(); // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update { @@ -4391,7 +4412,7 @@ impl ChannelMana } }; for htlc_source in dropped_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, HTLCDestination::OpenChannel { node_id: counterparty_node_id.clone(), channel_id: htlc_source.2 }); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -4677,7 +4698,7 @@ impl ChannelMana short_channel_id, channel_outpoint)) => { for failure in pending_failures.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, HTLCDestination::OpenChannel { node_id: *counterparty_node_id, channel_id: channel_outpoint.to_channel_id() }); } self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); @@ -4832,7 +4853,13 @@ impl ChannelMana self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.onchain_value_satoshis.map(|v| v * 1000), true); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + // let {payment_hash, payment_preimage} = htlc_update; + + match htlc_update { + HTLCUpdate { source, payment_hash, payment_preimage, .. 
} => { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, HTLCDestination::Payment { payment_hash, payment_preimage }) + } + } } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -5469,7 +5496,7 @@ where let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { failure_code, data, - })); + }, HTLCDestination::OpenChannel { node_id: channel.get_counterparty_node_id(), channel_id: channel.channel_id() })); } if let Some(funding_locked) = funding_locked_opt { send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked); @@ -5527,6 +5554,7 @@ where }); if let Some(height) = height_opt { + let chan_by_id = &channel_state.by_id; channel_state.claimable_htlcs.retain(|payment_hash, htlcs| { htlcs.retain(|htlc| { // If height is approaching the number of blocks we think it takes us to get @@ -5536,10 +5564,27 @@ where if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); + + let (node_id, chan_id) = match short_to_id.get(&htlc.prev_hop.short_channel_id) { + None => (None, None), + Some(chan_id) => { + match &chan_by_id.get(chan_id) { + None => (None, None), + Some(chan) => (Some(chan.get_counterparty_node_id()), Some(chan_id)) + } + }, + }; + + let destination = if let (Some(node_id), Some(chan_id)) = (node_id, chan_id) { + HTLCDestination::OpenChannel { node_id, channel_id: chan_id.clone() } + } else { + HTLCDestination::Unknown { previous_hop_scid: htlc.prev_hop.short_channel_id } + }; + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data - })); + }, destination)); false } else { true } }); @@ -5550,8 +5595,8 @@ where self.handle_init_event_channel_failures(failed_channels); - for (source, payment_hash, reason) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason); + for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination); } } @@ -6825,7 +6870,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> }; for htlc_source in failed_htlcs.drain(..) 
{ - channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, htlc_source.2); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -7013,7 +7058,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7131,8 +7176,10 @@ mod tests { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // We have to forward pending HTLCs twice - once tries to forward the payment forward (and + // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7173,7 +7220,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index b4807ea6886..461ac6b8e88 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1130,7 +1130,7 @@ macro_rules! commitment_signed_dance { { commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true); if $fail_backwards { - $crate::expect_pending_htlcs_forwardable!($node_a); + expect_pending_htlcs_forwardable!($node_a, PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!($node_a, 1); let channel_state = $node_a.node.channel_state.lock().unwrap(); @@ -1197,16 +1197,61 @@ macro_rules! 
get_route_and_payment_hash { }} } +pub struct PaymentForwardedFailedConditions { + pub expected_pending_htlcs_forwardable: bool, + pub expected_payment_forwarding_failed: Option<u32>, +} + +impl PaymentForwardedFailedConditions { + pub fn new() -> Self { + Self { + expected_pending_htlcs_forwardable: false, + expected_payment_forwarding_failed: None, + } + } + + pub fn pending_htlcs_forwardable(mut self) -> Self { + self.expected_pending_htlcs_forwardable = true; + self + } + + pub fn payment_forwarding_failed(mut self) -> Self { + self.expected_payment_forwarding_failed = Some(1); + self + } + + pub fn payment_forwarding_failed_with_count(mut self, count: u32) -> Self { + self.expected_payment_forwarding_failed = Some(count); + self + } +} + #[macro_export] -/// Clears (and ignores) a PendingHTLCsForwardable event -macro_rules! expect_pending_htlcs_forwardable_ignore { - ($node: expr) => {{ +macro_rules! expect_pending_htlcs_forwardable_conditions { + ($node: expr, $conditions: expr) => {{ let events = $node.node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); match events[0] { $crate::util::events::Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; + + if let Some(count) = $conditions.expected_payment_forwarding_failed { + assert_eq!(events.len() as u32, count + 1u32); + } else { + assert_eq!(events.len(), 1); + } + }} +} + +#[macro_export] +/// Clears (and ignores) a PendingHTLCsForwardable event +macro_rules! expect_pending_htlcs_forwardable_ignore { + ($node: expr) => {{ + expect_pending_htlcs_forwardable_conditions!($node, $crate::ln::functional_test_utils::PaymentForwardedFailedConditions::new()); + }}; + + ($node: expr, $conditions: expr) => {{ + expect_pending_htlcs_forwardable_conditions!($node, $conditions); }} } @@ -1214,7 +1259,15 @@ macro_rules! expect_pending_htlcs_forwardable_ignore { /// Handles a PendingHTLCsForwardable event macro_rules! expect_pending_htlcs_forwardable { ($node: expr) => {{ - $crate::expect_pending_htlcs_forwardable_ignore!($node); + expect_pending_htlcs_forwardable_ignore!($node); + $node.node.process_pending_htlc_forwards(); + + // Ensure process_pending_htlc_forwards is idempotent. + $node.node.process_pending_htlc_forwards(); + }}; + + ($node: expr, $conditions: expr) => {{ + expect_pending_htlcs_forwardable_ignore!($node, $conditions); $node.node.process_pending_htlc_forwards(); // Ensure process_pending_htlc_forwards is idempotent. @@ -1225,6 +1278,8 @@ macro_rules! expect_pending_htlcs_forwardable { #[cfg(test)] macro_rules! expect_pending_htlcs_forwardable_from_events { ($node: expr, $events: expr, $ignore: expr) => {{ + // We need to clear pending events since there may be `PaymentForwardedFailed` events here + $node.node.get_and_clear_pending_events(); assert_eq!($events.len(), 1); match $events[0] { Event::PendingHTLCsForwardable { ..
} => { }, @@ -1701,7 +1756,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } assert!(expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash)); - expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap()); + + expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap(), PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(expected_paths.len() as u32)); check_added_monitors!(expected_paths[0].last().unwrap(), expected_paths.len()); let mut per_path_msgs: Vec<((msgs::UpdateFailHTLC, msgs::CommitmentSigned), PublicKey)> = Vec::with_capacity(expected_paths.len()); @@ -1736,7 +1792,7 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable!(node); + expect_pending_htlcs_forwardable!(node, PaymentForwardedFailedConditions::new().payment_forwarding_failed()); } } let events = node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ce1b448cd50..b8bcfdd37b5 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1146,7 +1146,7 @@ fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2574,7 +2574,7 @@ fn claim_htlc_outputs_single_tx() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let mut events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); - match events[1] { + match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} _ => panic!("Unexpected event"), } @@ -2878,7 +2878,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -2950,7 +2950,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { } } - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3018,7 +3018,7 @@ fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3081,7 +3081,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3094,7 +3094,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3111,7 +3111,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash)); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3143,11 +3143,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // commitment transaction for nodes[0] until process_pending_htlc_forwards(). check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); + assert_eq!(events.len(), 2); match events[0] { Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; + match events[1] { + Event::PaymentForwardedFailed { .. 
} => { }, + _ => panic!("Unexpected event"), + } // Deliberately don't process the pending fail-back so they all fail back at once after // block connection just like the !deliver_bs_raa case } @@ -3161,7 +3165,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() }); match events[0] { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, _ => panic!("Unexepected event"), @@ -4191,7 +4195,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -4255,7 +4259,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5289,7 +5293,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5463,7 +5467,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5)); assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6)); check_added_monitors!(nodes[4], 0); - expect_pending_htlcs_forwardable!(nodes[4]); + expect_pending_htlcs_forwardable!(nodes[4], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(4)); check_added_monitors!(nodes[4], 1); let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id()); @@ -5477,7 +5481,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2)); assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4)); check_added_monitors!(nodes[5], 0); - expect_pending_htlcs_forwardable!(nodes[5]); + expect_pending_htlcs_forwardable!(nodes[5], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(2)); check_added_monitors!(nodes[5], 1); let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id()); @@ -5487,7 +5491,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2); - expect_pending_htlcs_forwardable!(nodes[3]); + // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 
PaymentForwardedFailed events + expect_pending_htlcs_forwardable!(nodes[3], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(6)); check_added_monitors!(nodes[3], 1); let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]); @@ -5522,11 +5527,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } let events = nodes[2].node.get_and_clear_pending_events(); let close_event = if deliver_last_raa { - assert_eq!(events.len(), 2); - events[1].clone() + assert_eq!(events.len(), 2 + 6); + events.last().clone().unwrap() } else { assert_eq!(events.len(), 1); - events[0].clone() + events.last().clone().unwrap() }; match close_event { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} @@ -5538,7 +5543,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno if deliver_last_raa { expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true); } else { - expect_pending_htlcs_forwardable!(nodes[2]); + let forwarding_failure_count = if announce_latest { 9 as u32 } else { 6 as u32 }; + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(forwarding_failure_count)); } check_added_monitors!(nodes[2], 3); @@ -5900,7 +5906,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash)); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -6358,7 +6364,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(process_htlc_forwards_event.len(), 1); + assert_eq!(process_htlc_forwards_event.len(), 2); match &process_htlc_forwards_event[0] { &Event::PendingHTLCsForwardable { .. 
} => {}, _ => panic!("Unexpected event"), @@ -7029,7 +7035,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7073,7 +7079,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2)); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7478,7 +7484,7 @@ fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. @@ -7756,7 +7762,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] }); let events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); - match events[1] { + match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} _ => panic!("Unexpected event"), } @@ -8032,7 +8038,7 @@ fn test_bump_txn_sanitize_tracking_maps() { // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + expect_pending_htlcs_forwardable_ignore!(nodes[0], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -8594,7 +8600,7 @@ fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); // We should fail the payment back @@ -9399,7 +9405,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. 
nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -9582,7 +9588,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { // Now we go fail back the first HTLC from the user end. nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(2)); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9599,7 +9605,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(1)); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9696,7 +9702,7 @@ fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_pending_htlcs_forwardable_ignore!(nodes[3], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(1)); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -9705,7 +9711,7 @@ fn test_inconsistent_mpp_params() { nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed_with_count(1)); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 6fa0aab9d31..b90f3ab83b7 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -73,7 +73,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index cf74fa8631b..82ec14e0446 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -123,7 +123,7 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentReceived); callback_node(); - expect_pending_htlcs_forwardable!(nodes[2]); + 
expect_pending_htlcs_forwardable!(nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); } let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -823,7 +823,7 @@ fn test_phantom_onion_hmac_failure() { }; expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -895,7 +895,7 @@ fn test_phantom_invalid_onion_payload() { } expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -951,7 +951,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { } expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -997,7 +997,7 @@ fn test_phantom_failure_too_low_cltv() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1042,7 +1042,7 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1135,7 +1135,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat); assert!(nodes[1].node.fail_htlc_backwards(&payment_hash)); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_ignore!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 46d5d22b49a..155af7d8357 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -61,7 +61,7 @@ fn 
retry_single_path_payment() { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(&nodes[1]); + expect_pending_htlcs_forwardable!(&nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -164,7 +164,7 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable!(&nodes[2]); + expect_pending_htlcs_forwardable!(&nodes[2], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -236,7 +236,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable!(nodes[3]); + expect_pending_htlcs_forwardable!(nodes[3], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); @@ -244,7 +244,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -298,7 +298,7 @@ fn retry_expired_payment() { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(&nodes[1]); + expect_pending_htlcs_forwardable!(&nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -778,7 +778,7 @@ fn test_fulfill_restart_failure() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 4fce2bb1b2b..02976b81fe4 
100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -507,7 +507,7 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 96fda526d68..340592de12f 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -146,7 +146,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { txdata: vec![], }; connect_block(&nodes[1], &block); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable!(nodes[1], PaymentForwardedFailedConditions::new().payment_forwarding_failed()); } check_added_monitors!(nodes[1], 1);
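
A minimal sketch of how downstream event-processing code might consume the new event. The variant name, the `source_channel_id`/`destination` fields, and the three `HTLCDestination` variants are taken from the diff above; the import paths and concrete field types are assumptions, since the `HTLCDestination` definition itself is not shown in this patch.

use lightning::util::events::Event;
// Assumed export path; channelmanager.rs above imports HTLCDestination from ln::channel.
use lightning::ln::channel::HTLCDestination;

/// Logs why an HTLC this node was handling got failed backwards.
fn log_forward_failure(event: &Event) {
	if let Event::PaymentForwardedFailed { source_channel_id, destination } = event {
		match destination {
			// The HTLC was bound for a known channel to `node_id` but could not be forwarded over it.
			HTLCDestination::OpenChannel { node_id, channel_id } => {
				println!("HTLC arriving on channel {:02x?} failed forwarding to peer {} over channel {:02x?}",
					source_channel_id, node_id, channel_id);
			},
			// Only the requested outgoing SCID was known; no usable channel matched it.
			HTLCDestination::Unknown { previous_hop_scid } => {
				println!("HTLC arriving on channel {:02x?} failed forwarding to unknown SCID {}",
					source_channel_id, previous_hop_scid);
			},
			// The HTLC was destined for this node, but the claimable payment was failed back,
			// e.g. because it timed out before being claimed.
			HTLCDestination::Payment { payment_hash, .. } => {
				println!("Inbound payment {:02x?} arriving on channel {:02x?} was failed back",
					payment_hash.0, source_channel_id);
			},
		}
	}
}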