From 9f5b999c263779ec0e2f03b4b2bc0febba8c98ff Mon Sep 17 00:00:00 2001
From: jurvis
Date: Thu, 14 Jul 2022 20:45:21 -0700
Subject: [PATCH] Send failure event if we fail to handle an HTLC

In `ChannelManager::fail_htlc_backwards_internal`, we push an
`HTLCHandlingFailed` event containing the previous channel id and the
failed next-hop destination of the HTLC.
---
 fuzz/src/chanmon_consistency.rs | 2 +-
 lightning/src/chain/chainmonitor.rs | 16 +-
 lightning/src/chain/channelmonitor.rs | 4 +
 lightning/src/chain/mod.rs | 3 +-
 lightning/src/ln/chanmon_update_fail_tests.rs | 18 +-
 lightning/src/ln/channel.rs | 5 +-
 lightning/src/ln/channelmanager.rs | 157 +++++++++++-------
 lightning/src/ln/functional_test_utils.rs | 55 +++---
 lightning/src/ln/functional_tests.rs | 155 ++++++++++-------
 lightning/src/ln/monitor_tests.rs | 4 +-
 lightning/src/ln/onion_route_tests.rs | 16 +-
 lightning/src/ln/payment_tests.rs | 50 +++---
 lightning/src/ln/priv_short_conf_tests.rs | 6 +-
 lightning/src/ln/reorg_tests.rs | 4 +-
 lightning/src/util/test_utils.rs | 2 +-
 15 files changed, 299 insertions(+), 198 deletions(-)

diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index bfaa6501d1e..dc9504878f6 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -148,7 +148,7 @@ impl chain::Watch for TestChainMonitor {
 		self.chain_monitor.update_channel(funding_txo, update)
 	}

-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
 }
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index e6b5733520a..ebb34707054 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -43,6 +43,7 @@ use prelude::*;
 use sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
 use core::ops::Deref;
 use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use bitcoin::secp256k1::PublicKey;

 #[derive(Clone, Copy, Hash, PartialEq, Eq)]
 /// A specific update's ID stored in a `MonitorUpdateId`, separated out to make the contents
@@ -235,7 +236,7 @@ pub struct ChainMonitor
-	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>)>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
highest_chain_height: AtomicUsize, } @@ -299,7 +300,7 @@ where C::Target: chain::Filter, log_trace!(self.logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)), Err(ChannelMonitorUpdateErr::PermanentFailure) => { monitor_state.channel_perm_failed.store(true, Ordering::Release); - self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)])); + self.pending_monitor_events.lock().unwrap().push((*funding_outpoint, vec![MonitorEvent::UpdateFailed(*funding_outpoint)], monitor.get_counterparty_node_id())); }, Err(ChannelMonitorUpdateErr::TemporaryFailure) => { log_debug!(self.logger, "Channel Monitor sync for channel {} in progress, holding events until completion!", log_funding_info!(monitor)); @@ -458,7 +459,7 @@ where C::Target: chain::Filter, self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id: monitor_data.monitor.get_latest_update_id(), - }])); + }], monitor_data.monitor.get_counterparty_node_id())); }, MonitorUpdateId { contents: UpdateOrigin::ChainSync(_) } => { if !monitor_data.has_pending_chainsync_updates(&pending_monitor_updates) { @@ -476,10 +477,12 @@ where C::Target: chain::Filter, /// channel_monitor_updated once with the highest ID. #[cfg(any(test, fuzzing))] pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { + let monitors = self.monitors.read().unwrap(); + let counterparty_node_id = if let Some(mon) = monitors.get(&funding_txo) { mon.monitor.get_counterparty_node_id() } else { None }; self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id, - }])); + }], counterparty_node_id)); } #[cfg(any(test, fuzzing, feature = "_test_utils"))] @@ -666,7 +669,7 @@ where C::Target: chain::Filter, } } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec)> { + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec, Option)> { let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0); for monitor_state in self.monitors.read().unwrap().values() { let is_pending_monitor_update = monitor_state.has_pending_chainsync_updates(&monitor_state.pending_monitor_updates.lock().unwrap()); @@ -695,7 +698,8 @@ where C::Target: chain::Filter, let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); if monitor_events.len() > 0 { let monitor_outpoint = monitor_state.monitor.get_funding_txo().0; - pending_monitor_events.push((monitor_outpoint, monitor_events)); + let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); + pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id)); } } } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 80cd9cb9d45..220951e07f9 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -1209,6 +1209,10 @@ impl ChannelMonitor { self.inner.lock().unwrap().get_cur_holder_commitment_number() } + pub(crate) fn get_counterparty_node_id(&self) -> Option { + self.inner.lock().unwrap().counterparty_node_id + } + /// Used by ChannelManager deserialization to broadcast the latest holder state if its copy of /// the Channel was out-of-date. 
You may use it to get a broadcastable holder toxic tx in case of /// fallen-behind, i.e when receiving a channel_reestablish with a proof that our counterparty side knows diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index 5959508de3b..a0eb17c6be0 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -15,6 +15,7 @@ use bitcoin::blockdata::script::Script; use bitcoin::blockdata::transaction::{Transaction, TxOut}; use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::network::constants::Network; +use bitcoin::secp256k1::PublicKey; use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; use chain::keysinterface::Sign; @@ -302,7 +303,7 @@ pub trait Watch { /// /// For details on asynchronous [`ChannelMonitor`] updating and returning /// [`MonitorEvent::UpdateCompleted`] here, see [`ChannelMonitorUpdateErr::TemporaryFailure`]. - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec)>; + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec, Option)>; } /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 0d2b3b6a9f9..79be8f4e70b 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -26,7 +26,7 @@ use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use util::config::UserConfig; use util::enforcing_trait_impls::EnforcingSigner; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use util::errors::APIError; use util::ser::{ReadableArgs, Writeable}; use util::test_utils::TestBroadcaster; @@ -832,7 +832,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash_1 })); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -913,7 +913,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1690,14 +1690,14 @@ fn test_monitor_update_on_pending_forwards() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), 
InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); // Rebalance a bit so that we can send backwards from 3 to 1. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash_1 })); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1718,7 +1718,7 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure)); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1); @@ -2106,7 +2106,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 })); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent_without_paths!(nodes[0], payment_preimage); @@ -2469,7 +2469,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); } else { @@ -2505,7 +2505,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f if second_fails { reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false)); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 })); } else { reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false)); } diff --git a/lightning/src/ln/channel.rs 
b/lightning/src/ln/channel.rs index f612cb97943..ed5bff63d47 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5765,7 +5765,7 @@ impl Channel { /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). /// Also returns the list of payment_hashes for channels which we can safely fail backwards /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) { + pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) { // Note that we MUST only generate a monitor update that indicates force-closure - we're // called during initialization prior to the chain_monitor in the encompassing ChannelManager // being fully configured in some cases. Thus, its likely any monitor events we generate will @@ -5775,10 +5775,11 @@ impl Channel { // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and // return them to fail the payment. let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len()); + let counterparty_node_id = self.get_counterparty_node_id(); for htlc_update in self.holding_cell_htlc_updates.drain(..) { match htlc_update { HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => { - dropped_outbound_htlcs.push((source, payment_hash)); + dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id)); }, _ => {} } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index bae408d29f0..021127d5c55 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -52,7 +52,7 @@ use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSA use ln::wire::Encode; use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient}; use util::config::{UserConfig, ChannelConfig}; -use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; +use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use util::{byte_utils, events}; use util::scid_utils::fake_scid; use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter}; @@ -281,7 +281,7 @@ enum ClaimFundsFromHop { DuplicateClaim, } -type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>); +type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>); /// Error type returned across the channel_state mutex boundary. When an Err is generated for a /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel @@ -1890,7 +1890,8 @@ impl ChannelMana }; for htlc_source in failed_htlcs.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -1946,7 +1947,9 @@ impl ChannelMana let (monitor_update_option, mut failed_htlcs) = shutdown_res; log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len()); for htlc_source in failed_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } if let Some((funding_txo, monitor_update)) = monitor_update_option { // There isn't anything we can do if we get an update failure - we're already @@ -3107,21 +3110,42 @@ impl ChannelMana HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value }, prev_funding_outpoint } => { + macro_rules! failure_handler { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { + log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); + + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + phantom_shared_secret: $phantom_ss, + }); + + let reason = if $next_hop_unknown { + HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id } + } else { + HTLCDestination::FailedPayment{ payment_hash } + }; + + failed_forwards.push((htlc_source, payment_hash, + HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }, + reason + )); + continue; + } + } macro_rules! fail_forward { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { { - log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - phantom_shared_secret: $phantom_ss, - }); - failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code: $err_code, data: $err_data } - )); - continue; + failure_handler!($msg, $err_code, $err_data, $phantom_ss, true); + } + } + } + macro_rules! 
failed_payment { + ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => { + { + failure_handler!($msg, $err_code, $err_data, $phantom_ss, false); } } } @@ -3137,17 +3161,17 @@ impl ChannelMana // `update_fail_malformed_htlc`, meaning here we encrypt the error as // if it came from us (the second-to-last hop) but contains the sha256 // of the onion. - fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None); + failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None); }, Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => { - fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); + failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret)); }, }; match next_hop { onion_utils::Hop::Receive(hop_data) => { match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) { Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])), - Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret)) + Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, _ => panic!(), @@ -3198,7 +3222,8 @@ impl ChannelMana } let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get()); failed_forwards.push((htlc_source, payment_hash, - HTLCFailReason::Reason { failure_code, data } + HTLCFailReason::Reason { failure_code, data }, + HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id } )); continue; }, @@ -3327,7 +3352,7 @@ impl ChannelMana }; macro_rules! 
fail_htlc { - ($htlc: expr) => { + ($htlc: expr, $payment_hash: expr) => { let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice( &byte_utils::be32_to_array(self.best_block.read().unwrap().height()), @@ -3339,7 +3364,8 @@ impl ChannelMana incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, phantom_shared_secret, }), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data } + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash: $payment_hash }, )); } } @@ -3358,7 +3384,7 @@ impl ChannelMana if htlcs.len() == 1 { if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue } } @@ -3380,7 +3406,7 @@ impl ChannelMana if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat { log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)", log_bytes!(payment_hash.0), total_value, $payment_data.total_msat); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else if total_value == $payment_data.total_msat { htlcs.push(claimable_htlc); new_events.push(events::Event::PaymentReceived { @@ -3414,7 +3440,7 @@ impl ChannelMana let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) { Ok(payment_preimage) => payment_preimage, Err(()) => { - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue } }; @@ -3433,7 +3459,7 @@ impl ChannelMana }, hash_map::Entry::Occupied(_) => { log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } } } @@ -3442,17 +3468,17 @@ impl ChannelMana hash_map::Entry::Occupied(inbound_payment) => { if payment_data.is_none() { log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); continue }; let payment_data = payment_data.unwrap(); if inbound_payment.get().payment_secret != payment_data.payment_secret { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0)); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() { log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).", log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap()); - fail_htlc!(claimable_htlc); + fail_htlc!(claimable_htlc, payment_hash); } else { let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage); if payment_received_generated { @@ -3471,8 +3497,8 @@ impl 
ChannelMana } } - for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason); + for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination); } self.forward_htlcs(&mut phantom_receives); @@ -3695,7 +3721,8 @@ impl ChannelMana } for htlc_source in timed_out_mpp_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }); + let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); } for (err, counterparty_node_id) in handle_errors.drain(..) { @@ -3731,7 +3758,8 @@ impl ChannelMana self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }); + HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash: *payment_hash }); } } } @@ -3787,7 +3815,7 @@ impl ChannelMana // be surfaced to the user. fn fail_holding_cell_htlcs( &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32], - _counterparty_node_id: &PublicKey + counterparty_node_id: &PublicKey ) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { match htlc_src { @@ -3800,8 +3828,9 @@ impl ChannelMana hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) }; let channel_state = self.channel_state.lock().unwrap(); - self.fail_htlc_backwards_internal(channel_state, - htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data}); + + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver) }, HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => { let mut session_priv_bytes = [0; 32]; @@ -3854,7 +3883,7 @@ impl ChannelMana /// to fail and take the channel_state lock for each iteration (as we take ownership and may /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) { + fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) { //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. 
We should make this async and move it into the forward HTLCs @@ -3979,7 +4008,7 @@ impl ChannelMana pending_events.push(path_failure); if let Some(ev) = full_failure_ev { pending_events.push(ev); } }, - HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => { + HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => { let err_packet = match onion_error { HTLCFailReason::Reason { failure_code, data } => { log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code); @@ -4011,12 +4040,16 @@ impl ChannelMana } } mem::drop(channel_state_lock); + let mut pending_events = self.pending_events.lock().unwrap(); if let Some(time) = forward_event { - let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push(events::Event::PendingHTLCsForwardable { time_forwardable: time }); } + pending_events.push(events::Event::HTLCHandlingFailed { + prev_channel_id: outpoint.to_channel_id(), + failed_next_destination: destination + }); }, } } @@ -4108,7 +4141,8 @@ impl ChannelMana self.best_block.read().unwrap().height())); self.fail_htlc_backwards_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }); + HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash } ); } else { match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { @@ -4352,7 +4386,7 @@ impl ChannelMana let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); let chan_restoration_res; - let (mut pending_failures, finalized_claims) = { + let (mut pending_failures, finalized_claims, counterparty_node_id) = { let mut channel_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_lock; let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) { @@ -4363,6 +4397,7 @@ impl ChannelMana return; } + let counterparty_node_id = channel.get().get_counterparty_node_id(); let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height()); let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() { // We only send a channel_update in the case where we are just now sending a @@ -4381,12 +4416,14 @@ impl ChannelMana if let Some(upd) = channel_update { channel_state.pending_msg_events.push(upd); } - (updates.failed_htlcs, updates.finalized_claimed_htlcs) + + (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id) }; post_handle_chan_restoration!(self, chan_restoration_res); self.finalize_claims(finalized_claims); for failure in pending_failures.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); } } @@ -4745,7 +4782,8 @@ impl ChannelMana } }; for htlc_source in dropped_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -5031,7 +5069,8 @@ impl ChannelMana short_channel_id, channel_outpoint)) => { for failure in pending_failures.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2); + let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); } self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); @@ -5178,7 +5217,7 @@ impl ChannelMana let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) { + for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { for monitor_event in monitor_events.drain(..) 
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { @@ -5187,7 +5226,8 @@ impl ChannelMana self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -5829,7 +5869,7 @@ where let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel); timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason { failure_code, data, - })); + }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() })); } if let Some(channel_ready) = channel_ready_opt { send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready); @@ -5910,10 +5950,11 @@ where if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER { let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height)); + timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data - })); + }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); false } else { true } }); @@ -5924,8 +5965,8 @@ where self.handle_init_event_channel_failures(failed_channels); - for (source, payment_hash, reason) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason); + for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { + self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination); } } @@ -7281,7 +7322,9 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> }; for htlc_source in failed_htlcs.drain(..) 
{ - channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }); + let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; + channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -7306,7 +7349,7 @@ mod tests { use ln::msgs::ChannelMessageHandler; use routing::router::{PaymentParameters, RouteParameters, find_route}; use util::errors::APIError; - use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; + use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; use util::test_utils; use chain::keysinterface::KeysInterface; @@ -7469,7 +7512,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7589,8 +7632,10 @@ mod tests { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + // We have to forward pending HTLCs twice - once tries to forward the payment forward (and + // fails), the second will process the resulting failure and fail the HTLC backward expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -7631,7 +7676,7 @@ mod tests { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index c4f2480eb18..107699f40c7 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -46,6 +46,7 @@ use core::cell::RefCell; use alloc::rc::Rc; use sync::{Arc, Mutex}; use core::mem; +use core::iter::repeat; pub const 
CHAN_CONFIRM_DEPTH: u32 = 10; @@ -1187,7 +1188,7 @@ macro_rules! commitment_signed_dance { { commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true); if $fail_backwards { - $crate::expect_pending_htlcs_forwardable!($node_a); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!($node_a, HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason($crate::util::events::HTLCDestination::NextHopChannel{ node_id: Some($node_b.node.get_our_node_id()), channel_id: $commitment_signed.channel_id })); check_added_monitors!($node_a, 1); let channel_state = $node_a.node.channel_state.lock().unwrap(); @@ -1257,7 +1258,7 @@ macro_rules! get_route_and_payment_hash { pub struct HTLCHandlingFailedConditions { pub expected_pending_htlcs_forwardable: bool, pub expected_htlc_processing_failed: Option, - pub expected_destination: Option, + pub expected_destinations: Option>, } impl HTLCHandlingFailedConditions { @@ -1265,24 +1266,19 @@ impl HTLCHandlingFailedConditions { Self { expected_pending_htlcs_forwardable: false, expected_htlc_processing_failed: None, - expected_destination: None, + expected_destinations: None, } } - pub fn htlc_processing_failed_with_count(mut self, count: u32) -> Self { - self.expected_htlc_processing_failed = Some(count); - self - } - pub fn htlc_processing_failed_with_reason(mut self, reason: HTLCDestination) -> Self { self.expected_htlc_processing_failed = Some(1); - self.expected_destination = Some(reason); + self.expected_destinations = Some(vec![reason]); self } - pub fn htlc_processing_failed_with_count_and_reason(mut self, count: u32, reason: HTLCDestination) -> Self { - self.expected_htlc_processing_failed = Some(count); - self.expected_destination = Some(reason); + pub fn htlc_processing_failed_with_reasons(mut self, reasons: Vec) -> Self { + self.expected_htlc_processing_failed = Some(reasons.len() as u32); + self.expected_destinations = Some(reasons); self } } @@ -1290,27 +1286,35 @@ impl HTLCHandlingFailedConditions { #[macro_export] macro_rules! expect_pending_htlcs_forwardable_conditions { ($node: expr, $conditions: expr) => {{ + let conditions = $conditions; let events = $node.node.get_and_clear_pending_events(); match events[0] { $crate::util::events::Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; - if let Some(count) = $conditions.expected_htlc_processing_failed { + if let Some(count) = conditions.expected_htlc_processing_failed { assert_eq!(events.len() as u32, count + 1u32); } else { assert_eq!(events.len(), 1); } - if let Some(reason) = $conditions.expected_destination { - for event in events { - match event { - $crate::util::events::Event::PendingHTLCsForwardable { .. } => { }, - $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { - assert_eq!(reason, failed_next_destination.clone()); - }, - _ => panic!("Unexpected event"), - } + if let Some(destinations) = conditions.expected_destinations { + expect_htlc_handling_failed_destinations!(events, destinations) + } + }} +} + +#[macro_export] +macro_rules! expect_htlc_handling_failed_destinations { + ($events: expr, $destinations: expr) => {{ + for event in $events { + match event { + $crate::util::events::Event::PendingHTLCsForwardable { .. } => { }, + $crate::util::events::Event::HTLCHandlingFailed { ref failed_next_destination, .. } => { + assert!($destinations.contains(&failed_next_destination)) + }, + _ => panic!("Unexpected destination"), } } }} @@ -1359,8 +1363,6 @@ macro_rules! 
expect_pending_htlcs_forwardable_and_htlc_handling_failed { #[cfg(test)] macro_rules! expect_pending_htlcs_forwardable_from_events { ($node: expr, $events: expr, $ignore: expr) => {{ - // We need to clear pending events since there may possibly be `PaymentForwardingFailed` events here - $node.node.get_and_clear_pending_events(); assert_eq!($events.len(), 1); match $events[0] { Event::PendingHTLCsForwardable { .. } => { }, @@ -1892,7 +1894,8 @@ pub fn fail_payment_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe assert_eq!(path.last().unwrap().node.get_our_node_id(), expected_paths[0].last().unwrap().node.get_our_node_id()); } expected_paths[0].last().unwrap().node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable!(expected_paths[0].last().unwrap()); + let expected_destinations: Vec = repeat(HTLCDestination::FailedPayment { payment_hash: our_payment_hash }).take(expected_paths.len()).collect(); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(expected_paths[0].last().unwrap(), HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(expected_destinations)); pass_failed_payment_back(origin_node, expected_paths, skip_last, our_payment_hash); } @@ -1933,7 +1936,7 @@ pub fn pass_failed_payment_back<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expe node.node.handle_update_fail_htlc(&prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0); commitment_signed_dance!(node, prev_node, next_msgs.as_ref().unwrap().1, update_next_node); if !update_next_node { - expect_pending_htlcs_forwardable!(node); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(node, HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(prev_node.node.get_our_node_id()), channel_id: next_msgs.as_ref().unwrap().0.channel_id })); } } let events = node.node.get_and_clear_pending_msg_events(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 4894ed02b3b..b705c83873e 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -31,7 +31,7 @@ use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction}; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use util::errors::APIError; use util::ser::{Writeable, ReadableArgs}; use util::config::UserConfig; @@ -54,6 +54,7 @@ use io; use prelude::*; use alloc::collections::BTreeSet; use core::default::Default; +use core::iter::repeat; use sync::{Arc, Mutex}; use ln::functional_test_utils::*; @@ -1150,7 +1151,7 @@ fn holding_cell_htlc_counting() { // We have to forward pending HTLCs twice - once tries to forward the payment forward (and // fails), the second will process the resulting failure and fail the HTLC backward. 
expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -2616,7 +2617,7 @@ fn claim_htlc_outputs_single_tx() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); let mut events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); - match events[1] { + match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {} _ => panic!("Unexpected event"), } @@ -2928,7 +2929,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { check_spends!(commitment_tx[0], chan_2.3); nodes[2].node.fail_htlc_backwards(&payment_hash); check_added_monitors!(nodes[2], 0); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); check_added_monitors!(nodes[2], 1); let events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -3000,7 +3001,7 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { } } - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3068,7 +3069,7 @@ fn test_simple_commitment_revoked_fail_backward() { check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3131,7 +3132,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value); nodes[2].node.fail_htlc_backwards(&first_payment_hash); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: first_payment_hash })); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3144,7 +3145,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // Drop the last RAA from 3 -> 2 nodes[2].node.fail_htlc_backwards(&second_payment_hash); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], 
HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: second_payment_hash })); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3161,7 +3162,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use check_added_monitors!(nodes[2], 1); nodes[2].node.fail_htlc_backwards(&third_payment_hash); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: third_payment_hash })); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); assert!(updates.update_add_htlcs.is_empty()); @@ -3193,11 +3194,15 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use // commitment transaction for nodes[0] until process_pending_htlc_forwards(). check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 1); + assert_eq!(events.len(), 2); match events[0] { Event::PendingHTLCsForwardable { .. } => { }, _ => panic!("Unexpected event"), }; + match events[1] { + Event::HTLCHandlingFailed { .. } => { }, + _ => panic!("Unexpected event"), + } // Deliberately don't process the pending fail-back so they all fail back at once after // block connection just like the !deliver_bs_raa case } @@ -3211,7 +3216,7 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire let events = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 }); + assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() }); match events[0] { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => { }, _ => panic!("Unexepected event"), @@ -4285,7 +4290,7 @@ fn do_test_htlc_timeout(send_partial_mpp: bool) { connect_block(&nodes[1], &block); } - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); check_added_monitors!(nodes[1], 1); let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -4349,7 +4354,7 @@ fn do_test_holding_cell_htlc_add_timeouts(forwarded_htlc: bool) { connect_blocks(&nodes[1], 1); if forwarded_htlc { - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); check_added_monitors!(nodes[1], 1); let fail_commit = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(fail_commit.len(), 1); @@ -5397,7 +5402,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { mine_transaction(&nodes[1], &htlc_timeout_tx); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -5517,18 +5522,18 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno &[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]); let nodes = create_network(6, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); - let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known()); + let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); + let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known()); + let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()); + let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known()); + let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known()); // Rebalance and check output sanity... 
send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000); send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000); - assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2); + assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2); - let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis; + let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis; // 0th HTLC: let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee // 1st HTLC: @@ -5563,8 +5568,8 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // Double-check that six of the new HTLC were added // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie, // with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included). - assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1); - assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8); + assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1); + assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8); // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go. // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs @@ -5573,7 +5578,14 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno nodes[4].node.fail_htlc_backwards(&payment_hash_5); nodes[4].node.fail_htlc_backwards(&payment_hash_6); check_added_monitors!(nodes[4], 0); - expect_pending_htlcs_forwardable!(nodes[4]); + + let failed_destinations = vec![ + HTLCDestination::FailedPayment { payment_hash: payment_hash_1 }, + HTLCDestination::FailedPayment { payment_hash: payment_hash_3 }, + HTLCDestination::FailedPayment { payment_hash: payment_hash_5 }, + HTLCDestination::FailedPayment { payment_hash: payment_hash_6 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(failed_destinations)); check_added_monitors!(nodes[4], 1); let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id()); @@ -5587,7 +5599,12 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno nodes[5].node.fail_htlc_backwards(&payment_hash_2); nodes[5].node.fail_htlc_backwards(&payment_hash_4); check_added_monitors!(nodes[5], 0); - expect_pending_htlcs_forwardable!(nodes[5]); + + let failed_destinations_2 = vec![ + HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }, + HTLCDestination::FailedPayment { payment_hash: payment_hash_4 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(failed_destinations_2)); check_added_monitors!(nodes[5], 1); let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id()); @@ -5595,9 +5612,18 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]); commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false); - let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2); - - 
expect_pending_htlcs_forwardable!(nodes[3]); + let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); + + // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events + let failed_destinations_3 = vec![ + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(failed_destinations_3)); check_added_monitors!(nodes[3], 1); let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id()); nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]); @@ -5623,7 +5649,7 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno // // Alternatively, we may broadcast the previous commitment transaction, which should only // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs. - let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2); + let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2); if announce_latest { mine_transaction(&nodes[2], &ds_last_commitment_tx[0]); @@ -5632,11 +5658,11 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno } let events = nodes[2].node.get_and_clear_pending_events(); let close_event = if deliver_last_raa { - assert_eq!(events.len(), 2); - events[1].clone() + assert_eq!(events.len(), 2 + 6); + events.last().clone().unwrap() } else { assert_eq!(events.len(), 1); - events[0].clone() + events.last().clone().unwrap() }; match close_event { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} @@ -5647,8 +5673,17 @@ fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, anno check_closed_broadcast!(nodes[2], true); if deliver_last_raa { expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true); + + let expected_destinations: Vec = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect(); + expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations); } else { - expect_pending_htlcs_forwardable!(nodes[2]); + let expected_destinations: Vec = if announce_latest { + repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect() + } else { + repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect() + }; + + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(expected_destinations)); } check_added_monitors!(nodes[2], 3); @@ -6012,7 +6047,7 @@ fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no let htlc_value = if use_dust { 50000 } else { 3000000 }; let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value); nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -6472,7 +6507,7 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { // nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process. let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events(); - assert_eq!(process_htlc_forwards_event.len(), 1); + assert_eq!(process_htlc_forwards_event.len(), 2); match &process_htlc_forwards_event[0] { &Event::PendingHTLCsForwardable { .. 
} => {}, _ => panic!("Unexpected event"), @@ -7097,7 +7132,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); + let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000); @@ -7145,7 +7180,7 @@ fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_upda check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); let events_4 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 1); @@ -7189,7 +7224,7 @@ fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) { // Fail one HTLC to prune it in the will-be-latest-local commitment tx nodes[1].node.fail_htlc_backwards(&payment_hash_2); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash_2 })); check_added_monitors!(nodes[1], 1); let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -7590,7 +7625,7 @@ fn test_check_htlc_underpaying() { // Note that we first have to wait a random delay before processing the receipt of the HTLC, // and then will wait a second random delay before failing the HTLC back: expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); // Node 3 is expecting payment of 100_000 but received 10_000, // it should fail htlc like we didn't know the preimage. @@ -7868,7 +7903,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] }); let events = nodes[0].node.get_and_clear_pending_events(); expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true); - match events[1] { + match events.last().unwrap() { Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. 
} => {} _ => panic!("Unexpected event"), } @@ -8144,19 +8179,19 @@ fn test_bump_txn_sanitize_tracking_maps() { let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known()); // Lock HTLC in both directions - let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0; - route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0; + let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000); + let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000); let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2); assert_eq!(revoked_local_txn[0].input.len(), 1); assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid()); // Revoke local commitment tx - claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage); + claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1); // Broadcast set of revoked txn on A connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH); - expect_pending_htlcs_forwardable_ignore!(nodes[0]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash_2 })); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0); mine_transaction(&nodes[0], &revoked_local_txn[0]); @@ -8709,7 +8744,7 @@ fn test_bad_secret_hash() { // All the below cases should end up being handled exactly identically, so we macro the // resulting events. macro_rules! handle_unknown_invalid_payment_data { - () => { + ($payment_hash: expr) => { check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -8719,7 +8754,7 @@ fn test_bad_secret_hash() { // We have to forward pending HTLCs once to process the receipt of the HTLC and then // again to process the pending backwards-failure of the HTLC expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment{ payment_hash: $payment_hash })); check_added_monitors!(nodes[1], 1); // We should fail the payment back @@ -8740,17 +8775,17 @@ fn test_bad_secret_hash() { // Send a payment with the right payment hash but the wrong payment secret nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap(); - handle_unknown_invalid_payment_data!(); + handle_unknown_invalid_payment_data!(our_payment_hash); expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data); // Send a payment with a random payment hash, but the right payment secret nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap(); - handle_unknown_invalid_payment_data!(); + handle_unknown_invalid_payment_data!(random_payment_hash); expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); // Send a payment with a random payment hash and random payment secret nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap(); - handle_unknown_invalid_payment_data!(); + handle_unknown_invalid_payment_data!(random_payment_hash); 
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data); } @@ -9576,7 +9611,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t // additional block built on top of the current chain. nodes[1].chain_monitor.chain_monitor.transactions_confirmed( &nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id })); check_added_monitors!(nodes[1], 1); let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -9759,7 +9794,11 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { // Now we go fail back the first HTLC from the user end. nodes[1].node.fail_htlc_backwards(&our_payment_hash); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + let expected_destinations = vec![ + HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, + HTLCDestination::FailedPayment { payment_hash: our_payment_hash }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(expected_destinations)); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9776,7 +9815,7 @@ fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) { if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); } } else { // Let the second HTLC fail and claim the first - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9818,7 +9857,7 @@ fn test_inconsistent_mpp_params() { create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()); - create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()); + let chan_2_3 =create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()); let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id()) .with_features(InvoiceFeatures::known()); @@ -9873,7 +9912,7 @@ fn test_inconsistent_mpp_params() { } expect_pending_htlcs_forwardable_ignore!(nodes[3]); nodes[3].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[3]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: our_payment_hash })); nodes[3].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[3], 1); @@ -9882,7 +9921,7 @@ fn test_inconsistent_mpp_params() { 
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 })); check_added_monitors!(nodes[2], 1); let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); @@ -10002,7 +10041,11 @@ fn test_double_partial_claim() { connect_blocks(&nodes[3], TEST_FINAL_CLTV); connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later - expect_pending_htlcs_forwardable!(nodes[3]); + let failed_destinations = vec![ + HTLCDestination::FailedPayment { payment_hash }, + HTLCDestination::FailedPayment { payment_hash }, + ]; + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reasons(failed_destinations)); pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 7c46f7b6d28..636c17f1572 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -15,7 +15,7 @@ use ln::channel; use ln::channelmanager::BREAKDOWN_TIMEOUT; use ln::features::InitFeatures; use ln::msgs::ChannelMessageHandler; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; @@ -74,7 +74,7 @@ fn chanmon_fail_from_stale_commitment() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 })); check_added_monitors!(nodes[1], 1); let fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/onion_route_tests.rs b/lightning/src/ln/onion_route_tests.rs index c66f45a4492..540baaa0f81 100644 --- a/lightning/src/ln/onion_route_tests.rs +++ b/lightning/src/ln/onion_route_tests.rs @@ -23,7 +23,7 @@ use ln::features::{InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; use ln::msgs::{ChannelMessageHandler, ChannelUpdate, OptionalField}; use ln::wire::Encode; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; use util::ser::{ReadableArgs, Writeable, Writer}; use util::{byte_utils, test_utils}; use util::config::{UserConfig, ChannelConfig}; @@ -126,7 +126,7 @@ fn run_onion_failure_test_with_fail_intercept(_name: &str, test_case: expect_htlc_forward!(&nodes[2]); expect_event!(&nodes[2], Event::PaymentReceived); callback_node(); - expect_pending_htlcs_forwardable!(nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], 
HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); } let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -1036,7 +1036,7 @@ fn test_phantom_onion_hmac_failure() { }; expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1108,7 +1108,7 @@ fn test_phantom_invalid_onion_payload() { } expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1164,7 +1164,7 @@ fn test_phantom_final_incorrect_cltv_expiry() { } expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1210,7 +1210,7 @@ fn test_phantom_failure_too_low_cltv() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1255,7 +1255,7 @@ fn test_phantom_failure_too_low_recv_amt() { nodes[1].node.process_pending_htlc_forwards(); expect_pending_htlcs_forwardable_ignore!(nodes[1]); nodes[1].node.process_pending_htlc_forwards(); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(&nodes[1], 1); @@ -1352,7 +1352,7 @@ fn test_phantom_failure_reject_payment() { nodes[1].node.process_pending_htlc_forwards(); expect_payment_received!(nodes[1], payment_hash, payment_secret, recv_amt_msat); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_ignore!(nodes[1]); + 
expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); nodes[1].node.process_pending_htlc_forwards(); let update_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index d3feb96df11..efd4375ec04 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -21,7 +21,7 @@ use ln::features::{InitFeatures, InvoiceFeatures}; use ln::msgs; use ln::msgs::ChannelMessageHandler; use routing::router::{PaymentParameters, get_route}; -use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; use util::test_utils; use util::errors::APIError; use util::enforcing_trait_impls::EnforcingSigner; @@ -43,7 +43,7 @@ fn retry_single_path_payment() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known()); + let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known()); // Rebalance to find a route send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000); @@ -62,7 +62,7 @@ fn retry_single_path_payment() { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(&nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 })); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -120,10 +120,10 @@ fn mpp_retry() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_4_id = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); + let (chan_3_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()); + let (chan_4_update, _, chan_4_id, _) = create_announced_chan_between_nodes(&nodes, 3, 2, InitFeatures::known(), InitFeatures::known()); // Rebalance send_payment(&nodes[3], 
&vec!(&nodes[2])[..], 1_500_000); @@ -131,11 +131,11 @@ fn mpp_retry() { let path = route.paths[0].clone(); route.paths.push(path); route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); - route.paths[0][0].short_channel_id = chan_1_id; - route.paths[0][1].short_channel_id = chan_3_id; + route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id; + route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id; route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); - route.paths[1][0].short_channel_id = chan_2_id; - route.paths[1][1].short_channel_id = chan_4_id; + route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id; + route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. let payment_id = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); @@ -165,7 +165,7 @@ fn mpp_retry() { // Attempt to forward the payment and complete the 2nd path's failure. expect_pending_htlcs_forwardable!(&nodes[2]); - expect_pending_htlcs_forwardable!(&nodes[2]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[2], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_4_id })); let htlc_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -206,20 +206,20 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let (chan_1_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let (chan_2_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()); + let (chan_3_update, _, chan_3_id, _) = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()); + let (chan_4_update, _, _, _) = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()); let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 100_000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0][0].pubkey = nodes[1].node.get_our_node_id(); - route.paths[0][0].short_channel_id = chan_1_id; - route.paths[0][1].short_channel_id = chan_3_id; + route.paths[0][0].short_channel_id = chan_1_update.contents.short_channel_id; + route.paths[0][1].short_channel_id = chan_3_update.contents.short_channel_id; route.paths[1][0].pubkey = nodes[2].node.get_our_node_id(); - route.paths[1][0].short_channel_id = chan_2_id; - route.paths[1][1].short_channel_id = chan_4_id; + 
route.paths[1][0].short_channel_id = chan_2_update.contents.short_channel_id; + route.paths[1][1].short_channel_id = chan_4_update.contents.short_channel_id; // Initiate the MPP payment. let _ = nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); @@ -237,7 +237,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { } // Failed HTLC from node 3 -> 1 - expect_pending_htlcs_forwardable!(nodes[3]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); let htlc_fail_updates_3_1 = get_htlc_update_msgs!(nodes[3], nodes[1].node.get_our_node_id()); assert_eq!(htlc_fail_updates_3_1.update_fail_htlcs.len(), 1); nodes[1].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &htlc_fail_updates_3_1.update_fail_htlcs[0]); @@ -245,7 +245,7 @@ fn do_mpp_receive_timeout(send_partial_mpp: bool) { commitment_signed_dance!(nodes[1], nodes[3], htlc_fail_updates_3_1.commitment_signed, false); // Failed HTLC from node 1 -> 0 - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_3_id })); let htlc_fail_updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(htlc_fail_updates_1_0.update_fail_htlcs.len(), 1); nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates_1_0.update_fail_htlcs[0]); @@ -280,7 +280,7 @@ fn retry_expired_payment() { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let _chan_0 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let _chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known()); + let chan_1 = create_announced_chan_between_nodes(&nodes, 2, 1, InitFeatures::known(), InitFeatures::known()); // Rebalance to find a route send_payment(&nodes[2], &vec!(&nodes[1])[..], 3_000_000); @@ -299,7 +299,7 @@ fn retry_expired_payment() { check_added_monitors!(nodes[1], 0); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(&nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(&nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_1.2 })); let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(htlc_updates.update_add_htlcs.is_empty()); assert_eq!(htlc_updates.update_fail_htlcs.len(), 1); @@ -803,7 +803,7 @@ fn test_fulfill_restart_failure() { reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); nodes[1].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::FailedPayment { payment_hash })); check_added_monitors!(nodes[1], 1); let htlc_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); 
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_fail_updates.update_fail_htlcs[0]); diff --git a/lightning/src/ln/priv_short_conf_tests.rs b/lightning/src/ln/priv_short_conf_tests.rs index 508ba414037..bd8cf6f632e 100644 --- a/lightning/src/ln/priv_short_conf_tests.rs +++ b/lightning/src/ln/priv_short_conf_tests.rs @@ -22,7 +22,7 @@ use ln::msgs; use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ChannelUpdate, ErrorAction}; use ln::wire::Encode; use util::enforcing_trait_impls::EnforcingSigner; -use util::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider}; +use util::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider}; use util::config::UserConfig; use util::ser::{Writeable, ReadableArgs}; use util::test_utils; @@ -478,7 +478,7 @@ fn test_scid_alias_returned() { let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 0, InitFeatures::known(), InitFeatures::known()); - create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known()); + let chan = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 10_000, 0, InitFeatures::known(), InitFeatures::known()); let last_hop = nodes[2].node.list_usable_channels(); let mut hop_hints = vec![RouteHint(vec![RouteHintHop { @@ -508,7 +508,7 @@ fn test_scid_alias_returned() { commitment_signed_dance!(nodes[1], nodes[0], &as_updates.commitment_signed, false, true); expect_pending_htlcs_forwardable!(nodes[1]); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan.0.channel_id })); check_added_monitors!(nodes[1], 1); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index 29349392f76..3ccd216336d 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -16,7 +16,7 @@ use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs}; use ln::features::InitFeatures; use ln::msgs::ChannelMessageHandler; use util::enforcing_trait_impls::EnforcingSigner; -use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason}; +use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; use util::test_utils; use util::ser::{ReadableArgs, Writeable}; @@ -147,7 +147,7 @@ fn do_test_onchain_htlc_reorg(local_commitment: bool, claim: bool) { txdata: vec![], }; connect_block(&nodes[1], &block); - expect_pending_htlcs_forwardable!(nodes[1]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], HTLCHandlingFailedConditions::new().htlc_processing_failed_with_reason(HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 })); } check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 4f3d800be5d..fe99b06c9bc 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -169,7 +169,7 @@ impl<'a> chain::Watch for TestChainMonitor<'a> { update_res } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec)> { + fn 
release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> { return self.chain_monitor.release_pending_monitor_events(); } }
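
As a usage sketch (not part of the patch itself), a downstream event handler might react to the new HTLC-handling-failure notification by matching on the `HTLCDestination` variants exercised in the tests above. The event name and field used here (`Event::HTLCHandlingFailed`, `failed_next_destination`) are assumptions for illustration; only the `HTLCDestination` variants are taken from the diff.

    use lightning::util::events::{Event, HTLCDestination};

    fn note_htlc_failure(event: &Event) {
        // Hypothetical event shape; the variant and field names are assumed for illustration.
        if let Event::HTLCHandlingFailed { failed_next_destination, .. } = event {
            match failed_next_destination {
                // We failed to forward an HTLC onward over the given next-hop channel.
                HTLCDestination::NextHopChannel { node_id, channel_id } => {
                    println!("failed forwarding over channel {:?} to peer {:?}", channel_id, node_id);
                },
                // We failed to accept an HTLC that was destined for us.
                HTLCDestination::FailedPayment { payment_hash } => {
                    println!("failed to claim inbound HTLC with payment hash {:?}", payment_hash);
                },
                // Cover any other destinations this enum may grow.
                _ => {},
            }
        }
    }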