diff --git a/lightning-liquidity/tests/lsps2_integration_tests.rs b/lightning-liquidity/tests/lsps2_integration_tests.rs
index e4ace27b715..92052891dd5 100644
--- a/lightning-liquidity/tests/lsps2_integration_tests.rs
+++ b/lightning-liquidity/tests/lsps2_integration_tests.rs
@@ -1338,14 +1338,14 @@ fn client_trusts_lsp_end_to_end_test() {
 
 	let total_fee_msat = match service_events[0].clone() {
 		Event::PaymentForwarded {
-			prev_node_id,
-			next_node_id,
+			ref prev_htlcs,
+			ref next_htlcs,
 			skimmed_fee_msat,
 			total_fee_earned_msat,
 			..
 		} => {
-			assert_eq!(prev_node_id, Some(payer_node_id));
-			assert_eq!(next_node_id, Some(client_node_id));
+			assert_eq!(prev_htlcs[0].node_id, Some(payer_node_id));
+			assert_eq!(next_htlcs[0].node_id, Some(client_node_id));
 			service_handler.payment_forwarded(channel_id, skimmed_fee_msat.unwrap_or(0)).unwrap();
 			Some(total_fee_earned_msat.unwrap() - skimmed_fee_msat.unwrap())
 		},
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 515a3dc5f1d..ee93211af47 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -2868,6 +2868,7 @@ impl ChannelMonitorImpl {
 				let outbound_payment = match source {
 					None => panic!("Outbound HTLCs should have a source"),
 					Some(&HTLCSource::PreviousHopData(_)) => false,
+					Some(&HTLCSource::TrampolineForward { .. }) => false,
 					Some(&HTLCSource::OutboundRoute { .. }) => true,
 				};
 				return Some(Balance::MaybeTimeoutClaimableHTLC {
@@ -3080,6 +3081,7 @@ impl ChannelMonitor {
 			let outbound_payment = match source {
 				None => panic!("Outbound HTLCs should have a source"),
 				Some(HTLCSource::PreviousHopData(_)) => false,
+				Some(HTLCSource::TrampolineForward { .. }) => false,
 				Some(HTLCSource::OutboundRoute { .. }) => true,
 			};
 			if outbound_payment {
diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs
index d97ae6097b6..f8e34df8bf9 100644
--- a/lightning/src/events/mod.rs
+++ b/lightning/src/events/mod.rs
@@ -44,7 +44,7 @@ use crate::util::ser::{
 	WithoutLength, Writeable, Writer,
 };
-use crate::io;
+use crate::io::{self, ErrorKind::InvalidData as IOInvalidData};
 use crate::sync::Arc;
 
 use bitcoin::hashes::sha256::Hash as Sha256;
 use bitcoin::hashes::Hash;
@@ -575,6 +575,14 @@ pub enum HTLCHandlingFailureType {
 		/// The payment hash of the payment we attempted to process.
 		payment_hash: PaymentHash,
 	},
+	/// We were responsible for pathfinding and forwarding of a trampoline payment, but failed to
+	/// do so. An example of such an instance is when we can't find a route to the specified
+	/// trampoline destination.
+	TrampolineForward {
+		/// The set of HTLCs dispatched by our node in an attempt to complete the trampoline forward
+		/// which have failed.
+		attempted_htlcs: Vec<HTLCLocator>,
+	},
 }
 
 impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType,
@@ -592,6 +600,9 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCHandlingFailureType,
 	(4, Receive) => {
 		(0, payment_hash, required),
 	},
+	(5, TrampolineForward) => {
+		(1, attempted_htlcs, required_vec),
+	},
 );
 
 /// The reason for HTLC failures in [`Event::HTLCHandlingFailed`].
@@ -729,6 +740,25 @@ pub enum InboundChannelFunds {
 	DualFunded,
 }
 
+/// Identifies the channel and peer committed to an HTLC, used for both incoming and outgoing HTLCs.
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub struct HTLCLocator {
+	/// The channel that the HTLC was sent or received on.
+	pub channel_id: ChannelId,
+
+	/// The `user_channel_id` for `channel_id`.
+	pub user_channel_id: Option<u128>,
+
+	/// The public key identity of the node that the HTLC was sent to or received from.
+	pub node_id: Option<PublicKey>,
+}
+
+impl_writeable_tlv_based!(HTLCLocator, {
+	(1, channel_id, required),
+	(3, user_channel_id, option),
+	(5, node_id, option),
+});
+
 /// An Event which you should probably take some action in response to.
 ///
 /// Note that while Writeable and Readable are implemented for Event, you probably shouldn't use
@@ -1312,38 +1342,22 @@ pub enum Event {
 	/// This event is generated when a payment has been successfully forwarded through us and a
 	/// forwarding fee earned.
 	///
+	/// Note that downgrading from 0.3 with pending trampoline forwards that use multipart payments
+	/// will produce an event that only provides information about the first HTLC that was
+	/// received/dispatched.
+	///
 	/// # Failure Behavior and Persistence
 	/// This event will eventually be replayed after failures-to-handle (i.e., the event handler
 	/// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
 	PaymentForwarded {
-		/// The channel id of the incoming channel between the previous node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.107.
-		prev_channel_id: Option<ChannelId>,
-		/// The channel id of the outgoing channel between the next node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.107.
-		next_channel_id: Option<ChannelId>,
-		/// The `user_channel_id` of the incoming channel between the previous node and us.
-		///
-		/// This is only `None` for events generated or serialized by versions prior to 0.0.122.
-		prev_user_channel_id: Option<u128>,
-		/// The `user_channel_id` of the outgoing channel between the next node and us.
-		///
-		/// This will be `None` if the payment was settled via an on-chain transaction. See the
-		/// caveat described for the `total_fee_earned_msat` field. Moreover it will be `None` for
-		/// events generated or serialized by versions prior to 0.0.122.
-		next_user_channel_id: Option<u128>,
-		/// The node id of the previous node.
-		///
-		/// This is only `None` for HTLCs received prior to 0.1 or for events serialized by
-		/// versions prior to 0.1
-		prev_node_id: Option<PublicKey>,
-		/// The node id of the next node.
-		///
-		/// This is only `None` for HTLCs received prior to 0.1 or for events serialized by
-		/// versions prior to 0.1
-		next_node_id: Option<PublicKey>,
+		/// The set of HTLCs forwarded to our node that will be claimed by this forward. Contains a
+		/// single HTLC for source-routed payments, and may contain multiple HTLCs when we acted as
+		/// a trampoline router, responsible for pathfinding within the route.
+		prev_htlcs: Vec<HTLCLocator>,
+		/// The set of HTLCs forwarded by our node that have been claimed by this forward. Contains
+		/// a single HTLC for regular source-routed payments, and may contain multiple HTLCs when
+		/// we acted as a trampoline router, responsible for pathfinding within the route.
+		next_htlcs: Vec<HTLCLocator>,
 		/// The total fee, in milli-satoshis, which was earned as a result of the payment.
 		///
 		/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
@@ -1660,12 +1674,17 @@ pub enum Event {
 	/// Indicates that the HTLC was accepted, but could not be processed when or after attempting to
 	/// forward it.
 	///
+	/// Note that downgrading from 0.3 with pending trampoline forwards that have incoming multipart
+	/// payments will produce an event that only provides information about the first HTLC that was
+	/// received/dispatched.
+	///
 	/// # Failure Behavior and Persistence
 	/// This event will eventually be replayed after failures-to-handle (i.e., the event handler
 	/// returning `Err(ReplayEvent ())`) and will be persisted across restarts.
 	HTLCHandlingFailed {
-		/// The channel over which the HTLC was received.
-		prev_channel_id: ChannelId,
+		/// The channel(s) over which the HTLC(s) were received. May contain multiple entries for
+		/// trampoline forwards.
+		prev_channel_ids: Vec<ChannelId>,
 		/// The type of HTLC handling that failed.
 		failure_type: HTLCHandlingFailureType,
 		/// The reason that the HTLC failed.
@@ -2027,29 +2046,33 @@ impl Writeable for Event {
 				});
 			},
 			&Event::PaymentForwarded {
-				prev_channel_id,
-				next_channel_id,
-				prev_user_channel_id,
-				next_user_channel_id,
-				prev_node_id,
-				next_node_id,
+				ref prev_htlcs,
+				ref next_htlcs,
 				total_fee_earned_msat,
 				skimmed_fee_msat,
 				claim_from_onchain_tx,
 				outbound_amount_forwarded_msat,
 			} => {
 				7u8.write(writer)?;
+				// Fields 1, 3, 9, 11, 13 and 15 are written for backwards compatibility.
+				let legacy_prev = prev_htlcs.first().ok_or(io::Error::from(IOInvalidData))?;
+				let legacy_next = next_htlcs.first().ok_or(io::Error::from(IOInvalidData))?;
 				write_tlv_fields!(writer, {
 					(0, total_fee_earned_msat, option),
-					(1, prev_channel_id, option),
+					(1, Some(legacy_prev.channel_id), option),
 					(2, claim_from_onchain_tx, required),
-					(3, next_channel_id, option),
+					(3, Some(legacy_next.channel_id), option),
 					(5, outbound_amount_forwarded_msat, option),
 					(7, skimmed_fee_msat, option),
-					(9, prev_user_channel_id, option),
-					(11, next_user_channel_id, option),
-					(13, prev_node_id, option),
-					(15, next_node_id, option),
+					(9, legacy_prev.user_channel_id, option),
+					(11, legacy_next.user_channel_id, option),
+					(13, legacy_prev.node_id, option),
+					(15, legacy_next.node_id, option),
+					// HTLCs are written as required, rather than required_vec, so that they can be
+					// deserialized using default_value to fill in legacy fields which expects
+					// LengthReadable (required_vec is WithoutLength).
+					(17, *prev_htlcs, required),
+					(19, *next_htlcs, required),
 				});
 			},
 			&Event::ChannelClosed {
@@ -2197,15 +2220,19 @@ impl Writeable for Event {
 				})
 			},
 			&Event::HTLCHandlingFailed {
-				ref prev_channel_id,
+				ref prev_channel_ids,
 				ref failure_type,
 				ref failure_reason,
 			} => {
 				25u8.write(writer)?;
+				let legacy_chan_id =
+					prev_channel_ids.first().ok_or(io::Error::from(IOInvalidData))?;
 				write_tlv_fields!(writer, {
-					(0, prev_channel_id, required),
+					// Write legacy field to remain backwards compatible.
+					(0, legacy_chan_id, required),
 					(1, failure_reason, option),
 					(2, failure_type, required),
+					(3, *prev_channel_ids, required),
 				})
 			},
 			&Event::BumpTransaction(ref event) => {
@@ -2544,35 +2571,51 @@ impl MaybeReadable for Event {
 			},
 			7u8 => {
 				let mut f = || {
-					let mut prev_channel_id = None;
-					let mut next_channel_id = None;
-					let mut prev_user_channel_id = None;
-					let mut next_user_channel_id = None;
-					let mut prev_node_id = None;
-					let mut next_node_id = None;
+					// Legacy values that have been replaced by prev_htlcs and next_htlcs.
+ let mut prev_channel_id_legacy = None; + let mut next_channel_id_legacy = None; + let mut prev_user_channel_id_legacy = None; + let mut next_user_channel_id_legacy = None; + let mut prev_node_id_legacy = None; + let mut next_node_id_legacy = None; + let mut total_fee_earned_msat = None; let mut skimmed_fee_msat = None; let mut claim_from_onchain_tx = false; let mut outbound_amount_forwarded_msat = None; + let mut prev_htlcs = vec![]; + let mut next_htlcs = vec![]; read_tlv_fields!(reader, { (0, total_fee_earned_msat, option), - (1, prev_channel_id, option), + (1, prev_channel_id_legacy, option), (2, claim_from_onchain_tx, required), - (3, next_channel_id, option), + (3, next_channel_id_legacy, option), (5, outbound_amount_forwarded_msat, option), (7, skimmed_fee_msat, option), - (9, prev_user_channel_id, option), - (11, next_user_channel_id, option), - (13, prev_node_id, option), - (15, next_node_id, option), + (9, prev_user_channel_id_legacy, option), + (11, next_user_channel_id_legacy, option), + (13, prev_node_id_legacy, option), + (15, next_node_id_legacy, option), + // We can unwrap in the eagerly-evaluated default_value code because we + // always write legacy fields to be backwards compatible, and expect + // this field to be set because the legacy field was only None for versions + // before 0.0.107 and we do not allow upgrades with pending forwards to 0.1 + // for any version before 0.0.123. + (17, prev_htlcs, (default_value, vec![HTLCLocator{ + channel_id: prev_channel_id_legacy.unwrap(), + user_channel_id: prev_user_channel_id_legacy, + node_id: prev_node_id_legacy, + }])), + (19, next_htlcs, (default_value, vec![HTLCLocator{ + channel_id: next_channel_id_legacy.unwrap(), + user_channel_id: next_user_channel_id_legacy, + node_id: next_node_id_legacy, + }])), }); + Ok(Some(Event::PaymentForwarded { - prev_channel_id, - next_channel_id, - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id, + prev_htlcs, + next_htlcs, total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, @@ -2761,13 +2804,17 @@ impl MaybeReadable for Event { }, 25u8 => { let mut f = || { - let mut prev_channel_id = ChannelId::new_zero(); + let mut prev_channel_id_legacy = ChannelId::new_zero(); let mut failure_reason = None; let mut failure_type_opt = UpgradableRequired(None); + let mut prev_channel_ids = vec![]; read_tlv_fields!(reader, { - (0, prev_channel_id, required), + (0, prev_channel_id_legacy, required), (1, failure_reason, option), (2, failure_type_opt, upgradable_required), + (3, prev_channel_ids, (default_value, vec![ + prev_channel_id_legacy, + ])), }); // If a legacy HTLCHandlingFailureType::UnknownNextHop was written, upgrade @@ -2782,7 +2829,7 @@ impl MaybeReadable for Event { failure_reason = Some(LocalHTLCFailureReason::UnknownNextPeer.into()); } Ok(Some(Event::HTLCHandlingFailed { - prev_channel_id, + prev_channel_ids, failure_type: _init_tlv_based_struct_field!( failure_type_opt, upgradable_required diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f3399ff8787..ce75b7b54f3 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -718,6 +718,7 @@ impl Default for OptionalOfferPaymentParams { pub(crate) enum SentHTLCId { PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 }, OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] }, + TrampolineForward { session_priv: [u8; SECRET_KEY_SIZE] }, } impl SentHTLCId { pub(crate) fn from_source(source: &HTLCSource) -> Self { @@ -726,6 
+727,9 @@ impl SentHTLCId {
 				prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias,
 				htlc_id: hop_data.htlc_id,
 			},
+			HTLCSource::TrampolineForward { session_priv, .. } => {
+				Self::TrampolineForward { session_priv: session_priv.secret_bytes() }
+			},
 			HTLCSource::OutboundRoute { session_priv, .. } => {
 				Self::OutboundRoute { session_priv: session_priv.secret_bytes() }
 			},
@@ -740,6 +744,9 @@ impl_writeable_tlv_based_enum!(SentHTLCId,
 	(2, OutboundRoute) => {
 		(0, session_priv, required),
 	},
+	(4, TrampolineForward) => {
+		(0, session_priv, required),
+	},
 );
 
 // (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)
@@ -756,6 +763,16 @@ mod fuzzy_channelmanager {
 	#[derive(Clone, Debug, PartialEq, Eq)]
 	pub enum HTLCSource {
 		PreviousHopData(HTLCPreviousHopData),
+		TrampolineForward {
+			/// We might be forwarding an incoming payment that was received over MPP, and therefore
+			/// need to store the vector of corresponding `HTLCPreviousHopData` values.
+			previous_hop_data: Vec<HTLCPreviousHopData>,
+			incoming_trampoline_shared_secret: [u8; 32],
+			path: Path,
+			/// In order to decode inter-Trampoline errors, we need to store the session_priv key
+			/// given we're effectively creating new outbound routes.
+			session_priv: SecretKey,
+		},
 		OutboundRoute {
 			path: Path,
 			session_priv: SecretKey,
@@ -790,6 +807,16 @@ mod fuzzy_channelmanager {
 		/// channel remains unconfirmed for too long.
 		pub cltv_expiry: Option<u32>,
 	}
+
+	impl From<&HTLCPreviousHopData> for events::HTLCLocator {
+		fn from(value: &HTLCPreviousHopData) -> Self {
+			events::HTLCLocator {
+				channel_id: value.channel_id,
+				user_channel_id: value.user_channel_id,
+				node_id: value.counterparty_node_id,
+			}
+		}
+	}
 }
 #[cfg(fuzzing)]
 pub use self::fuzzy_channelmanager::*;
@@ -818,6 +845,18 @@ impl core::hash::Hash for HTLCSource {
 				first_hop_htlc_msat.hash(hasher);
 				bolt12_invoice.hash(hasher);
 			},
+			HTLCSource::TrampolineForward {
+				previous_hop_data,
+				incoming_trampoline_shared_secret,
+				path,
+				session_priv,
+			} => {
+				2u8.hash(hasher);
+				previous_hop_data.hash(hasher);
+				incoming_trampoline_shared_secret.hash(hasher);
+				path.hash(hasher);
+				session_priv[..].hash(hasher);
+			},
 		}
 	}
 }
@@ -1320,8 +1359,8 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// completes a monitor update containing the payment preimage. In that case, after the inbound
 	/// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
 	/// outbound edge.
-	EmitEventAndFreeOtherChannel {
-		event: events::Event,
+	EmitEventOptionAndFreeOtherChannel {
+		event: Option<events::Event>,
 		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
 	},
 	/// Indicates we should immediately resume the operation of another channel, unless there is
@@ -1331,8 +1370,8 @@ pub(crate) enum MonitorUpdateCompletionAction {
 	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
 	/// from completing a monitor update which removes the payment preimage until the inbound edge
 	/// completes a monitor update containing the payment preimage. However, we use this variant
-	/// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
-	/// fact duplicative and we simply want to resume the outbound edge channel immediately.
+	/// instead of [`Self::EmitEventOptionAndFreeOtherChannel`] when we discover that the claim was
+	/// in fact duplicative and we simply want to resume the outbound edge channel immediately.
/// /// This variant should thus never be written to disk, as it is processed inline rather than /// stored for later processing. @@ -1355,8 +1394,11 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (4, blocking_action, upgradable_required), (5, downstream_channel_id, required), }, - (2, EmitEventAndFreeOtherChannel) => { - (0, event, upgradable_required), + (2, EmitEventOptionAndFreeOtherChannel) => { + // LDK prior to 0.3 required this field. It will not be present for trampoline payments + // with multiple incoming HTLCS, so nodes cannot downgrade while trampoline payments + // are in the process of being resolved. + (0, event, upgradable_option), // LDK prior to 0.0.116 did not have this field as the monitor update application order was // required by clients. If we downgrade to something prior to 0.0.116 this may result in // monitor updates which aren't properly blocked or resumed, however that's fine - we don't @@ -7228,7 +7270,7 @@ where .push(failure); self.pending_events.lock().unwrap().push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: incoming_channel_id, + prev_channel_ids: vec![incoming_channel_id], failure_type, failure_reason: Some(failure_reason), }, @@ -8689,6 +8731,19 @@ where debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread); } + let push_forward_htlcs_failure = + |prev_outbound_scid_alias: u64, failure: HTLCForwardInfo| { + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + match forward_htlcs.entry(prev_outbound_scid_alias) { + hash_map::Entry::Occupied(mut entry) => { + entry.get_mut().push(failure); + }, + hash_map::Entry::Vacant(entry) => { + entry.insert(vec![failure]); + }, + } + }; + //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. We should make this async and move it into the forward HTLCs @@ -8747,49 +8802,81 @@ where if blinded_failure.is_some() { "blinded " } else { "" }, onion_error ); - // In case of trampoline + phantom we prioritize the trampoline failure over the phantom failure. - // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC. 
- let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret); - let failure = match blinded_failure { - Some(BlindedFailure::FromIntroductionNode) => { - let blinded_onion_error = HTLCFailReason::reason( - LocalHTLCFailureReason::InvalidOnionBlinding, - vec![0; 32], - ); - let err_packet = blinded_onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, - &secondary_shared_secret, - ); - HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } - }, - Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC { - htlc_id: *htlc_id, - failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), - sha256_of_onion: [0; 32], - }, - None => { - let err_packet = onion_error.get_encrypted_failure_packet( - incoming_packet_shared_secret, - &secondary_shared_secret, - ); - HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet } - }, - }; - let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); - match forward_htlcs.entry(*prev_outbound_scid_alias) { - hash_map::Entry::Occupied(mut entry) => { - entry.get_mut().push(failure); - }, - hash_map::Entry::Vacant(entry) => { - entry.insert(vec![failure]); + push_forward_htlcs_failure( + *prev_outbound_scid_alias, + get_htlc_forward_failure( + blinded_failure, + onion_error, + incoming_packet_shared_secret, + trampoline_shared_secret, + phantom_shared_secret, + *htlc_id, + ), + ); + + let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back(( + events::Event::HTLCHandlingFailed { + prev_channel_ids: vec![*channel_id], + failure_type, + failure_reason: Some(onion_error.into()), }, + None, + )); + }, + HTLCSource::TrampolineForward { + previous_hop_data, + incoming_trampoline_shared_secret, + .. + } => { + // TODO: what do we want to do with this given we do not wish to propagate it directly? + let _decoded_onion_failure = + onion_error.decode_onion_failure(&self.secp_ctx, &self.logger, &source); + let incoming_trampoline_shared_secret = Some(*incoming_trampoline_shared_secret); + + // TODO: when we receive a failure from a single outgoing trampoline HTLC, we don't + // necessarily want to fail all of our incoming HTLCs back yet. We may have other + // outgoing HTLCs that need to resolve first. This will be tracked in our + // pending_outbound_payments in a followup. + for current_hop_data in previous_hop_data { + let incoming_packet_shared_secret = + ¤t_hop_data.incoming_packet_shared_secret; + let channel_id = ¤t_hop_data.channel_id; + let short_channel_id = ¤t_hop_data.prev_outbound_scid_alias; + let htlc_id = ¤t_hop_data.htlc_id; + let blinded_failure = ¤t_hop_data.blinded_failure; + log_trace!( + WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)), + "Failing {}HTLC with payment_hash {} backwards from us following Trampoline forwarding failure: {:?}", + if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error + ); + let onion_error = HTLCFailReason::reason( + LocalHTLCFailureReason::TemporaryTrampolineFailure, + Vec::new(), + ); + push_forward_htlcs_failure( + *short_channel_id, + get_htlc_forward_failure( + blinded_failure, + &onion_error, + incoming_packet_shared_secret, + &incoming_trampoline_shared_secret, + &None, + *htlc_id, + ), + ); } - mem::drop(forward_htlcs); + + // We only want to emit a single event for trampoline failures, so we do it once + // we've failed back all of our incoming HTLCs. 
let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back(( events::Event::HTLCHandlingFailed { - prev_channel_id: *channel_id, + prev_channel_ids: previous_hop_data + .iter() + .map(|prev| prev.channel_id) + .collect(), failure_type, failure_reason: Some(onion_error.into()), }, @@ -9028,6 +9115,133 @@ where } } + /// Claims funds for a forwarded HTLC where we are an intermediate hop. + /// + /// Processes attribution data, calculates fees earned, and emits a [`Event::PaymentForwarded`] + /// event upon successful claim. `make_payment_forwarded_event` is responsible for creating a + /// single [`Event::PaymentForwarded`] event that represents the forward. + fn claim_funds_from_htlc_forward_hop( + &self, payment_preimage: PaymentPreimage, + make_payment_forwarded_event: impl Fn(Option) -> Option, + startup_replay: bool, next_channel_counterparty_node_id: PublicKey, + next_channel_outpoint: OutPoint, next_channel_id: ChannelId, hop_data: HTLCPreviousHopData, + attribution_data: Option, send_timestamp: Option, + ) { + let _prev_channel_id = hop_data.channel_id; + let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); + + // Obtain hold time, if available. + let hold_time = hold_time_since(send_timestamp).unwrap_or(0); + + // If attribution data was received from downstream, we shift it and get it ready for adding our hold + // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA + // to record the hold time like we do for failed HTLCs. + let attribution_data = process_fulfill_attribution_data( + attribution_data, + &hop_data.incoming_packet_shared_secret, + hold_time, + ); + + #[cfg(test)] + let claiming_chan_funding_outpoint = hop_data.outpoint; + self.claim_funds_from_hop( + hop_data, + payment_preimage, + None, + Some(attribution_data), + |htlc_claim_value_msat, definitely_duplicate| { + let chan_to_release = Some(EventUnblockedChannel { + counterparty_node_id: next_channel_counterparty_node_id, + funding_txo: next_channel_outpoint, + channel_id: next_channel_id, + blocking_action: completed_blocker, + }); + + if definitely_duplicate && startup_replay { + // On startup we may get redundant claims which are related to + // monitor updates still in flight. In that case, we shouldn't + // immediately free, but instead let that monitor update complete + // in the background. + #[cfg(test)] + { + let per_peer_state = self.per_peer_state.deadlocking_read(); + // The channel we'd unblock should already be closed, or... + let channel_closed = per_peer_state + .get(&next_channel_counterparty_node_id) + .map(|lck| lck.deadlocking_lock()) + .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) + .unwrap_or(true); + let background_events = self.pending_background_events.lock().unwrap(); + // there should be a `BackgroundEvent` pending... + let matching_bg_event = + background_events.iter().any(|ev| { + match ev { + // to apply a monitor update that blocked the claiming channel, + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { + funding_txo, + update, + .. + } => { + if *funding_txo == claiming_chan_funding_outpoint { + assert!( + update.updates.iter().any(|upd| { + if let ChannelMonitorUpdateStep::PaymentPreimage { + payment_preimage: update_preimage, .. + } = upd { + payment_preimage == *update_preimage + } else { false } + }), + "{:?}", + update + ); + true + } else { + false + } + }, + // or the monitor update has completed and will unblock + // immediately once we get going. 
+ BackgroundEvent::MonitorUpdatesComplete { + channel_id, .. + } => *channel_id == _prev_channel_id, + } + }); + assert!(channel_closed || matching_bg_event, "{:?}", *background_events); + } + (None, None) + } else if definitely_duplicate { + if let Some(other_chan) = chan_to_release { + ( + Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { + downstream_counterparty_node_id: other_chan.counterparty_node_id, + downstream_channel_id: other_chan.channel_id, + blocking_action: other_chan.blocking_action, + }), + None, + ) + } else { + (None, None) + } + } else { + let event = make_payment_forwarded_event(htlc_claim_value_msat); + if let Some(ref payment_forwarded) = event { + debug_assert!(matches!( + payment_forwarded, + &events::Event::PaymentForwarded { .. } + )); + } + ( + Some(MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { + event, + downstream_counterparty_and_funding_outpoint: chan_to_release, + }), + None, + ) + } + }, + ); + } + fn claim_funds_from_hop< ComplFunc: FnOnce( Option, @@ -9409,136 +9623,88 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } }, HTLCSource::PreviousHopData(hop_data) => { - let prev_channel_id = hop_data.channel_id; - let prev_user_channel_id = hop_data.user_channel_id; - let prev_node_id = hop_data.counterparty_node_id; - let completed_blocker = - RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); - - // Obtain hold time, if available. - let hold_time = hold_time_since(send_timestamp).unwrap_or(0); - - // If attribution data was received from downstream, we shift it and get it ready for adding our hold - // time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA - // to record the hold time like we do for failed HTLCs. - let attribution_data = process_fulfill_attribution_data( - attribution_data, - &hop_data.incoming_packet_shared_secret, - hold_time, - ); - - #[cfg(test)] - let claiming_chan_funding_outpoint = hop_data.outpoint; - self.claim_funds_from_hop( - hop_data, + let prev_htlcs = vec![events::HTLCLocator::from(&hop_data)]; + self.claim_funds_from_htlc_forward_hop( payment_preimage, - None, - Some(attribution_data), - |htlc_claim_value_msat, definitely_duplicate| { - let chan_to_release = Some(EventUnblockedChannel { - counterparty_node_id: next_channel_counterparty_node_id, - funding_txo: next_channel_outpoint, - channel_id: next_channel_id, - blocking_action: completed_blocker, - }); - - if definitely_duplicate && startup_replay { - // On startup we may get redundant claims which are related to - // monitor updates still in flight. In that case, we shouldn't - // immediately free, but instead let that monitor update complete - // in the background. - #[cfg(test)] - { - let per_peer_state = self.per_peer_state.deadlocking_read(); - // The channel we'd unblock should already be closed, or... - let channel_closed = per_peer_state - .get(&next_channel_counterparty_node_id) - .map(|lck| lck.deadlocking_lock()) - .map(|peer| !peer.channel_by_id.contains_key(&next_channel_id)) - .unwrap_or(true); - let background_events = - self.pending_background_events.lock().unwrap(); - // there should be a `BackgroundEvent` pending... - let matching_bg_event = - background_events.iter().any(|ev| { - match ev { - // to apply a monitor update that blocked the claiming channel, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - funding_txo, update, .. 
- } => { - if *funding_txo == claiming_chan_funding_outpoint { - assert!(update.updates.iter().any(|upd| - if let ChannelMonitorUpdateStep::PaymentPreimage { - payment_preimage: update_preimage, .. - } = upd { - payment_preimage == *update_preimage - } else { false } - ), "{:?}", update); - true - } else { false } - }, - // or the monitor update has completed and will unblock - // immediately once we get going. - BackgroundEvent::MonitorUpdatesComplete { - channel_id, .. - } => - *channel_id == prev_channel_id, - } - }); - assert!( - channel_closed || matching_bg_event, - "{:?}", - *background_events - ); - } - (None, None) - } else if definitely_duplicate { - if let Some(other_chan) = chan_to_release { - (Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id: other_chan.counterparty_node_id, - downstream_channel_id: other_chan.channel_id, - blocking_action: other_chan.blocking_action, - }), None) - } else { - (None, None) - } - } else { - let total_fee_earned_msat = - if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { - if let Some(claimed_htlc_value) = htlc_claim_value_msat { - Some(claimed_htlc_value - forwarded_htlc_value) - } else { - None - } + |htlc_claim_value_msat: Option| -> Option { + let total_fee_earned_msat = + if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat { + if let Some(claimed_htlc_value) = htlc_claim_value_msat { + Some(claimed_htlc_value - forwarded_htlc_value) } else { None - }; - debug_assert!( - skimmed_fee_msat <= total_fee_earned_msat, - "skimmed_fee_msat must always be included in total_fee_earned_msat" - ); - ( - Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { - event: events::Event::PaymentForwarded { - prev_channel_id: Some(prev_channel_id), - next_channel_id: Some(next_channel_id), - prev_user_channel_id, - next_user_channel_id, - prev_node_id, - next_node_id: Some(next_channel_counterparty_node_id), - total_fee_earned_msat, - skimmed_fee_msat, - claim_from_onchain_tx: from_onchain, - outbound_amount_forwarded_msat: forwarded_htlc_value_msat, - }, - downstream_counterparty_and_funding_outpoint: chan_to_release, - }), - None, - ) - } + } + } else { + None + }; + debug_assert!( + skimmed_fee_msat <= total_fee_earned_msat, + "skimmed_fee_msat must always be included in total_fee_earned_msat" + ); + + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + total_fee_earned_msat, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) }, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + hop_data, + attribution_data, + send_timestamp, ); }, + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + // Only emit a single event for trampoline claims. + let prev_htlcs: Vec = + previous_hop_data.iter().map(Into::into).collect(); + for (i, current_previous_hop_data) in previous_hop_data.into_iter().enumerate() { + self.claim_funds_from_htlc_forward_hop( + payment_preimage, + |_: Option| -> Option { + if i == 0 { + Some(events::Event::PaymentForwarded { + prev_htlcs: prev_htlcs.clone(), + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to provide all the + // outgoing htlcs for this forward. 
+ next_htlcs: vec![events::HTLCLocator { + channel_id: next_channel_id, + user_channel_id: next_user_channel_id, + node_id: Some(next_channel_counterparty_node_id), + }], + // TODO: When trampoline payments are tracked in our + // pending_outbound_payments, we'll be able to lookup our total + // fee earnings. + total_fee_earned_msat: None, + skimmed_fee_msat, + claim_from_onchain_tx: from_onchain, + outbound_amount_forwarded_msat: forwarded_htlc_value_msat, + }) + } else { + None + } + }, + startup_replay, + next_channel_counterparty_node_id, + next_channel_outpoint, + next_channel_id, + current_previous_hop_data, + attribution_data.clone(), + send_timestamp, + ); + } + }, } } @@ -9746,11 +9912,13 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } }, - MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { event, downstream_counterparty_and_funding_outpoint, } => { - self.pending_events.lock().unwrap().push_back((event, None)); + if let Some(event) = event { + self.pending_events.lock().unwrap().push_back((event, None)); + } if let Some(unblocked) = downstream_counterparty_and_funding_outpoint { self.handle_monitor_update_release( unblocked.counterparty_node_id, @@ -12964,6 +13132,43 @@ This indicates a bug inside LDK. Please report this error at https://github.com/ } } +/// Constructs an HTLC forward failure for sending back to the previous hop, converting to a blinded +/// failure where appropriate. +/// +/// When both trampoline and phantom secrets are present, the trampoline secret takes priority +/// for error encryption. +fn get_htlc_forward_failure( + blinded_failure: &Option, onion_error: &HTLCFailReason, + incoming_packet_shared_secret: &[u8; 32], trampoline_shared_secret: &Option<[u8; 32]>, + phantom_shared_secret: &Option<[u8; 32]>, htlc_id: u64, +) -> HTLCForwardInfo { + // TODO: Correctly wrap the error packet twice if failing back a trampoline + phantom HTLC. + let secondary_shared_secret = trampoline_shared_secret.or(*phantom_shared_secret); + match blinded_failure { + Some(BlindedFailure::FromIntroductionNode) => { + let blinded_onion_error = + HTLCFailReason::reason(LocalHTLCFailureReason::InvalidOnionBlinding, vec![0; 32]); + let err_packet = blinded_onion_error.get_encrypted_failure_packet( + incoming_packet_shared_secret, + &secondary_shared_secret, + ); + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } + }, + Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC { + htlc_id, + failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(), + sha256_of_onion: [0; 32], + }, + None => { + let err_packet = onion_error.get_encrypted_failure_packet( + incoming_packet_shared_secret, + &secondary_shared_secret, + ); + HTLCForwardInfo::FailHTLC { htlc_id, err_packet } + }, + } +} + /// Parameters used with [`create_bolt11_invoice`]. 
 ///
 /// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice
@@ -16468,6 +16673,20 @@ impl Readable for HTLCSource {
 				})
 			}
 			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+			2 => {
+				_init_and_read_len_prefixed_tlv_fields!(reader, {
+					(1, previous_hop_data, required_vec),
+					(3, incoming_trampoline_shared_secret, required),
+					(5, path, required),
+					(7, session_priv, required),
+				});
+				Ok(HTLCSource::TrampolineForward {
+					previous_hop_data: _init_tlv_based_struct_field!(previous_hop_data, required_vec),
+					incoming_trampoline_shared_secret: _init_tlv_based_struct_field!(incoming_trampoline_shared_secret, required),
+					path: _init_tlv_based_struct_field!(path, required),
+					session_priv: _init_tlv_based_struct_field!(session_priv, required),
+				})
+			},
 			_ => Err(DecodeError::UnknownRequiredFeature),
 		}
 	}
@@ -16500,6 +16719,20 @@ impl Writeable for HTLCSource {
 				1u8.write(writer)?;
 				field.write(writer)?;
 			},
+			HTLCSource::TrampolineForward {
+				ref previous_hop_data,
+				incoming_trampoline_shared_secret,
+				ref session_priv,
+				ref path,
+			} => {
+				2u8.write(writer)?;
+				write_tlv_fields!(writer, {
+					(1, *previous_hop_data, required_vec),
+					(3, incoming_trampoline_shared_secret, required),
+					(5, *path, required),
+					(7, session_priv, required),
+				});
+			},
 		}
 		Ok(())
 	}
@@ -17124,6 +17357,91 @@ fn dedup_decode_update_add_htlcs(
 	});
 }
 
+/// Checks if a forwarded HTLC claim needs to be replayed on startup, returning None if it doesn't
+/// need to be replayed. When the HTLC needs to be claimed, it returns a bool indicating whether
+/// deserialization should fail due to missing information.
+fn prev_hop_needs_claim_replay(
+	prev_hop: &HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+	inbound_edge_monitor: Option<&ChannelMonitor>,
+	short_to_chan_info: &HashMap<u64, (PublicKey, ChannelId)>, logger: &L,
+) -> Option<bool>
+where
+	L::Target: Logger,
+{
+	// Note that for channels which have gone to chain, `get_all_current_outbound_htlcs` is never
+	// pruned and always returns a constant set until the monitor is removed/archived. Thus, we
+	// want to skip replaying claims that have definitely been resolved on-chain.
+
+	// If the inbound monitor is not present, we assume it was fully resolved and properly archived,
+	// implying this payment had plenty of time to get claimed and we can safely skip any further
+	// attempts to claim it (they wouldn't succeed anyway as we don't have a monitor against which
+	// to do so).
+	let inbound_edge_monitor = inbound_edge_monitor?;
+
+	// Second, if the inbound edge of the payment's monitor has been fully claimed we've had at
+	// least `ANTI_REORG_DELAY` blocks to get any PaymentForwarded event(s) to the user and assume
+	// that there's no need to try to replay the claim just for that.
+	let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
+	if inbound_edge_balances.is_empty() {
+		return None;
+	}
+
+	let mut fail_read = false;
+	if prev_hop.counterparty_node_id.is_none() {
+		// We no longer support claiming an HTLC where we don't have the counterparty_node_id
+		// available if the claim has to go to a closed channel. It's possible we can get away with
+		// it if the channel is not yet closed, but it's by no means a guarantee.
+
+		// Thus, in this case we are a bit more aggressive with our pruning - if we have no use for
+		// the claim (because the inbound edge of the payment's monitor has already claimed the
+		// HTLC) we skip trying to replay the claim.
+ let htlc_payment_hash: PaymentHash = payment_preimage.into(); + let balance_could_incl_htlc = |bal| match bal { + &Balance::ClaimableOnChannelClose { .. } => { + // The channel is still open, assume we can still claim against it + true + }, + &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => { + payment_hash == htlc_payment_hash + }, + _ => false, + }; + let htlc_may_be_in_balances = inbound_edge_balances.iter().any(balance_could_incl_htlc); + if !htlc_may_be_in_balances { + return None; + } + + // First check if we're absolutely going to fail - if we need to replay this claim to get + // the preimage into the inbound edge monitor but the channel is closed (and thus we'll + // immediately panic if we call claim_funds_from_hop). + if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { + log_error!(logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124. \ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", + htlc_payment_hash, + payment_preimage, + ); + fail_read = true; + } + + // At this point we're confident we need the claim, but the inbound edge channel is still + // live. As long as this remains the case, we can conceivably proceed, but we run some risk + // of panicking at runtime. The user ideally should have read the release notes and we + // wouldn't be here, but we go ahead and let things run in the hope that it'll all just + // work out. + log_error!(logger, + "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably. \ + As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \ + All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \ + Continuing anyway, though panics may occur!", + htlc_payment_hash, + payment_preimage, + ); + } + + Some(fail_read) +} + // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the // SipmleArcChannelManager type: impl< @@ -18011,51 +18329,32 @@ where let htlc_id = SentHTLCId::from_source(&htlc_source); match htlc_source { HTLCSource::PreviousHopData(prev_hop_data) => { - let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { - info.prev_funding_outpoint == prev_hop_data.outpoint - && info.prev_htlc_id == prev_hop_data.htlc_id - }; - // The ChannelMonitor is now responsible for this HTLC's - // failure/success and will let us know what its outcome is. If we - // still have an entry for this HTLC in `forward_htlcs`, - // `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not - // persisted after the monitor was when forwarding the payment. 
- dedup_decode_update_add_htlcs( + reconcile_pending_htlcs_with_monitor( + &mut forward_htlcs_legacy, + &mut pending_events_read, + &mut pending_intercepted_htlcs_legacy, &mut decode_update_add_htlcs, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, - ); - dedup_decode_update_add_htlcs( &mut decode_update_add_htlcs_legacy, - &prev_hop_data, - "HTLC was forwarded to the closed channel", - &args.logger, + prev_hop_data, + &logger, + htlc.payment_hash, + monitor.channel_id(), ); - forward_htlcs_legacy.retain(|_, forwards| { - forwards.retain(|forward| { - if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - false - } else { true } - } else { true } - }); - !forwards.is_empty() - }); - pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| { - if pending_forward_matches_htlc(&htlc_info) { - log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.channel_id()); - pending_events_read.retain(|(event, _)| { - if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { - intercepted_id != ev_id - } else { true } - }); - false - } else { true } - }); + }, + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + for prev_hop_data in previous_hop_data { + reconcile_pending_htlcs_with_monitor( + &mut forward_htlcs_legacy, + &mut pending_events_read, + &mut pending_intercepted_htlcs_legacy, + &mut decode_update_add_htlcs, + &mut decode_update_add_htlcs_legacy, + prev_hop_data, + &logger, + htlc.payment_hash, + monitor.channel_id(), + ); + } }, HTLCSource::OutboundRoute { payment_id, @@ -18155,107 +18454,66 @@ where // preimages from it which may be needed in upstream channels for forwarded // payments. let mut fail_read = false; - let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs() + let outbound_claimed_htlcs_iter = monitor + .get_all_current_outbound_htlcs() .into_iter() .filter_map(|(htlc_source, (htlc, preimage_opt))| { - if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source { - if let Some(payment_preimage) = preimage_opt { - let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id); - // Note that for channels which have gone to chain, - // `get_all_current_outbound_htlcs` is never pruned and always returns - // a constant set until the monitor is removed/archived. Thus, we - // want to skip replaying claims that have definitely been resolved - // on-chain. - - // If the inbound monitor is not present, we assume it was fully - // resolved and properly archived, implying this payment had plenty - // of time to get claimed and we can safely skip any further - // attempts to claim it (they wouldn't succeed anyway as we don't - // have a monitor against which to do so). - let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor { - monitor - } else { - return None; - }; - // Second, if the inbound edge of the payment's monitor has been - // fully claimed we've had at least `ANTI_REORG_DELAY` blocks to - // get any PaymentForwarded event(s) to the user and assume that - // there's no need to try to replay the claim just for that. 
- let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances(); - if inbound_edge_balances.is_empty() { - return None; - } + let payment_preimage = preimage_opt?; - if prev_hop.counterparty_node_id.is_none() { - // We no longer support claiming an HTLC where we don't have - // the counterparty_node_id available if the claim has to go to - // a closed channel. Its possible we can get away with it if - // the channel is not yet closed, but its by no means a - // guarantee. - - // Thus, in this case we are a bit more aggressive with our - // pruning - if we have no use for the claim (because the - // inbound edge of the payment's monitor has already claimed - // the HTLC) we skip trying to replay the claim. - let htlc_payment_hash: PaymentHash = payment_preimage.into(); - let balance_could_incl_htlc = |bal| match bal { - &Balance::ClaimableOnChannelClose { .. } => { - // The channel is still open, assume we can still - // claim against it - true - }, - &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => { - payment_hash == htlc_payment_hash - }, - _ => false, - }; - let htlc_may_be_in_balances = - inbound_edge_balances.iter().any(balance_could_incl_htlc); - if !htlc_may_be_in_balances { - return None; - } + let prev_htlcs = match &htlc_source { + HTLCSource::PreviousHopData(prev_hop) => vec![prev_hop], + HTLCSource::TrampolineForward { previous_hop_data, .. } => { + previous_hop_data.iter().collect() + }, + // If it was an outbound payment, we've handled it above - if a preimage + // came in and we persisted the `ChannelManager` we either handled it + // and are good to go or the channel force-closed - we don't have to + // handle the channel still live case here. + _ => vec![], + }; - // First check if we're absolutely going to fail - if we need - // to replay this claim to get the preimage into the inbound - // edge monitor but the channel is closed (and thus we'll - // immediately panic if we call claim_funds_from_hop). - if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() { - log_error!(args.logger, - "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\ - All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1", - htlc_payment_hash, - payment_preimage, - ); - fail_read = true; - } + let prev_htlcs_count = prev_htlcs.len(); + if prev_htlcs_count == 0 { + return None; + } - // At this point we're confident we need the claim, but the - // inbound edge channel is still live. As long as this remains - // the case, we can conceivably proceed, but we run some risk - // of panicking at runtime. The user ideally should have read - // the release notes and we wouldn't be here, but we go ahead - // and let things run in the hope that it'll all just work out. 
- log_error!(args.logger, - "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\ - As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\ - All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\ - Continuing anyway, though panics may occur!", - htlc_payment_hash, - payment_preimage, - ); + for prev_hop in prev_htlcs { + let inbound_edge_monitor = + args.channel_monitors.get(&prev_hop.channel_id).copied(); + if let Some(fail_claim_read) = prev_hop_needs_claim_replay( + prev_hop, + payment_preimage, + inbound_edge_monitor, + &short_to_chan_info, + &args.logger, + ) { + // We can only fail to read from disk for legacy HTLCs that have + // a single prev_htlc. If we could fail_claim_read for multiple + // prev_htlcs, it wouldn't be correct to exit early on our first + // claimable prev_hop (because a subsequent one may + // fail_claim_read). + if fail_claim_read { + debug_assert!(prev_htlcs_count == 1); } - Some((htlc_source, payment_preimage, htlc.amount_msat, - is_channel_closed, monitor.get_counterparty_node_id(), - monitor.get_funding_txo(), monitor.channel_id())) - } else { None } - } else { - // If it was an outbound payment, we've handled it above - if a preimage - // came in and we persisted the `ChannelManager` we either handled it and - // are good to go or the channel force-closed - we don't have to handle the - // channel still live case here. - None + fail_read |= fail_claim_read; + return Some(( + // When we have multiple prev_htlcs we assume that they all + // share the same htlc_source which contains all previous hops, + // so we can exit on the first claimable prev_hop because this + // will result in all prev_hops being claimed. + htlc_source, + payment_preimage, + htlc.amount_msat, + is_channel_closed, + monitor.get_counterparty_node_id(), + monitor.get_funding_txo(), + monitor.channel_id(), + )); + } } + + None }); for tuple in outbound_claimed_htlcs_iter { pending_claims_to_replay.push(tuple); @@ -18445,7 +18703,7 @@ where let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None); for action in actions.iter() { - if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { + if let MonitorUpdateCompletionAction::EmitEventOptionAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: Some(EventUnblockedChannel { counterparty_node_id: blocked_node_id, @@ -18971,6 +19229,64 @@ where } } +/// Removes pending HTLC entries that the ChannelMonitor has already taken responsibility for, +/// cleaning up state mismatches that can occur during restart. +fn reconcile_pending_htlcs_with_monitor( + forward_htlcs_legacy: &mut HashMap>, + pending_events_read: &mut VecDeque<(Event, Option)>, + pending_intercepted_htlcs_legacy: &mut HashMap, + decode_update_add_htlcs: &mut HashMap>, + decode_update_add_htlcs_legacy: &mut HashMap>, + prev_hop_data: HTLCPreviousHopData, logger: &impl Logger, payment_hash: PaymentHash, + channel_id: ChannelId, +) { + let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| { + info.prev_funding_outpoint == prev_hop_data.outpoint + && info.prev_htlc_id == prev_hop_data.htlc_id + }; + // The ChannelMonitor is now responsible for this HTLC's + // failure/success and will let us know what its outcome is. 
If we
+	// still have an entry for this HTLC in `forward_htlcs`,
+	// `pending_intercepted_htlcs`, or `decode_update_add_htlcs`, we were apparently not
+	// persisted after the monitor was when forwarding the payment.
+	dedup_decode_update_add_htlcs(
+		decode_update_add_htlcs,
+		&prev_hop_data,
+		"HTLC was forwarded to the closed channel",
+		&logger,
+	);
+	dedup_decode_update_add_htlcs(
+		decode_update_add_htlcs_legacy,
+		&prev_hop_data,
+		"HTLC was forwarded to the closed channel",
+		&logger,
+	);
+	forward_htlcs_legacy.retain(|_, forwards| {
+		forwards.retain(|forward| {
+			if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
+				if pending_forward_matches_htlc(&htlc_info) {
+					log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
+						&payment_hash, &channel_id);
+					false
+				} else { true }
+			} else { true }
+		});
+		!forwards.is_empty()
+	});
+	pending_intercepted_htlcs_legacy.retain(|intercepted_id, htlc_info| {
+		if pending_forward_matches_htlc(&htlc_info) {
+			log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
+				&payment_hash, &channel_id);
+			pending_events_read.retain(|(event, _)| {
+				if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
+					intercepted_id != ev_id
+				} else { true }
+			});
+			false
+		} else { true }
+	});
+}
+
 #[cfg(test)]
 mod tests {
 	use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index e072deb6a97..33fe0c72cef 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -3059,17 +3059,16 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>(
 ) -> Option<u64> {
 	match event {
 		Event::PaymentForwarded {
-			prev_channel_id,
-			next_channel_id,
-			prev_user_channel_id,
-			next_user_channel_id,
-			prev_node_id,
-			next_node_id,
+			prev_htlcs,
+			next_htlcs,
 			total_fee_earned_msat,
 			skimmed_fee_msat,
 			claim_from_onchain_tx,
 			..
 		} => {
+			assert_eq!(prev_htlcs.len(), 1);
+			assert_eq!(next_htlcs.len(), 1);
+
 			if allow_1_msat_fee_overpay {
 				// Aggregating fees for blinded paths may result in a rounding error, causing slight
 				// overpayment in fees.
@@ -3084,33 +3083,36 @@ pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM = CM>>(
 			// overpaid amount.
 			assert!(skimmed_fee_msat == expected_extra_fees_msat);
 			if !upstream_force_closed {
-				assert_eq!(prev_node.node().get_our_node_id(), prev_node_id.unwrap());
+				let prev_node_id = prev_htlcs[0].node_id.unwrap();
+				let prev_channel_id = prev_htlcs[0].channel_id;
+				let prev_user_channel_id = prev_htlcs[0].user_channel_id.unwrap();
+
+				assert_eq!(prev_node.node().get_our_node_id(), prev_node_id);
 				// Is the event prev_channel_id in one of the channels between the two nodes?
let node_chans = node.node().list_channels(); - assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id.unwrap() - && x.channel_id == prev_channel_id.unwrap() - && x.user_channel_id == prev_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == prev_node_id + && x.channel_id == prev_channel_id + && x.user_channel_id == prev_user_channel_id)); } // We check for force closures since a force closed channel is removed from the // node's channel list if !downstream_force_closed { + let next_node_id = next_htlcs[0].node_id.unwrap(); + let next_channel_id = next_htlcs[0].channel_id; + let next_user_channel_id = next_htlcs[0].user_channel_id.unwrap(); // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an // onchain transaction, just as the `total_fee_earned_msat` field. Rather than // introducing yet another variable, we use the latter's state as a flag to detect // this and only check if it's `Some`. - assert_eq!(next_node.node().get_our_node_id(), next_node_id.unwrap()); + assert_eq!(next_node.node().get_our_node_id(), next_node_id); let node_chans = node.node().list_channels(); if total_fee_earned_msat.is_none() { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id)); } else { - assert!(node_chans - .iter() - .any(|x| x.counterparty.node_id == next_node_id.unwrap() - && x.channel_id == next_channel_id.unwrap() - && x.user_channel_id == next_user_channel_id.unwrap())); + assert!(node_chans.iter().any(|x| x.counterparty.node_id == next_node_id + && x.channel_id == next_channel_id + && x.user_channel_id == next_user_channel_id)); } } assert_eq!(claim_from_onchain_tx, downstream_force_closed); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index e2963dbeb09..933cb2d2cdc 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -1459,37 +1459,37 @@ pub fn test_htlc_on_chain_success() { connect_blocks(&nodes[1], TEST_FINAL_CLTV); // Confirm blocks until the HTLC expires let forwarded_events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(forwarded_events.len(), 3); - let chan_id = Some(chan_1.2); + let chan_id = chan_1.2; match forwarded_events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), } match forwarded_events[1] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. 
} => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, chan_id); + assert_eq!(prev_htlcs[0].channel_id, chan_id); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!(), @@ -3965,17 +3965,17 @@ pub fn test_onchain_to_onchain_claim() { assert_eq!(events.len(), 2); match events[0] { Event::PaymentForwarded { + ref prev_htlcs, + ref next_htlcs, total_fee_earned_msat, - prev_channel_id, claim_from_onchain_tx, - next_channel_id, outbound_amount_forwarded_msat, .. } => { assert_eq!(total_fee_earned_msat, Some(1000)); - assert_eq!(prev_channel_id, Some(chan_1.2)); + assert_eq!(prev_htlcs[0].channel_id, chan_1.2); assert_eq!(claim_from_onchain_tx, true); - assert_eq!(next_channel_id, Some(chan_2.2)); + assert_eq!(next_htlcs[0].channel_id, chan_2.2); assert_eq!(outbound_amount_forwarded_msat, Some(3000000)); }, _ => panic!("Unexpected event"), diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 04915affa20..a754c4fb81f 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -3792,8 +3792,8 @@ fn do_test_lost_timeout_monitor_events(confirm_tx: CommitmentType, dust_htlcs: b Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(hash_b)); }, - Event::HTLCHandlingFailed { prev_channel_id, .. } => { - assert_eq!(prev_channel_id, chan_a); + Event::HTLCHandlingFailed { prev_channel_ids, .. } => { + assert_eq!(prev_channel_ids[0], chan_a); }, _ => panic!("Wrong event {ev:?}"), } diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 40580a09c8c..93729aa7e34 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -611,6 +611,11 @@ impl Path { } } +impl_writeable_tlv_based!(Path,{ + (1, hops, required_vec), + (3, blinded_tail, option), +}); + /// A route directs a payment from the sender (us) to the recipient. If the recipient supports MPP, /// it can take multiple paths. Each path is composed of one or more hops through the network. #[derive(Clone, Debug, Hash, PartialEq, Eq)] diff --git a/lightning/src/util/ser.rs b/lightning/src/util/ser.rs index f821aa5afc0..24e535b70bb 100644 --- a/lightning/src/util/ser.rs +++ b/lightning/src/util/ser.rs @@ -1100,6 +1100,8 @@ impl_for_vec!(crate::routing::router::TrampolineHop); impl_for_vec_with_element_length_prefix!(crate::ln::msgs::UpdateAddHTLC); impl_writeable_for_vec_with_element_length_prefix!(&crate::ln::msgs::UpdateAddHTLC); impl_for_vec!(u32); +impl_for_vec!(crate::events::HTLCLocator); +impl_for_vec!(crate::ln::types::ChannelId); impl Writeable for Vec { #[inline] diff --git a/pending_changelog/4304.txt b/pending_changelog/4304.txt new file mode 100644 index 00000000000..8c1580a2f4c --- /dev/null +++ b/pending_changelog/4304.txt @@ -0,0 +1,3 @@ +## Backwards Compatibility + +* Downgrade is not possible while the node has in-flight trampoline forwards.