lightning/ln/channelmanager.rs

1// This file is Copyright its original authors, visible in version control
2// history.
3//
4// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7// You may not use this file except in accordance with one or both of these
8// licenses.
9
10//! The top-level channel management and payment tracking stuff lives here.
11//!
12//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
13//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
14//! upon reconnect to the relevant peer(s).
15//!
16//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
17//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
18//! imply it needs to fail HTLCs/payments/channels it manages).
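//!
//! As an illustrative sketch (assuming a fully-constructed `channel_manager` and an
//! `event_handler` are in scope; construction and persistence are covered elsewhere), an
//! application typically drives a [`ChannelManager`] by periodically calling:
//!
//! ```ignore
//! // Push any HTLCs which are ready to be forwarded out to the next hop.
//! channel_manager.process_pending_htlc_forwards();
//! // Roughly once a minute, let timer-driven logic run.
//! channel_manager.timer_tick_occurred();
//! // Surface pending events (claimable payments, channel closures, etc.) to the application.
//! channel_manager.process_pending_events(&event_handler);
//! ```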
19
20use bitcoin::block::Header;
21use bitcoin::constants::ChainHash;
22use bitcoin::key::constants::SECRET_KEY_SIZE;
23use bitcoin::network::Network;
24use bitcoin::transaction::Transaction;
25
26use bitcoin::hash_types::{BlockHash, Txid};
27use bitcoin::hashes::hmac::Hmac;
28use bitcoin::hashes::sha256::Hash as Sha256;
29use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
30
31use bitcoin::secp256k1::Secp256k1;
32use bitcoin::secp256k1::{PublicKey, SecretKey};
33use bitcoin::{secp256k1, Sequence, SignedAmount};
34
35use crate::blinded_path::message::{
36	AsyncPaymentsContext, BlindedMessagePath, MessageForwardNode, OffersContext,
37};
38use crate::blinded_path::payment::{
39	AsyncBolt12OfferContext, Bolt12OfferContext, PaymentContext, UnauthenticatedReceiveTlvs,
40};
41use crate::blinded_path::NodeIdLookUp;
42use crate::chain;
43use crate::chain::chaininterface::{
44	BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator,
45};
46use crate::chain::channelmonitor::{
47	Balance, ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, MonitorEvent,
48	WithChannelMonitor, ANTI_REORG_DELAY, CLTV_CLAIM_BUFFER, HTLC_FAIL_BACK_BUFFER,
49	LATENCY_GRACE_PERIOD_BLOCKS, MAX_BLOCKS_FOR_CONF,
50};
51use crate::chain::transaction::{OutPoint, TransactionData};
52use crate::chain::{BestBlock, ChannelMonitorUpdateStatus, Confirm, Watch};
53use crate::events::{
54	self, ClosureReason, Event, EventHandler, EventsProvider, HTLCHandlingFailureType,
55	InboundChannelFunds, PaymentFailureReason, ReplayEvent,
56};
57use crate::events::{FundingInfo, PaidBolt12Invoice};
58use crate::ln::chan_utils::selected_commitment_sat_per_1000_weight;
59#[cfg(any(test, fuzzing))]
60use crate::ln::channel::QuiescentAction;
61use crate::ln::channel::{
62	self, hold_time_since, Channel, ChannelError, ChannelUpdateStatus, DisconnectResult,
63	FundedChannel, FundingTxSigned, InboundV1Channel, OutboundV1Channel, PendingV2Channel,
64	ReconnectionMsg, ShutdownResult, SpliceFundingFailed, StfuResponse, UpdateFulfillCommitFetch,
65	WithChannelContext,
66};
67use crate::ln::channel_state::ChannelDetails;
68use crate::ln::funding::SpliceContribution;
69use crate::ln::inbound_payment;
70use crate::ln::interactivetxs::InteractiveTxMessageSend;
71use crate::ln::msgs;
72use crate::ln::msgs::{
73	BaseMessageHandler, ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError,
74	MessageSendEvent,
75};
76use crate::ln::onion_payment::{
77	check_incoming_htlc_cltv, create_fwd_pending_htlc_info, create_recv_pending_htlc_info,
78	decode_incoming_update_add_htlc_onion, invalid_payment_err_data, HopConnector, InboundHTLCErr,
79	NextPacketDetails,
80};
81use crate::ln::onion_utils::{self};
82use crate::ln::onion_utils::{
83	decode_fulfill_attribution_data, HTLCFailReason, LocalHTLCFailureReason,
84};
85use crate::ln::onion_utils::{process_fulfill_attribution_data, AttributionData};
86use crate::ln::our_peer_storage::{EncryptedOurPeerStorage, PeerStorageMonitorHolder};
87#[cfg(test)]
88use crate::ln::outbound_payment;
89use crate::ln::outbound_payment::{
90	OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs,
91	StaleExpiration,
92};
93use crate::ln::types::ChannelId;
94use crate::offers::async_receive_offer_cache::AsyncReceiveOfferCache;
95use crate::offers::flow::{HeldHtlcReplyPath, InvreqResponseInstructions, OffersMessageFlow};
96use crate::offers::invoice::{
97	Bolt12Invoice, DerivedSigningPubkey, InvoiceBuilder, DEFAULT_RELATIVE_EXPIRY,
98};
99use crate::offers::invoice_error::InvoiceError;
100use crate::offers::invoice_request::InvoiceRequest;
101use crate::offers::nonce::Nonce;
102use crate::offers::offer::{Offer, OfferFromHrn};
103use crate::offers::parse::Bolt12SemanticError;
104use crate::offers::refund::Refund;
105use crate::offers::signer;
106use crate::offers::static_invoice::StaticInvoice;
107use crate::onion_message::async_payments::{
108	AsyncPaymentsMessage, AsyncPaymentsMessageHandler, HeldHtlcAvailable, OfferPaths,
109	OfferPathsRequest, ReleaseHeldHtlc, ServeStaticInvoice, StaticInvoicePersisted,
110};
111use crate::onion_message::dns_resolution::HumanReadableName;
112use crate::onion_message::messenger::{
113	MessageRouter, MessageSendInstructions, Responder, ResponseInstruction,
114};
115use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
116use crate::routing::router::{
117	BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route,
118	RouteParameters, RouteParametersConfig, Router,
119};
120use crate::sign::ecdsa::EcdsaChannelSigner;
121use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
122#[cfg(any(feature = "_test_utils", test))]
123use crate::types::features::Bolt11InvoiceFeatures;
124use crate::types::features::{
125	Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures,
126};
127use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
128use crate::types::string::UntrustedString;
129use crate::util::config::{ChannelConfig, ChannelConfigOverrides, ChannelConfigUpdate, UserConfig};
130use crate::util::errors::APIError;
131use crate::util::logger::{Level, Logger, WithContext};
132use crate::util::scid_utils::fake_scid;
133use crate::util::ser::{
134	BigSize, FixedLengthReader, LengthReadable, MaybeReadable, Readable, ReadableArgs, VecWriter,
135	WithoutLength, Writeable, Writer,
136};
137use crate::util::wakers::{Future, Notifier};
138
139#[cfg(test)]
140use crate::blinded_path::payment::BlindedPaymentPath;
141
142#[cfg(feature = "dnssec")]
143use {
144	crate::blinded_path::message::DNSResolverContext,
145	crate::onion_message::dns_resolution::{
146		DNSResolverMessage, DNSResolverMessageHandler, DNSSECProof, DNSSECQuery,
147	},
148	crate::onion_message::messenger::Destination,
149};
150
151#[cfg(c_bindings)]
152use {
153	crate::offers::offer::OfferWithDerivedMetadataBuilder,
154	crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
155};
156#[cfg(not(c_bindings))]
157use {
158	crate::offers::offer::{DerivedMetadata, OfferBuilder},
159	crate::offers::refund::RefundBuilder,
160	crate::onion_message::messenger::DefaultMessageRouter,
161	crate::routing::gossip::NetworkGraph,
162	crate::routing::router::DefaultRouter,
163	crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
164	crate::sign::KeysManager,
165};
166
167use lightning_invoice::{
168	Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description,
169	InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME,
170};
171
172use alloc::collections::{btree_map, BTreeMap};
173
174use crate::io;
175use crate::io::Read;
176use crate::prelude::*;
177use crate::sync::{Arc, FairRwLock, LockHeldState, LockTestExt, Mutex, RwLock, RwLockReadGuard};
178use bitcoin::hex::impl_fmt_traits;
179
180use core::borrow::Borrow;
181use core::cell::RefCell;
182use core::convert::Infallible;
183use core::ops::Deref;
184use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
185use core::time::Duration;
186use core::{cmp, mem};
187// Re-export this for use in the public API.
188#[cfg(any(test, feature = "_externalize_tests"))]
189pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
190pub use crate::ln::outbound_payment::{
191	Bolt11PaymentError, Bolt12PaymentError, ProbeSendFailure, RecipientOnionFields, Retry,
192	RetryableSendFailure,
193};
194use crate::ln::script::ShutdownScript;
195
196// We hold various information about HTLC relay in the HTLC objects in Channel itself:
197//
198// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
199// forward the HTLC with information it will give back to us when it does so, or if it should Fail
200// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
201//
202// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
203// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
204// with it to track where it came from (in case of onwards-forward error), waiting a random delay
205// before we forward it.
206//
207// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
208// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
209// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
210// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
211// our payment, which we can use to decode errors or inform the user that the payment was sent.
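//
// As an illustrative (non-normative) sketch of that lifecycle in terms of the types below:
//
//   update_add_htlc received, onion decoded
//     -> PendingHTLCStatus::Forward(PendingHTLCInfo { routing: PendingHTLCRouting::Forward { .. }, .. })
//        (or PendingHTLCStatus::Fail(HTLCFailureMsg) if the onion or HTLC was invalid)
//   HTLC irrevocably committed on the inbound channel
//     -> queued as HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { forward_info, .. })
//   forward processed after a short delay
//     -> outbound HTLC built from the PendingHTLCInfo, tagged with HTLCSource::PreviousHopData
//        so a later fulfill/fail can be propagated backwards along the same path, or with
//        HTLCSource::OutboundRoute when the HTLC is part of a payment we ourselves sent.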
212
213/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
214#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
215#[cfg_attr(test, derive(Debug, PartialEq))]
216pub enum PendingHTLCRouting {
217	/// An HTLC which should be forwarded on to another node.
218	Forward {
219		/// The onion which should be included in the forwarded HTLC, telling the next hop what to
220		/// do with the HTLC.
221		onion_packet: msgs::OnionPacket,
222		/// The short channel ID of the channel which we were instructed to forward this HTLC to.
223		///
224		/// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
225		/// to the receiving node, such as one returned from
226		/// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
227		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
228		/// Set if this HTLC is being forwarded within a blinded path.
229		blinded: Option<BlindedForward>,
230		/// The absolute CLTV of the inbound HTLC
231		incoming_cltv_expiry: Option<u32>,
232		/// Whether this HTLC should be held by our node until we receive a corresponding
233		/// [`ReleaseHeldHtlc`] onion message.
234		hold_htlc: Option<()>,
235	},
236	/// An HTLC which should be forwarded on to another Trampoline node.
237	TrampolineForward {
238		/// The onion shared secret we build with the sender (or the preceding Trampoline node) used
239		/// to decrypt the onion.
240		///
241		/// This is later used to encrypt failure packets in the event that the HTLC is failed.
242		incoming_shared_secret: [u8; 32],
243		/// The onion which should be included in the forwarded HTLC, telling the next hop what to
244		/// do with the HTLC.
245		onion_packet: msgs::TrampolineOnionPacket,
246		/// The node ID of the Trampoline node which we need to route this HTLC to.
247		node_id: PublicKey,
248		/// Set if this HTLC is being forwarded within a blinded path.
249		blinded: Option<BlindedForward>,
250		/// The absolute CLTV of the inbound HTLC
251		incoming_cltv_expiry: u32,
252	},
253	/// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
254	///
255	/// Note that at this point, we have not checked that the invoice being paid was actually
256	/// generated by us, but rather it's claiming to pay an invoice of ours.
257	Receive {
258		/// Information about the amount the sender intended to pay and (potential) proof that this
259		/// is a payment for an invoice we generated. This proof of payment is also used for
260		/// linking MPP parts of a larger payment.
261		payment_data: msgs::FinalOnionHopData,
262		/// Additional data which we (allegedly) instructed the sender to include in the onion.
263		///
264		/// For HTLCs received by LDK, this will ultimately be exposed in
265		/// [`Event::PaymentClaimable::onion_fields`] as
266		/// [`RecipientOnionFields::payment_metadata`].
267		payment_metadata: Option<Vec<u8>>,
268		/// The context of the payment included by the recipient in a blinded path, or `None` if a
269		/// blinded path was not used.
270		///
271		/// Used in part to determine the [`events::PaymentPurpose`].
272		payment_context: Option<PaymentContext>,
273		/// CLTV expiry of the received HTLC.
274		///
275		/// Used to track when we should expire pending HTLCs that go unclaimed.
276		incoming_cltv_expiry: u32,
277		/// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
278		/// provide the onion shared secret used to decrypt the next level of forwarding
279		/// instructions.
280		phantom_shared_secret: Option<[u8; 32]>,
281		/// Custom TLVs which were set by the sender.
282		///
283		/// For HTLCs received by LDK, this will ultimately be exposed in
284		/// [`Event::PaymentClaimable::onion_fields`] as
285		/// [`RecipientOnionFields::custom_tlvs`].
286		custom_tlvs: Vec<(u64, Vec<u8>)>,
287		/// Set if this HTLC is the final hop in a multi-hop blinded path.
288		requires_blinded_error: bool,
289	},
290	/// The onion indicates that this is a payment to us which includes the preimage needed for
291	/// claiming it, and is unrelated to any invoice we'd previously generated (aka a
292	/// "keysend" or "spontaneous" payment).
293	ReceiveKeysend {
294		/// Information about the amount the sender intended to pay and possibly a token to
295		/// associate MPP parts of a larger payment.
296		///
297		/// This will only be filled in if receiving MPP keysend payments is enabled, and it being
298		/// present will cause deserialization to fail on versions of LDK prior to 0.0.116.
299		payment_data: Option<msgs::FinalOnionHopData>,
300		/// Preimage for this onion payment. This preimage is provided by the sender and will be
301		/// used to settle the spontaneous payment.
302		payment_preimage: PaymentPreimage,
303		/// Additional data which we (allegedly) instructed the sender to include in the onion.
304		///
305		/// For HTLCs received by LDK, this will ultimately bubble back up as
306		/// [`RecipientOnionFields::payment_metadata`].
307		payment_metadata: Option<Vec<u8>>,
308		/// CLTV expiry of the received HTLC.
309		///
310		/// Used to track when we should expire pending HTLCs that go unclaimed.
311		incoming_cltv_expiry: u32,
312		/// Custom TLVs which were set by the sender.
313		///
314		/// For HTLCs received by LDK, these will ultimately bubble back up as
315		/// [`RecipientOnionFields::custom_tlvs`].
316		custom_tlvs: Vec<(u64, Vec<u8>)>,
317		/// Set if this HTLC is the final hop in a multi-hop blinded path.
318		requires_blinded_error: bool,
319		/// Set if we are receiving a keysend to a blinded path, meaning we created the
320		/// [`PaymentSecret`] and should verify it using our
321		/// [`NodeSigner::get_expanded_key`].
322		has_recipient_created_payment_secret: bool,
323		/// The [`InvoiceRequest`] associated with the [`Offer`] corresponding to this payment.
324		invoice_request: Option<InvoiceRequest>,
325		/// The context of the payment included by the recipient in a blinded path, or `None` if a
326		/// blinded path was not used.
327		///
328		/// Used in part to determine the [`events::PaymentPurpose`].
329		payment_context: Option<PaymentContext>,
330	},
331}
332
333/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
334#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
335pub struct BlindedForward {
336	/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
337	/// onion payload if we're the introduction node. Useful for calculating the next hop's
338	/// [`msgs::UpdateAddHTLC::blinding_point`].
339	pub inbound_blinding_point: PublicKey,
340	/// If needed, this determines how this HTLC should be failed backwards, based on whether we are
341	/// the introduction node.
342	pub failure: BlindedFailure,
343	/// Overrides the next hop's [`msgs::UpdateAddHTLC::blinding_point`]. Set if this HTLC is being
344	/// forwarded within a [`BlindedPaymentPath`] that was concatenated to another blinded path that
345	/// starts at the next hop.
346	///
347	/// [`BlindedPaymentPath`]: crate::blinded_path::payment::BlindedPaymentPath
348	pub next_blinding_override: Option<PublicKey>,
349}
350
351impl PendingHTLCRouting {
352	// Used to override the onion failure code and data if the HTLC is blinded.
353	fn blinded_failure(&self) -> Option<BlindedFailure> {
354		match self {
355			Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
356			Self::TrampolineForward { blinded: Some(BlindedForward { failure, .. }), .. } => {
357				Some(*failure)
358			},
359			Self::Receive { requires_blinded_error: true, .. } => {
360				Some(BlindedFailure::FromBlindedNode)
361			},
362			Self::ReceiveKeysend { requires_blinded_error: true, .. } => {
363				Some(BlindedFailure::FromBlindedNode)
364			},
365			_ => None,
366		}
367	}
368
369	fn incoming_cltv_expiry(&self) -> Option<u32> {
370		match self {
371			Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
372			Self::TrampolineForward { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
373			Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
374			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
375		}
376	}
377
378	/// Whether this HTLC should be held by our node until we receive a corresponding
379	/// [`ReleaseHeldHtlc`] onion message.
380	pub(super) fn should_hold_htlc(&self) -> bool {
381		match self {
382			Self::Forward { hold_htlc: Some(()), .. } => true,
383			_ => false,
384		}
385	}
386}
387
388/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
389/// should go next.
390#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
391#[cfg_attr(test, derive(Debug, PartialEq))]
392pub struct PendingHTLCInfo {
393	/// Further routing details based on whether the HTLC is being forwarded or received.
394	pub routing: PendingHTLCRouting,
395	/// The onion shared secret we build with the sender used to decrypt the onion.
396	///
397	/// This is later used to encrypt failure packets in the event that the HTLC is failed.
398	pub incoming_shared_secret: [u8; 32],
399	/// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
400	pub payment_hash: PaymentHash,
401	/// Amount received in the incoming HTLC.
402	///
403	/// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
404	/// versions.
405	pub incoming_amt_msat: Option<u64>,
406	/// The amount the sender indicated should be forwarded on to the next hop, or the amount the sender
407	/// intended for us to receive for received payments.
408	///
409	/// If the received amount is less than this for received payments, an intermediary hop has
410	/// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
411	/// it along another path).
412	///
413	/// Because nodes can take less than their required fees, and because senders may wish to
414	/// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
415	/// received payments. In such cases, recipients must handle this HTLC as if it had received
416	/// [`Self::outgoing_amt_msat`].
417	pub outgoing_amt_msat: u64,
418	/// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
419	/// should have been set on the received HTLC for received payments).
420	pub outgoing_cltv_value: u32,
421	/// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
422	///
423	/// If this is a payment for forwarding, this is the fee we are taking before forwarding the
424	/// HTLC.
425	///
426	/// If this is a received payment, this is the fee that our counterparty took.
427	///
428	/// This is used to allow LSPs to take fees as a part of payments, without the sender having to
429	/// shoulder them.
430	pub skimmed_fee_msat: Option<u64>,
431}
432
433#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
434pub(super) enum HTLCFailureMsg {
435	Relay(msgs::UpdateFailHTLC),
436	Malformed(msgs::UpdateFailMalformedHTLC),
437}
438
439/// Stores either the relevant forwarding info for an HTLC, or the failure message to send back if we can't forward it.
440#[derive(Clone)] // See FundedChannel::revoke_and_ack for why, tl;dr: Rust bug
441pub(super) enum PendingHTLCStatus {
442	Forward(PendingHTLCInfo),
443	Fail(HTLCFailureMsg),
444}
445
446#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
447pub(super) struct PendingAddHTLCInfo {
448	pub(super) forward_info: PendingHTLCInfo,
449
450	// These fields are produced in `forward_htlcs()` and consumed in
451	// `process_pending_htlc_forwards()` for constructing the
452	// `HTLCSource::PreviousHopData` for failed and forwarded
453	// HTLCs.
454	//
455	// Note that this may be an outbound SCID alias for the associated channel.
456	prev_outbound_scid_alias: u64,
457	prev_htlc_id: u64,
458	prev_counterparty_node_id: PublicKey,
459	prev_channel_id: ChannelId,
460	prev_funding_outpoint: OutPoint,
461	prev_user_channel_id: u128,
462}
463
464impl PendingAddHTLCInfo {
465	fn htlc_previous_hop_data(&self) -> HTLCPreviousHopData {
466		let phantom_shared_secret = match self.forward_info.routing {
467			PendingHTLCRouting::Receive { phantom_shared_secret, .. } => phantom_shared_secret,
468			_ => None,
469		};
470		HTLCPreviousHopData {
471			prev_outbound_scid_alias: self.prev_outbound_scid_alias,
472			user_channel_id: Some(self.prev_user_channel_id),
473			outpoint: self.prev_funding_outpoint,
474			channel_id: self.prev_channel_id,
475			counterparty_node_id: Some(self.prev_counterparty_node_id),
476			htlc_id: self.prev_htlc_id,
477			incoming_packet_shared_secret: self.forward_info.incoming_shared_secret,
478			phantom_shared_secret,
479			blinded_failure: self.forward_info.routing.blinded_failure(),
480			cltv_expiry: self.forward_info.routing.incoming_cltv_expiry(),
481		}
482	}
483}
484
485#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
486pub(super) enum HTLCForwardInfo {
487	AddHTLC(PendingAddHTLCInfo),
488	FailHTLC { htlc_id: u64, err_packet: msgs::OnionErrorPacket },
489	FailMalformedHTLC { htlc_id: u64, failure_code: u16, sha256_of_onion: [u8; 32] },
490}
491
492/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
493/// which determines the failure message that should be used.
494#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
495pub enum BlindedFailure {
496	/// This HTLC is being failed backwards by the introduction node, and thus should be failed with
497	/// [`msgs::UpdateFailHTLC`] and error code [`LocalHTLCFailureReason::InvalidOnionBlinding`].
498	FromIntroductionNode,
499	/// This HTLC is being failed backwards by a blinded node within the path, and thus should be
500	/// failed with [`msgs::UpdateFailMalformedHTLC`] and error code
501	/// [`LocalHTLCFailureReason::InvalidOnionBlinding`].
502	FromBlindedNode,
503}
504
505#[derive(PartialEq, Eq)]
506enum OnionPayload {
507	/// Indicates this incoming onion payload is for the purpose of paying an invoice.
508	Invoice {
509		/// This is only here for backwards-compatibility in serialization, in the future it can be
510		/// removed, breaking clients running 0.0.106 and earlier.
511		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
512	},
513	/// Contains the payer-provided preimage.
514	Spontaneous(PaymentPreimage),
515}
516
517/// HTLCs that are to us and can be failed/claimed by the user
518#[derive(PartialEq, Eq)]
519struct ClaimableHTLC {
520	prev_hop: HTLCPreviousHopData,
521	cltv_expiry: u32,
522	/// The amount (in msats) of this MPP part
523	value: u64,
524	/// The amount (in msats) that the sender intended to be sent in this MPP
525	/// part (used for validating total MPP amount)
526	sender_intended_value: u64,
527	onion_payload: OnionPayload,
528	timer_ticks: u8,
529	/// The total value received for a payment (sum of all MPP parts if the payment is an MPP).
530	/// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
531	total_value_received: Option<u64>,
532	/// The sender intended sum total of all MPP parts specified in the onion
533	total_msat: u64,
534	/// The extra fee our counterparty skimmed off the top of this HTLC.
535	counterparty_skimmed_fee_msat: Option<u64>,
536}
537
538impl From<&ClaimableHTLC> for events::ClaimedHTLC {
539	fn from(val: &ClaimableHTLC) -> Self {
540		events::ClaimedHTLC {
541			counterparty_node_id: val.prev_hop.counterparty_node_id,
542			channel_id: val.prev_hop.channel_id,
543			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
544			cltv_expiry: val.cltv_expiry,
545			value_msat: val.value,
546			counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
547		}
548	}
549}
550
551impl PartialOrd for ClaimableHTLC {
552	fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
553		Some(self.cmp(other))
554	}
555}
556impl Ord for ClaimableHTLC {
557	fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
558		let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id)
559			.cmp(&(other.prev_hop.channel_id, other.prev_hop.htlc_id));
560		if res.is_eq() {
561			debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
562		}
563		res
564	}
565}
566
567/// A trait defining behavior for creating and verifying the HMAC used to authenticate given data.
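///
/// A minimal sketch of the intended round-trip (illustrative only; assumes a `nonce` and an
/// `expanded_key` are already in scope, e.g. when building [`UnauthenticatedReceiveTlvs`]):
///
/// ```ignore
/// let hmac = tlvs.hmac_for_offer_payment(nonce, &expanded_key);
/// assert!(tlvs.verify_for_offer_payment(hmac, nonce, &expanded_key).is_ok());
/// ```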
568pub trait Verification {
569	/// Constructs an HMAC to include in [`OffersContext`] for the data along with the given
570	/// [`Nonce`].
571	fn hmac_for_offer_payment(
572		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
573	) -> Hmac<Sha256>;
574
575	/// Authenticates the data using an HMAC and a [`Nonce`] taken from an [`OffersContext`].
576	fn verify_for_offer_payment(
577		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
578	) -> Result<(), ()>;
579}
580
581impl Verification for UnauthenticatedReceiveTlvs {
582	fn hmac_for_offer_payment(
583		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
584	) -> Hmac<Sha256> {
585		signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
586	}
587
588	fn verify_for_offer_payment(
589		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
590	) -> Result<(), ()> {
591		signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
592	}
593}
594
595/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
596/// a payment and ensure idempotency in LDK.
597///
598/// This is not exported to bindings users as we just use [u8; 32] directly
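///
/// A minimal sketch (illustrative only; assumes an `entropy_source` implementing
/// [`EntropySource`] is in scope). Any unique 32 bytes work, as long as the same id is reused
/// when retrying the same payment:
///
/// ```ignore
/// let payment_id = PaymentId(entropy_source.get_secure_random_bytes());
/// ```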
599#[derive(Hash, Copy, Clone, PartialEq, Eq)]
600pub struct PaymentId(pub [u8; Self::LENGTH]);
601
602impl PaymentId {
603	/// Number of bytes in the id.
604	pub const LENGTH: usize = 32;
605}
606
607impl PaymentId {
608	fn for_inbound_from_htlcs<I: Iterator<Item = (ChannelId, u64)>>(
609		key: &[u8; 32], htlcs: I,
610	) -> PaymentId {
611		let mut prev_pair = None;
612		let mut hasher = HmacEngine::new(key);
613		for (channel_id, htlc_id) in htlcs {
614			hasher.input(&channel_id.0);
615			hasher.input(&htlc_id.to_le_bytes());
616			if let Some(prev) = prev_pair {
617				debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
618			}
619			prev_pair = Some((channel_id, htlc_id));
620		}
621		PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
622	}
623}
624
625impl Borrow<[u8]> for PaymentId {
626	fn borrow(&self) -> &[u8] {
627		&self.0[..]
628	}
629}
630
631impl_fmt_traits! {
632	impl fmt_traits for PaymentId {
633		const LENGTH: usize = 32;
634	}
635}
636
637impl Writeable for PaymentId {
638	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
639		self.0.write(w)
640	}
641}
642
643impl Readable for PaymentId {
644	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
645		let buf: [u8; 32] = Readable::read(r)?;
646		Ok(PaymentId(buf))
647	}
648}
649
650/// An identifier used to uniquely identify an intercepted HTLC to LDK.
651///
652/// This is not exported to bindings users as we just use [u8; 32] directly
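///
/// An [`InterceptId`] is surfaced to the user in [`Event::HTLCIntercepted`] and later handed
/// back to [`ChannelManager::forward_intercepted_htlc`] or
/// [`ChannelManager::fail_intercepted_htlc`] to resolve the intercepted HTLC.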
653#[derive(Hash, Copy, Clone, PartialEq, Eq)]
654pub struct InterceptId(pub [u8; 32]);
655
656impl InterceptId {
657	/// This intercept id corresponds to an HTLC that will be forwarded via a call to
658	/// [`ChannelManager::forward_intercepted_htlc`].
659	fn from_incoming_shared_secret(ss: &[u8; 32]) -> Self {
660		Self(Sha256::hash(ss).to_byte_array())
661	}
662
663	/// This intercept id corresponds to an HTLC that will be forwarded on receipt of a
664	/// [`ReleaseHeldHtlc`] onion message.
665	fn from_htlc_id_and_chan_id(
666		htlc_id: u64, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
667	) -> Self {
668		let mut sha = Sha256::engine();
669		sha.input(&htlc_id.to_be_bytes());
670		sha.input(&channel_id.0);
671		sha.input(&counterparty_node_id.serialize());
672		Self(Sha256::from_engine(sha).to_byte_array())
673	}
674}
675
676impl Borrow<[u8]> for InterceptId {
677	fn borrow(&self) -> &[u8] {
678		&self.0[..]
679	}
680}
681impl_fmt_traits! {
682	impl fmt_traits for InterceptId {
683		const LENGTH: usize = 32;
684	}
685}
686
687impl Writeable for InterceptId {
688	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
689		self.0.write(w)
690	}
691}
692
693impl Readable for InterceptId {
694	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
695		let buf: [u8; 32] = Readable::read(r)?;
696		Ok(InterceptId(buf))
697	}
698}
699
700/// Optional arguments to [`ChannelManager::pay_for_offer`]
701#[cfg_attr(
702	feature = "dnssec",
703	doc = "and [`ChannelManager::pay_for_offer_from_human_readable_name`]"
704)]
705/// .
706///
707/// These fields will often not need to be set, and the provided [`Self::default`] can be used.
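///
/// For example (illustrative only), overriding a single field while keeping the rest at their
/// defaults:
///
/// ```ignore
/// let params = OptionalOfferPaymentParams {
/// 	payer_note: Some("for last week's dinner".to_string()),
/// 	..Default::default()
/// };
/// ```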
708pub struct OptionalOfferPaymentParams {
709	/// A note that is communicated to the recipient about this payment via
710	/// [`InvoiceRequest::payer_note`].
711	pub payer_note: Option<String>,
712	/// Pathfinding options which tweak how the path is constructed to the recipient.
713	pub route_params_config: RouteParametersConfig,
714	/// The number of tries or time during which we'll retry this payment if some paths to the
715	/// recipient fail.
716	///
717	/// Once the retry limit is reached, further path failures will not be retried and the payment
718	/// will ultimately fail once all pending paths have failed (generating an
719	/// [`Event::PaymentFailed`]).
720	pub retry_strategy: Retry,
721}
722
723impl Default for OptionalOfferPaymentParams {
724	fn default() -> Self {
725		Self {
726			payer_note: None,
727			route_params_config: Default::default(),
728			#[cfg(feature = "std")]
729			retry_strategy: Retry::Timeout(core::time::Duration::from_secs(2)),
730			#[cfg(not(feature = "std"))]
731			retry_strategy: Retry::Attempts(3),
732		}
733	}
734}
735
736#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
737/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
738pub(crate) enum SentHTLCId {
739	PreviousHopData { prev_outbound_scid_alias: u64, htlc_id: u64 },
740	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
741}
742impl SentHTLCId {
743	pub(crate) fn from_source(source: &HTLCSource) -> Self {
744		match source {
745			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
746				prev_outbound_scid_alias: hop_data.prev_outbound_scid_alias,
747				htlc_id: hop_data.htlc_id,
748			},
749			HTLCSource::OutboundRoute { session_priv, .. } => {
750				Self::OutboundRoute { session_priv: session_priv.secret_bytes() }
751			},
752		}
753	}
754}
755impl_writeable_tlv_based_enum!(SentHTLCId,
756	(0, PreviousHopData) => {
757		(0, prev_outbound_scid_alias, required),
758		(2, htlc_id, required),
759	},
760	(2, OutboundRoute) => {
761		(0, session_priv, required),
762	},
763);
764
765// (src_outbound_scid_alias, src_counterparty_node_id, src_funding_outpoint, src_chan_id, src_user_chan_id)
766type PerSourcePendingForward =
767	(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
768
769type FailedHTLCForward = (HTLCSource, PaymentHash, HTLCFailReason, HTLCHandlingFailureType);
770
771mod fuzzy_channelmanager {
772	use super::*;
773
774	/// Tracks the inbound corresponding to an outbound HTLC
775	#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
776	#[derive(Clone, Debug, PartialEq, Eq)]
777	pub enum HTLCSource {
778		PreviousHopData(HTLCPreviousHopData),
779		OutboundRoute {
780			path: Path,
781			session_priv: SecretKey,
782			/// Technically we can recalculate this from the route, but we cache it here to avoid
783			/// doing a double-pass on route when we get a failure back
784			first_hop_htlc_msat: u64,
785			payment_id: PaymentId,
786			/// The BOLT12 invoice associated with this payment, if any. This is stored here to ensure
787			/// we can provide proof-of-payment details in payment claim events even after a restart
788			/// with a stale ChannelManager state.
789			bolt12_invoice: Option<PaidBolt12Invoice>,
790		},
791	}
792
793	/// Tracks the inbound corresponding to an outbound HTLC
794	#[derive(Clone, Debug, Hash, PartialEq, Eq)]
795	pub struct HTLCPreviousHopData {
796		pub prev_outbound_scid_alias: u64,
797		pub user_channel_id: Option<u128>,
798		pub htlc_id: u64,
799		pub incoming_packet_shared_secret: [u8; 32],
800		pub phantom_shared_secret: Option<[u8; 32]>,
801		pub blinded_failure: Option<BlindedFailure>,
802		pub channel_id: ChannelId,
803
804		// These fields are consumed by `claim_funds_from_hop()` when updating a force-closed backwards
805		// channel with a preimage provided by the forward channel.
806		pub outpoint: OutPoint,
807		pub counterparty_node_id: Option<PublicKey>,
808		/// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
809		/// channel remains unconfirmed for too long.
810		pub cltv_expiry: Option<u32>,
811	}
812}
813#[cfg(fuzzing)]
814pub use self::fuzzy_channelmanager::*;
815#[cfg(not(fuzzing))]
816pub(crate) use self::fuzzy_channelmanager::*;
817
818#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
819impl core::hash::Hash for HTLCSource {
820	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
821		match self {
822			HTLCSource::PreviousHopData(prev_hop_data) => {
823				0u8.hash(hasher);
824				prev_hop_data.hash(hasher);
825			},
826			HTLCSource::OutboundRoute {
827				path,
828				session_priv,
829				payment_id,
830				first_hop_htlc_msat,
831				bolt12_invoice,
832			} => {
833				1u8.hash(hasher);
834				path.hash(hasher);
835				session_priv[..].hash(hasher);
836				payment_id.hash(hasher);
837				first_hop_htlc_msat.hash(hasher);
838				bolt12_invoice.hash(hasher);
839			},
840		}
841	}
842}
843impl HTLCSource {
844	#[cfg(any(test, all(ldk_test_vectors, feature = "grind_signatures")))]
845	pub fn dummy() -> Self {
846		HTLCSource::OutboundRoute {
847			path: Path { hops: Vec::new(), blinded_tail: None },
848			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
849			first_hop_htlc_msat: 0,
850			payment_id: PaymentId([2; 32]),
851			bolt12_invoice: None,
852		}
853	}
854
855	/// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
856	/// transaction. Useful to ensure different data structures match up.
857	pub(crate) fn possibly_matches_output(
858		&self, htlc: &super::chan_utils::HTLCOutputInCommitment,
859	) -> bool {
860		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
861			*first_hop_htlc_msat == htlc.amount_msat
862		} else {
863			// There's nothing we can check for forwarded HTLCs
864			true
865		}
866	}
867
868	/// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object),
869	/// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later.
870	pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
871		match self {
872			Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
873			_ => None,
874		}
875	}
876
877	pub(crate) fn static_invoice(&self) -> Option<StaticInvoice> {
878		match self {
879			Self::OutboundRoute {
880				bolt12_invoice: Some(PaidBolt12Invoice::StaticInvoice(inv)),
881				..
882			} => Some(inv.clone()),
883			_ => None,
884		}
885	}
886}
887
888/// This enum is used to specify which error data to send to peers when failing back an HTLC
889/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
890///
891/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
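///
/// A sketch of failing back a payment we don't recognize (illustrative only; assumes a
/// `channel_manager` and the relevant `payment_hash` are in scope):
///
/// ```ignore
/// channel_manager.fail_htlc_backwards_with_reason(
/// 	&payment_hash,
/// 	FailureCode::IncorrectOrUnknownPaymentDetails,
/// );
/// ```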
892#[derive(Clone, Copy)]
893pub enum FailureCode {
894	/// We had a temporary error processing the payment. Useful if no other error codes fit
895	/// and you want to indicate that the payer may want to retry.
896	TemporaryNodeFailure,
897	/// We have a required feature which was not in this onion. For example, you may require
898	/// some additional metadata that was not provided with this payment.
899	RequiredNodeFeatureMissing,
900	/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
901	/// the HTLC is too close to the current block height for safe handling.
902	/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
903	/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
904	IncorrectOrUnknownPaymentDetails,
905	/// We failed to process the payload after the onion was decrypted. You may wish to
906	/// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize.
907	///
908	/// If available, the tuple data may include the type number and byte offset in the
909	/// decrypted byte stream where the failure occurred.
910	InvalidOnionPayload(Option<(u64, u16)>),
911}
912
913impl Into<LocalHTLCFailureReason> for FailureCode {
914	fn into(self) -> LocalHTLCFailureReason {
915		match self {
916			FailureCode::TemporaryNodeFailure => LocalHTLCFailureReason::TemporaryNodeFailure,
917			FailureCode::RequiredNodeFeatureMissing => LocalHTLCFailureReason::RequiredNodeFeature,
918			FailureCode::IncorrectOrUnknownPaymentDetails => {
919				LocalHTLCFailureReason::IncorrectPaymentDetails
920			},
921			FailureCode::InvalidOnionPayload(_) => LocalHTLCFailureReason::InvalidOnionPayload,
922		}
923	}
924}
925
926/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
927/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
928/// immediately (ie with no further calls on it made). Thus, this step happens inside a
929/// peer_state lock. We then return the set of things that need to be done outside the lock in
930/// this struct and call handle_error!() on it.
931struct MsgHandleErrInternal {
932	err: msgs::LightningError,
933	closes_channel: bool,
934	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
935	tx_abort: Option<msgs::TxAbort>,
936}
937impl MsgHandleErrInternal {
938	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
939		Self {
940			err: LightningError {
941				err: err.clone(),
942				action: msgs::ErrorAction::SendErrorMessage {
943					msg: msgs::ErrorMessage { channel_id, data: err },
944				},
945			},
946			closes_channel: false,
947			shutdown_finish: None,
948			tx_abort: None,
949		}
950	}
951
952	fn from_no_close(err: msgs::LightningError) -> Self {
953		Self { err, closes_channel: false, shutdown_finish: None, tx_abort: None }
954	}
955
956	fn from_finish_shutdown(
957		err: String, channel_id: ChannelId, shutdown_res: ShutdownResult,
958		channel_update: Option<msgs::ChannelUpdate>,
959	) -> Self {
960		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
961		let action = if shutdown_res.monitor_update.is_some() {
962			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
963			// should disconnect our peer such that we force them to broadcast their latest
964			// commitment upon reconnecting.
965			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
966		} else {
967			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
968		};
969		Self {
970			err: LightningError { err, action },
971			closes_channel: true,
972			shutdown_finish: Some((shutdown_res, channel_update)),
973			tx_abort: None,
974		}
975	}
976
977	fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
978		let tx_abort = match &err {
979			&ChannelError::Abort(reason) => Some(reason.into_tx_abort_msg(channel_id)),
980			_ => None,
981		};
982		let err = match err {
983			ChannelError::Warn(msg) => LightningError {
984				err: msg.clone(),
985				action: msgs::ErrorAction::SendWarningMessage {
986					msg: msgs::WarningMessage { channel_id, data: msg },
987					log_level: Level::Warn,
988				},
989			},
990			ChannelError::WarnAndDisconnect(msg) => LightningError {
991				err: msg.clone(),
992				action: msgs::ErrorAction::DisconnectPeerWithWarning {
993					msg: msgs::WarningMessage { channel_id, data: msg },
994				},
995			},
996			ChannelError::Ignore(msg) => {
997				LightningError { err: msg, action: msgs::ErrorAction::IgnoreError }
998			},
999			ChannelError::Abort(reason) => {
1000				LightningError { err: reason.to_string(), action: msgs::ErrorAction::IgnoreError }
1001			},
1002			ChannelError::Close((msg, _)) | ChannelError::SendError(msg) => LightningError {
1003				err: msg.clone(),
1004				action: msgs::ErrorAction::SendErrorMessage {
1005					msg: msgs::ErrorMessage { channel_id, data: msg },
1006				},
1007			},
1008		};
1009		Self { err, closes_channel: false, shutdown_finish: None, tx_abort }
1010	}
1011
1012	fn dont_send_error_message(&mut self) {
1013		match &mut self.err.action {
1014			msgs::ErrorAction::DisconnectPeer { msg } => *msg = None,
1015			msgs::ErrorAction::SendErrorMessage { msg: _ } => {
1016				self.err.action = msgs::ErrorAction::IgnoreError;
1017			},
1018			_ => {},
1019		}
1020	}
1021
1022	fn closes_channel(&self) -> bool {
1023		self.closes_channel
1024	}
1025}
1026
1027/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
1028/// be sent in the order they appear in the return value; however, sometimes the order needs to be
1029/// variable at runtime (eg FundedChannel::channel_reestablish needs to re-send messages in the order
1030/// they were originally sent). In those cases, this enum is also returned.
1031#[derive(Clone, PartialEq, Debug)]
1032pub(super) enum RAACommitmentOrder {
1033	/// Send the CommitmentUpdate messages first
1034	CommitmentFirst,
1035	/// Send the RevokeAndACK message first
1036	RevokeAndACKFirst,
1037}
1038
1039/// Similar to scenarios used by [`RAACommitmentOrder`], this determines whether a `channel_ready`
1040/// message should be sent first (i.e., prior to a `commitment_update`) or after the initial
1041/// `commitment_update` and `tx_signatures` for channel funding.
1042pub(super) enum ChannelReadyOrder {
1043	/// Send `channel_ready` message first.
1044	ChannelReadyFirst,
1045	/// Send initial `commitment_update` and `tx_signatures` first.
1046	SignaturesFirst,
1047}
1048
1049/// Information about a payment which is currently being claimed.
1050#[derive(Clone, Debug, PartialEq, Eq)]
1051struct ClaimingPayment {
1052	amount_msat: u64,
1053	payment_purpose: events::PaymentPurpose,
1054	receiver_node_id: PublicKey,
1055	htlcs: Vec<events::ClaimedHTLC>,
1056	sender_intended_value: Option<u64>,
1057	onion_fields: Option<RecipientOnionFields>,
1058	payment_id: Option<PaymentId>,
1059	/// When we claim and generate a [`Event::PaymentClaimed`], we want to block any
1060	/// payment-preimage-removing RAA [`ChannelMonitorUpdate`]s until the [`Event::PaymentClaimed`]
1061	/// is handled, ensuring we can regenerate the event on restart. We pick a random channel to
1062	/// block and store it here.
1063	///
1064	/// Note that once we disallow downgrades to 0.1 we should be able to simply use
1065	/// [`Self::htlcs`] to generate this rather than storing it here (as we won't need the funding
1066	/// outpoint), allowing us to remove this field.
1067	durable_preimage_channel: Option<(OutPoint, PublicKey, ChannelId)>,
1068}
1069impl_writeable_tlv_based!(ClaimingPayment, {
1070	(0, amount_msat, required),
1071	(1, durable_preimage_channel, option),
1072	(2, payment_purpose, required),
1073	(4, receiver_node_id, required),
1074	(5, htlcs, optional_vec),
1075	(7, sender_intended_value, option),
1076	(9, onion_fields, option),
1077	(11, payment_id, option),
1078});
1079
1080struct ClaimablePayment {
1081	purpose: events::PaymentPurpose,
1082	onion_fields: Option<RecipientOnionFields>,
1083	htlcs: Vec<ClaimableHTLC>,
1084}
1085
1086impl ClaimablePayment {
1087	fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
1088		PaymentId::for_inbound_from_htlcs(
1089			secret,
1090			self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id)),
1091		)
1092	}
1093
1094	/// Returns the inbound `(channel_id, user_channel_id)` pairs for all HTLCs associated with the payment.
1095	///
1096	/// Note: The `user_channel_id` will be `None` for HTLCs created using LDK version 0.0.117 or prior.
1097	fn receiving_channel_ids(&self) -> Vec<(ChannelId, Option<u128>)> {
1098		self.htlcs
1099			.iter()
1100			.map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.user_channel_id))
1101			.collect()
1102	}
1103}
1104
1105/// Represents the channel funding transaction type.
1106enum FundingType {
1107	/// This variant is useful when we want LDK to validate the funding transaction and
1108	/// broadcast it automatically.
1109	///
1110	/// This is the normal flow.
1111	Checked(Transaction),
1112	/// This variant is useful when we want LDK to validate the funding transaction and
1113	/// broadcast it manually.
1114	///
1115	/// Used in LSPS2 on a client_trusts_lsp model
1116	CheckedManualBroadcast(Transaction),
1117	/// This variant is useful when we want to loosen the validation checks and allow the
1118	/// funding transaction to be broadcast manually, leaving the responsibility to the caller.
1119	///
1120	/// This is useful in cases of constructing the funding transaction as part of another
1121	/// flow and the caller wants to perform the validation and broadcasting. An example of such
1122	/// scenario could be when constructing the funding transaction as part of a Payjoin
1123	/// transaction.
1124	Unchecked(OutPoint),
1125}
1126
1127impl FundingType {
1128	fn txid(&self) -> Txid {
1129		match self {
1130			FundingType::Checked(tx) => tx.compute_txid(),
1131			FundingType::CheckedManualBroadcast(tx) => tx.compute_txid(),
1132			FundingType::Unchecked(outp) => outp.txid,
1133		}
1134	}
1135
1136	fn transaction_or_dummy(&self) -> Transaction {
1137		match self {
1138			FundingType::Checked(tx) => tx.clone(),
1139			FundingType::CheckedManualBroadcast(tx) => tx.clone(),
1140			FundingType::Unchecked(_) => Transaction {
1141				version: bitcoin::transaction::Version::TWO,
1142				lock_time: bitcoin::absolute::LockTime::ZERO,
1143				input: Vec::new(),
1144				output: Vec::new(),
1145			},
1146		}
1147	}
1148
1149	fn is_manual_broadcast(&self) -> bool {
1150		match self {
1151			FundingType::Checked(_) => false,
1152			FundingType::CheckedManualBroadcast(_) => true,
1153			FundingType::Unchecked(_) => true,
1154		}
1155	}
1156}
1157
1158/// Information about claimable or being-claimed payments
1159struct ClaimablePayments {
1160	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
1161	/// failed/claimed by the user.
1162	///
1163	/// Note that no consistency guarantees are made about the channels given here actually
1164	/// existing anymore by the time you go to read them!
1165	///
1166	/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
1167	/// we don't get a duplicate payment.
1168	claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
1169
1170	/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
1171	/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
1172	/// as an [`events::Event::PaymentClaimed`].
1173	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
1174}
1175
1176impl ClaimablePayments {
1177	/// Moves a payment from [`Self::claimable_payments`] to [`Self::pending_claiming_payments`].
1178	///
1179	/// If `custom_tlvs_known` is false and custom even TLVs are set by the sender, the set of
1180	/// pending HTLCs will be returned in the `Err` variant of this method. They MUST then be
1181	/// failed by the caller as they will not be in either [`Self::claimable_payments`] or
1182	/// [`Self::pending_claiming_payments`].
1183	///
1184	/// If `custom_tlvs_known` is true, and a matching payment is found, it will always be moved.
1185	///
1186	/// If no payment is found, `Err(Vec::new())` is returned.
1187	#[rustfmt::skip]
1188	fn begin_claiming_payment<L: Deref, S: Deref>(
1189		&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
1190		inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
1191	) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
1192		where L::Target: Logger, S::Target: NodeSigner,
1193	{
1194		match self.claimable_payments.remove(&payment_hash) {
1195			Some(payment) => {
1196				let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
1197					.expect("Failed to get node_id for node recipient");
1198				for htlc in payment.htlcs.iter() {
1199					if htlc.prev_hop.phantom_shared_secret.is_some() {
1200						let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
1201							.expect("Failed to get node_id for phantom node recipient");
1202						receiver_node_id = phantom_pubkey;
1203						break;
1204					}
1205				}
1206
1207				if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
1208					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
1209						log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
1210							&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
1211						return Err(payment.htlcs);
1212					}
1213				}
1214
1215				let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
1216				let claiming_payment = self.pending_claiming_payments
1217					.entry(payment_hash)
1218					.and_modify(|_| {
1219						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
1220						log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
1221							&payment_hash);
1222					})
1223					.or_insert_with(|| {
1224						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
1225						let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
1226						// Pick an "arbitrary" channel to block RAAs on until the `PaymentClaimed`
1227						// event is processed, specifically the last channel to get claimed.
1228						let durable_preimage_channel = payment.htlcs.last().map_or(None, |htlc| {
1229							if let Some(node_id) = htlc.prev_hop.counterparty_node_id {
1230								Some((htlc.prev_hop.outpoint, node_id, htlc.prev_hop.channel_id))
1231							} else {
1232								None
1233							}
1234						});
1235						debug_assert!(durable_preimage_channel.is_some());
1236						ClaimingPayment {
1237							amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
1238							payment_purpose: payment.purpose,
1239							receiver_node_id,
1240							htlcs,
1241							sender_intended_value,
1242							onion_fields: payment.onion_fields,
1243							payment_id: Some(payment_id),
1244							durable_preimage_channel,
1245						}
1246					}).clone();
1247
1248				Ok((payment.htlcs, claiming_payment))
1249			},
1250			None => Err(Vec::new())
1251		}
1252	}
1253}
1254
1255/// Events which we process internally but which cannot be processed immediately at the generation
1256/// site, usually because we're running pre-full-init. They are handled immediately once we detect we are
1257/// running normally, and specifically must be processed before any other non-background
1258/// [`ChannelMonitorUpdate`]s are applied.
1259#[derive(Debug)]
1260enum BackgroundEvent {
1261	/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
1262	/// channel to continue normal operation.
1263	///
1264	/// Any such events that exist in [`ChannelManager::pending_background_events`] will *also* be
1265	/// tracked in [`PeerState::in_flight_monitor_updates`].
1266	///
1267	/// Note that any such events are lost on shutdown, so in general they must be updates which
1268	/// are regenerated on startup.
1269	MonitorUpdateRegeneratedOnStartup {
1270		counterparty_node_id: PublicKey,
1271		funding_txo: OutPoint,
1272		channel_id: ChannelId,
1273		update: ChannelMonitorUpdate,
1274	},
1275	/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
1276	/// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
1277	/// on a channel.
1278	MonitorUpdatesComplete { counterparty_node_id: PublicKey, channel_id: ChannelId },
1279}
1280
1281/// A pointer to a channel that is unblocked when an event is surfaced
1282#[derive(Debug)]
1283pub(crate) struct EventUnblockedChannel {
1284	counterparty_node_id: PublicKey,
1285	funding_txo: OutPoint,
1286	channel_id: ChannelId,
1287	blocking_action: RAAMonitorUpdateBlockingAction,
1288}
1289
1290impl Writeable for EventUnblockedChannel {
1291	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1292		self.counterparty_node_id.write(writer)?;
1293		self.funding_txo.write(writer)?;
1294		self.channel_id.write(writer)?;
1295		self.blocking_action.write(writer)
1296	}
1297}
1298
1299impl MaybeReadable for EventUnblockedChannel {
1300	fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1301		let counterparty_node_id = Readable::read(reader)?;
1302		let funding_txo = Readable::read(reader)?;
1303		let channel_id = Readable::read(reader)?;
1304		let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
1305			Some(blocking_action) => blocking_action,
1306			None => return Ok(None),
1307		};
1308		Ok(Some(EventUnblockedChannel {
1309			counterparty_node_id,
1310			funding_txo,
1311			channel_id,
1312			blocking_action,
1313		}))
1314	}
1315}
1316
1317#[derive(Debug)]
1318/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
1319/// Thus, they're primarily useful for (and currently only used for) claims, where the
1320/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor update
1321/// blocking logic entirely and can never be blocked.
1322pub(crate) enum MonitorUpdateCompletionAction {
1323	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
1324	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
1325	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
1326	/// event can be generated.
1327	PaymentClaimed {
1328		payment_hash: PaymentHash,
1329		/// A pending MPP claim which hasn't yet completed.
1330		///
1331		/// Not written to disk.
1332		pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>,
1333	},
1334	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
1335	/// operation of another channel.
1336	///
1337	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1338	/// from completing a monitor update which removes the payment preimage until the inbound edge
1339	/// completes a monitor update containing the payment preimage. In that case, after the inbound
1340	/// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
1341	/// outbound edge.
1342	EmitEventAndFreeOtherChannel {
1343		event: events::Event,
1344		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
1345	},
1346	/// Indicates we should immediately resume the operation of another channel, unless there is
1347	/// some other reason why the channel is blocked. In practice this simply means immediately
1348	/// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
1349	///
1350	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1351	/// from completing a monitor update which removes the payment preimage until the inbound edge
1352	/// completes a monitor update containing the payment preimage. However, we use this variant
1353	/// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
1354	/// fact duplicative and we simply want to resume the outbound edge channel immediately.
1355	///
1356	/// This variant should thus never be written to disk, as it is processed inline rather than
1357	/// stored for later processing.
1358	FreeOtherChannelImmediately {
1359		downstream_counterparty_node_id: PublicKey,
1360		blocking_action: RAAMonitorUpdateBlockingAction,
1361		downstream_channel_id: ChannelId,
1362	},
1363}
1364
1365impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
1366	(0, PaymentClaimed) => {
1367		(0, payment_hash, required),
1368		(9999999999, pending_mpp_claim, (static_value, None)),
1369	},
1370	// Note that FreeOtherChannelImmediately should never be written - we were supposed to free
1371	// *immediately*. However, for simplicity we implement read/write here.
1372	(1, FreeOtherChannelImmediately) => {
1373		(0, downstream_counterparty_node_id, required),
1374		(4, blocking_action, upgradable_required),
1375		(5, downstream_channel_id, required),
1376	},
1377	(2, EmitEventAndFreeOtherChannel) => {
1378		(0, event, upgradable_required),
1379		// LDK prior to 0.0.116 did not have this field as the monitor update application order was
1380		// required by clients. If we downgrade to something prior to 0.0.116 this may result in
1381		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
1382		// support async monitor updates even in LDK 0.0.116 and once we do we'll require no
1383		// downgrades to prior versions.
1384		(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
1385	},
1386);
1387
1388#[derive(Clone, Debug, PartialEq, Eq)]
1389pub(crate) struct PaymentCompleteUpdate {
1390	counterparty_node_id: PublicKey,
1391	channel_funding_outpoint: OutPoint,
1392	channel_id: ChannelId,
1393	htlc_id: SentHTLCId,
1394}
1395
1396impl_writeable_tlv_based!(PaymentCompleteUpdate, {
1397	(1, channel_funding_outpoint, required),
1398	(3, counterparty_node_id, required),
1399	(5, channel_id, required),
1400	(7, htlc_id, required),
1401});
1402
1403#[derive(Clone, Debug, PartialEq, Eq)]
1404pub(crate) enum EventCompletionAction {
1405	ReleaseRAAChannelMonitorUpdate {
1406		counterparty_node_id: PublicKey,
1407		// Was required until LDK 0.2. Always filled in as `Some`.
1408		channel_funding_outpoint: Option<OutPoint>,
1409		channel_id: ChannelId,
1410	},
1411
1412	/// When a payment's resolution is communicated to the downstream logic via
1413	/// [`Event::PaymentSent`] or [`Event::PaymentFailed`] we may want to mark the payment as
1414	/// fully-resolved in the [`ChannelMonitor`], which we do via this action.
1415	/// Note that this action will be dropped on downgrade to LDK prior to 0.2!
1416	ReleasePaymentCompleteChannelMonitorUpdate(PaymentCompleteUpdate),
1417}
1418impl_writeable_tlv_based_enum!(EventCompletionAction,
1419	(0, ReleaseRAAChannelMonitorUpdate) => {
1420		(0, channel_funding_outpoint, option),
1421		(2, counterparty_node_id, required),
1422		(3, channel_id, (default_value, {
1423			if channel_funding_outpoint.is_none() {
1424				Err(DecodeError::InvalidValue)?
1425			}
1426			ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.unwrap())
1427		})),
1428	}
1429	{1, ReleasePaymentCompleteChannelMonitorUpdate} => (),
1430);
1431
1432/// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
1433///
1434/// This is currently identical to [`MPPClaimHTLCSource`]. In the future, we should ideally
1436/// drop this and merge the two, however doing so may break upgrades for nodes which have pending
1437/// forwarded payments.
1438struct HTLCClaimSource {
1439	counterparty_node_id: PublicKey,
1440	funding_txo: OutPoint,
1441	channel_id: ChannelId,
1442	htlc_id: u64,
1443}
1444
1445impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
1446	fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
1447		HTLCClaimSource {
1448			counterparty_node_id: o.counterparty_node_id,
1449			funding_txo: o.funding_txo,
1450			channel_id: o.channel_id,
1451			htlc_id: o.htlc_id,
1452		}
1453	}
1454}
1455
1456#[derive(Debug)]
1457pub(crate) struct PendingMPPClaim {
1458	channels_without_preimage: Vec<(PublicKey, ChannelId)>,
1459	channels_with_preimage: Vec<(PublicKey, ChannelId)>,
1460}
1461
1462#[derive(Clone, Debug, Hash, PartialEq, Eq)]
1463/// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is
1464/// tracked in [`ChannelMonitor`]s, so that it can be converted to an [`HTLCClaimSource`] for claim
1465/// replays on startup.
1466struct MPPClaimHTLCSource {
1467	counterparty_node_id: PublicKey,
1468	funding_txo: OutPoint,
1469	channel_id: ChannelId,
1470	htlc_id: u64,
1471}
1472
1473impl_writeable_tlv_based!(MPPClaimHTLCSource, {
1474	(0, counterparty_node_id, required),
1475	(2, funding_txo, required),
1476	(4, channel_id, required),
1477	(6, htlc_id, required),
1478});
1479
1480#[derive(Clone, Debug, PartialEq, Eq)]
1481/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
1482/// [`ChannelMonitor`] so that we can replay the claim without any information from the
1483/// [`ChannelManager`] at all. This struct stores enough of that information to replay claims
1484/// against all MPP parts as well as generate an [`Event::PaymentClaimed`].
1485pub(crate) struct PaymentClaimDetails {
1486	mpp_parts: Vec<MPPClaimHTLCSource>,
1487	/// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
1488	/// [`Event::PaymentClaimed`].
1489	claiming_payment: ClaimingPayment,
1490}
1491
1492impl_writeable_tlv_based!(PaymentClaimDetails, {
1493	(0, mpp_parts, required_vec),
1494	(2, claiming_payment, required),
1495});
1496
1497#[derive(Clone)]
1498pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
1499
1500impl PartialEq for PendingMPPClaimPointer {
1501	fn eq(&self, o: &Self) -> bool {
1502		Arc::ptr_eq(&self.0, &o.0)
1503	}
1504}
1505impl Eq for PendingMPPClaimPointer {}
1506
1507impl core::fmt::Debug for PendingMPPClaimPointer {
1508	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
1509		self.0.lock().unwrap().fmt(f)
1510	}
1511}
1512
1513#[derive(Clone, PartialEq, Eq, Debug)]
1514/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
1515/// the blocked action here. See enum variants for more info.
1516pub(crate) enum RAAMonitorUpdateBlockingAction {
1517	/// A forwarded payment was claimed. We block the downstream channel completing its monitor
1518	/// update which removes the HTLC preimage until the upstream channel has gotten the preimage
1519	/// durably to disk.
1520	ForwardedPaymentInboundClaim {
1521		/// The upstream channel ID (i.e. the inbound edge).
1522		channel_id: ChannelId,
1523		/// The HTLC ID on the inbound edge.
1524		htlc_id: u64,
1525	},
1526	/// We claimed an MPP payment across multiple channels. We have to block removing the payment
1527	/// preimage from any monitor until the last monitor is updated to contain the payment
1528	/// preimage. Otherwise we may not be able to replay the preimage on the monitor(s) that
1529	/// weren't updated on startup.
1530	///
1531	/// This variant is *not* written to disk, instead being inferred from [`ChannelMonitor`]
1532	/// state.
1533	ClaimedMPPPayment { pending_claim: PendingMPPClaimPointer },
1534}
1535
1536impl RAAMonitorUpdateBlockingAction {
1537	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
1538		Self::ForwardedPaymentInboundClaim {
1539			channel_id: prev_hop.channel_id,
1540			htlc_id: prev_hop.htlc_id,
1541		}
1542	}
1543}
1544
1545impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
1546	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
1547	unread_variants: ClaimedMPPPayment
1548);
1549
1550impl Readable for Option<RAAMonitorUpdateBlockingAction> {
1551	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
1552		Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
1553	}
1554}
1555
1556/// State we hold per-peer.
1557pub(super) struct PeerState<SP: Deref>
1558where
1559	SP::Target: SignerProvider,
1560{
1561	/// `channel_id` -> `Channel`
1562	///
1563	/// Holds all channels where the peer is the counterparty.
1564	pub(super) channel_by_id: HashMap<ChannelId, Channel<SP>>,
1565	/// `temporary_channel_id` -> `InboundChannelRequest`.
1566	///
1567	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
1568	/// the peer is the counterparty. If the channel is accepted, then the entry in this table is
1569	/// removed, and an InboundV1Channel is created and placed in the `channel_by_id` table. If
1570	/// the channel is rejected, then the entry is simply removed.
1571	pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
1572	/// The latest `InitFeatures` we heard from the peer.
1573	latest_features: InitFeatures,
1574	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
1575	/// for broadcast messages, where ordering isn't as strict).
1576	pub(super) pending_msg_events: Vec<MessageSendEvent>,
1577	/// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
1578	/// user but which have not yet completed. We still keep the funding outpoint around to backfill
1579	/// the legacy TLV field to support downgrading.
1580	///
1581	/// Note that the channel may no longer exist. For example if the channel was closed but we
1582	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1583	/// for a missing channel.
1584	///
1585	/// Note that any pending [`BackgroundEvent::MonitorUpdateRegeneratedOnStartup`]s which are
1586	/// sitting in [`ChannelManager::pending_background_events`] will *also* be tracked here. This
1587	/// avoids a race condition during [`ChannelManager::pending_background_events`] processing
1588	/// where we complete one [`ChannelMonitorUpdate`] (but there are more pending as background
1589	/// events) but we conclude all pending [`ChannelMonitorUpdate`]s have completed and it's safe
1590	/// to run post-completion actions.
1591	in_flight_monitor_updates: BTreeMap<ChannelId, (OutPoint, Vec<ChannelMonitorUpdate>)>,
1592	/// Map from a specific channel to some action(s) that should be taken when all pending
1593	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
1594	///
1595	/// Note that because we generally only have one entry here, a HashMap would be pretty overkill. A
1596	/// BTreeMap currently stores more than ten elements per leaf node, so even with up to a few
1597	/// channels per peer this will just be one allocation and will amount to a linear list of
1598	/// channels to walk, avoiding the whole hashing rigmarole.
1599	///
1600	/// Note that the channel may no longer exist. For example, if a channel was closed but we
1601	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1602	/// for a missing channel. While a malicious peer could construct a second channel with the
1603	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
1604	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
1605	/// duplicates do not occur, so such channels should fail without a monitor update completing.
1606	///
1607	/// Note that these run after all *non-blocked* [`ChannelMonitorUpdate`]s have been persisted.
1608	/// Thus, they're primarily useful for (and currently only used for) claims, where the
1609	/// [`ChannelMonitorUpdate`] we care about is a preimage update, which bypasses the monitor
1610	/// update blocking logic entirely and can never be blocked.
1611	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
1612	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
1613	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
1614	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
1615	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
1616	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
1617	/// The latest [`ChannelMonitor::get_latest_update_id`] value for all closed channels as they
1618	/// exist on-disk/in our [`chain::Watch`].
1619	///
1620	/// If there are any updates pending in [`Self::in_flight_monitor_updates`] this will contain
1621	/// the highest `update_id` of all the pending in-flight updates (note that any pending updates
1622	/// not yet applied sitting in [`ChannelManager::pending_background_events`] will also be
1623	/// considered as they are also in [`Self::in_flight_monitor_updates`]).
1624	///
1625	/// Note that channels which were closed prior to LDK 0.1 may have a value here of `u64::MAX`.
1626	closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
1627	/// The peer is currently connected (i.e. we've seen a
1628	/// [`BaseMessageHandler::peer_connected`] and no corresponding
1629	/// [`BaseMessageHandler::peer_disconnected`]).
1630	pub is_connected: bool,
1631	/// Holds the peer storage data for the channel partner on a per-peer basis.
1632	peer_storage: Vec<u8>,
1633}
1634
1635impl<SP: Deref> PeerState<SP>
1636where
1637	SP::Target: SignerProvider,
1638{
1639	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
1640	/// If true is passed for `require_disconnected`, the function will return false if we haven't
1641	/// disconnected from the node already, i.e. `PeerState::is_connected` is set to `true`.
1642	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
1643		if require_disconnected && self.is_connected {
1644			return false;
1645		}
1646		for (_, updates) in self.in_flight_monitor_updates.values() {
1647			if !updates.is_empty() {
1648				return false;
1649			}
1650		}
1651		let chan_is_funded_or_outbound = |(_, channel): (_, &Channel<SP>)| {
1652			channel.is_funded() || channel.funding().is_outbound()
1653		};
1654		!self.channel_by_id.iter().any(chan_is_funded_or_outbound)
1655			&& self.monitor_update_blocked_actions.is_empty()
1656			&& self.closed_channel_monitor_update_ids.is_empty()
1657	}
1658
1659	// Returns a count of all channels we have with this peer, including unfunded channels.
1660	fn total_channel_count(&self) -> usize {
1661		self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
1662	}
1663
1664	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
1665	fn has_channel(&self, channel_id: &ChannelId) -> bool {
1666		self.channel_by_id.contains_key(channel_id)
1667			|| self.inbound_channel_request_by_id.contains_key(channel_id)
1668	}
1669}
1670
1671#[derive(Clone)]
1672pub(super) enum OpenChannelMessage {
1673	V1(msgs::OpenChannel),
1674	V2(msgs::OpenChannelV2),
1675}
1676
1677pub(super) enum OpenChannelMessageRef<'a> {
1678	V1(&'a msgs::OpenChannel),
1679	V2(&'a msgs::OpenChannelV2),
1680}
1681
1682/// A not-yet-accepted inbound (from counterparty) channel. Once
1683/// accepted, the parameters will be used to construct a channel.
1684pub(super) struct InboundChannelRequest {
1685	/// The original OpenChannel message.
1686	pub open_channel_msg: OpenChannelMessage,
1687	/// The number of ticks remaining before the request expires.
1688	pub ticks_remaining: i32,
1689}
1690
1691/// The number of ticks that may elapse while we're waiting for an unaccepted inbound channel to be
1692/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
1693const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
1694
1695/// The number of blocks of historical feerate estimates we keep around and consider when deciding
1696/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
1697/// after startup before we consider force-closing channels for having too-low fees.
1698pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
1699
1700/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
1701/// actually ours and not some duplicate HTLC sent to us by a node along the route.
1702///
1703/// For users who don't want to bother doing their own payment preimage storage, we also store that
1704/// here.
1705///
1706/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
1707/// and instead encoding it in the payment secret.
1708#[derive(Debug)]
1709struct PendingInboundPayment {
1710	/// The payment secret that the sender must use for us to accept this payment
1711	payment_secret: PaymentSecret,
1712	/// Time at which this HTLC expires - blocks with a header time above this value will result in
1713	/// this payment being removed.
1714	expiry_time: u64,
1715	/// Arbitrary identifier the user specifies (or not)
1716	user_payment_id: u64,
1717	// Other required attributes of the payment, optionally enforced:
1718	payment_preimage: Option<PaymentPreimage>,
1719	min_value_msat: Option<u64>,
1720}
1721
1722/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
1723/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
1724/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
1725/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
1726/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
1727/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1728/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
1729/// of [`KeysManager`] and [`DefaultRouter`].
1730///
1731/// This is not exported to bindings users as type aliases aren't supported in most languages.
1732#[cfg(not(c_bindings))]
1733pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
1734	Arc<M>,
1735	Arc<T>,
1736	Arc<KeysManager>,
1737	Arc<KeysManager>,
1738	Arc<KeysManager>,
1739	Arc<F>,
1740	Arc<
1741		DefaultRouter<
1742			Arc<NetworkGraph<Arc<L>>>,
1743			Arc<L>,
1744			Arc<KeysManager>,
1745			Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
1746			ProbabilisticScoringFeeParameters,
1747			ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
1748		>,
1749	>,
1750	Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>, Arc<KeysManager>>>,
1751	Arc<L>,
1752>;
1753
1754/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
1755/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
1756/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
1757/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
1758/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
1759/// prevents issues such as overly long function definitions. Note that the ChannelManager can take any type
1760/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1761/// or, respectively, [`Router`]  for its router, but this type alias chooses the concrete types
1762/// of [`KeysManager`] and [`DefaultRouter`].
1763///
1764/// This is not exported to bindings users as type aliases aren't supported in most languages.
1765#[cfg(not(c_bindings))]
1766pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> = ChannelManager<
1767	&'a M,
1768	&'b T,
1769	&'c KeysManager,
1770	&'c KeysManager,
1771	&'c KeysManager,
1772	&'d F,
1773	&'e DefaultRouter<
1774		&'f NetworkGraph<&'g L>,
1775		&'g L,
1776		&'c KeysManager,
1777		&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
1778		ProbabilisticScoringFeeParameters,
1779		ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>,
1780	>,
1781	&'i DefaultMessageRouter<&'f NetworkGraph<&'g L>, &'g L, &'c KeysManager>,
1782	&'g L,
1783>;
1784
1785/// A trivial trait which describes any [`ChannelManager`].
1786///
1787/// This is not exported to bindings users as general cover traits aren't useful in other
1788/// languages.
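///
/// As a minimal sketch, a helper that is generic over any [`ChannelManager`] can use
/// [`Self::get_cm`] to reach the concrete instance and call its methods:
///
/// ```
/// # use lightning::ln::channelmanager::AChannelManager;
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// // `get_cm` returns a reference to the underlying `ChannelManager`, so generic code can call
/// // its methods without naming all of its type parameters.
/// let channel_manager = channel_manager.get_cm();
/// let _channels = channel_manager.list_channels();
/// # }
/// ```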
1789pub trait AChannelManager {
1790	/// A type implementing [`chain::Watch`].
1791	type Watch: chain::Watch<Self::Signer> + ?Sized;
1792	/// A type that may be dereferenced to [`Self::Watch`].
1793	type M: Deref<Target = Self::Watch>;
1794	/// A type implementing [`BroadcasterInterface`].
1795	type Broadcaster: BroadcasterInterface + ?Sized;
1796	/// A type that may be dereferenced to [`Self::Broadcaster`].
1797	type T: Deref<Target = Self::Broadcaster>;
1798	/// A type implementing [`EntropySource`].
1799	type EntropySource: EntropySource + ?Sized;
1800	/// A type that may be dereferenced to [`Self::EntropySource`].
1801	type ES: Deref<Target = Self::EntropySource>;
1802	/// A type implementing [`NodeSigner`].
1803	type NodeSigner: NodeSigner + ?Sized;
1804	/// A type that may be dereferenced to [`Self::NodeSigner`].
1805	type NS: Deref<Target = Self::NodeSigner>;
1806	/// A type implementing [`EcdsaChannelSigner`].
1807	type Signer: EcdsaChannelSigner + Sized;
1808	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
1809	type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
1810	/// A type that may be dereferenced to [`Self::SignerProvider`].
1811	type SP: Deref<Target = Self::SignerProvider>;
1812	/// A type implementing [`FeeEstimator`].
1813	type FeeEstimator: FeeEstimator + ?Sized;
1814	/// A type that may be dereferenced to [`Self::FeeEstimator`].
1815	type F: Deref<Target = Self::FeeEstimator>;
1816	/// A type implementing [`Router`].
1817	type Router: Router + ?Sized;
1818	/// A type that may be dereferenced to [`Self::Router`].
1819	type R: Deref<Target = Self::Router>;
1820	/// A type implementing [`MessageRouter`].
1821	type MessageRouter: MessageRouter + ?Sized;
1822	/// A type that may be dereferenced to [`Self::MessageRouter`].
1823	type MR: Deref<Target = Self::MessageRouter>;
1824	/// A type implementing [`Logger`].
1825	type Logger: Logger + ?Sized;
1826	/// A type that may be dereferenced to [`Self::Logger`].
1827	type L: Deref<Target = Self::Logger>;
1828	/// Returns a reference to the actual [`ChannelManager`] object.
1829	fn get_cm(
1830		&self,
1831	) -> &ChannelManager<
1832		Self::M,
1833		Self::T,
1834		Self::ES,
1835		Self::NS,
1836		Self::SP,
1837		Self::F,
1838		Self::R,
1839		Self::MR,
1840		Self::L,
1841	>;
1842}
1843
1844impl<
1845		M: Deref,
1846		T: Deref,
1847		ES: Deref,
1848		NS: Deref,
1849		SP: Deref,
1850		F: Deref,
1851		R: Deref,
1852		MR: Deref,
1853		L: Deref,
1854	> AChannelManager for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
1855where
1856	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
1857	T::Target: BroadcasterInterface,
1858	ES::Target: EntropySource,
1859	NS::Target: NodeSigner,
1860	SP::Target: SignerProvider,
1861	F::Target: FeeEstimator,
1862	R::Target: Router,
1863	MR::Target: MessageRouter,
1864	L::Target: Logger,
1865{
1866	type Watch = M::Target;
1867	type M = M;
1868	type Broadcaster = T::Target;
1869	type T = T;
1870	type EntropySource = ES::Target;
1871	type ES = ES;
1872	type NodeSigner = NS::Target;
1873	type NS = NS;
1874	type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
1875	type SignerProvider = SP::Target;
1876	type SP = SP;
1877	type FeeEstimator = F::Target;
1878	type F = F;
1879	type Router = R::Target;
1880	type R = R;
1881	type MessageRouter = MR::Target;
1882	type MR = MR;
1883	type Logger = L::Target;
1884	type L = L;
1885	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> {
1886		self
1887	}
1888}
1889
1890/// A lightning node's channel state machine and payment management logic, which facilitates
1891/// sending, forwarding, and receiving payments through lightning channels.
1892///
1893/// [`ChannelManager`] is parameterized by a number of components to achieve this.
1894/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
1895///   channel
1896/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
1897///   closing channels
1898/// - [`EntropySource`] for providing random data needed for cryptographic operations
1899/// - [`NodeSigner`] for cryptographic operations scoped to the node
1900/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
1901/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
1902///   timely manner
1903/// - [`Router`] for finding payment paths when initiating and retrying payments
1904/// - [`MessageRouter`] for finding message paths when initiating and retrying onion messages
1905/// - [`Logger`] for logging operational information of varying degrees
1906///
1907/// Additionally, it implements the following traits:
1908/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
1909/// - [`BaseMessageHandler`] to handle peer dis/connection and send messages to peers
1910/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
1911/// - [`EventsProvider`] to generate user-actionable [`Event`]s
1912/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
1913///
1914/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
1915/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
1916///
1917/// # `ChannelManager` vs `ChannelMonitor`
1918///
1919/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
1920/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
1921/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
1922/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
1923/// [`chain::Watch`] of them.
1924///
1925/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
1926/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
1927/// for any pertinent on-chain activity, enforcing claims as needed.
1928///
1929/// This division of off-chain management and on-chain enforcement allows for interesting node
1930/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
1931/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
1932///
1933/// # Initialization
1934///
1935/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
1936/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
1937/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
1938/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
1939/// detailed in the [`ChannelManagerReadArgs`] documentation.
1940///
1941/// ```
1942/// use bitcoin::BlockHash;
1943/// use bitcoin::network::Network;
1944/// use lightning::chain::BestBlock;
1945/// # use lightning::chain::channelmonitor::ChannelMonitor;
1946/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
1947/// # use lightning::routing::gossip::NetworkGraph;
1948/// use lightning::util::config::UserConfig;
1949/// use lightning::util::ser::ReadableArgs;
1950///
1951/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
1952/// # fn example<
1953/// #     'a,
1954/// #     L: lightning::util::logger::Logger,
1955/// #     ES: lightning::sign::EntropySource,
1956/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
1957/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
1958/// #     SP: Sized,
1959/// #     R: lightning::io::Read,
1960/// # >(
1961/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
1962/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
1963/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
1964/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
1965/// #     message_router: &lightning::onion_message::messenger::DefaultMessageRouter<&NetworkGraph<&'a L>, &'a L, &ES>,
1966/// #     logger: &L,
1967/// #     entropy_source: &ES,
1968/// #     node_signer: &dyn lightning::sign::NodeSigner,
1969/// #     signer_provider: &lightning::sign::DynSignerProvider,
1970/// #     best_block: lightning::chain::BestBlock,
1971/// #     current_timestamp: u32,
1972/// #     mut reader: R,
1973/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
1974/// // Fresh start with no channels
1975/// let params = ChainParameters {
1976///     network: Network::Bitcoin,
1977///     best_block,
1978/// };
1979/// let config = UserConfig::default();
1980/// let channel_manager = ChannelManager::new(
1981///     fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger,
1982///     entropy_source, node_signer, signer_provider, config.clone(), params, current_timestamp,
1983/// );
1984///
1985/// // Restart from deserialized data
1986/// let mut channel_monitors = read_channel_monitors();
1987/// let args = ChannelManagerReadArgs::new(
1988///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1989///     router, message_router, logger, config, channel_monitors.iter().collect(),
1990/// );
1991/// let (block_hash, channel_manager) =
1992///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
1993///
1994/// // Update the ChannelManager and ChannelMonitors with the latest chain data
1995/// // ...
1996///
1997/// // Move the monitors to the ChannelManager's chain::Watch parameter
1998/// for monitor in channel_monitors {
1999///     chain_monitor.watch_channel(monitor.channel_id(), monitor);
2000/// }
2001/// # Ok(())
2002/// # }
2003/// ```
2004///
2005/// # Operation
2006///
2007/// The following is required for [`ChannelManager`] to function properly:
2008/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
2009///   called by [`PeerManager::read_event`] when processing network I/O)
2010/// - Process peer connections and send messages to peers obtained via its [`BaseMessageHandler`]
2011///   implementation (typically initiated when [`PeerManager::process_events`] is called)
2012/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
2013///   as documented by those traits
2014/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
2015///   every minute
2016/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
2017///   [`KVStoreSync`] implementation
2018/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
2019///
2020/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
2021/// when the last two requirements need to be checked.
2022///
2023/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
2024/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
2025/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
2026/// crate. For languages other than Rust, the availability of similar utilities may vary.
2027///
2028/// # Channels
2029///
2030/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
2031/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
2032/// currently open channels.
2033///
2034/// ```
2035/// # use lightning::ln::channelmanager::AChannelManager;
2036/// #
2037/// # fn example<T: AChannelManager>(channel_manager: T) {
2038/// # let channel_manager = channel_manager.get_cm();
2039/// let channels = channel_manager.list_usable_channels();
2040/// for details in channels {
2041///     println!("{:?}", details);
2042/// }
2043/// # }
2044/// ```
2045///
2046/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
2047/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
2048/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
2049/// by [`ChannelManager`].
2050///
2051/// ## Opening Channels
2052///
2053/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
2054/// opening an outbound channel, which requires self-funding when handling
2055/// [`Event::FundingGenerationReady`].
2056///
2057/// ```
2058/// # use bitcoin::{ScriptBuf, Transaction};
2059/// # use bitcoin::secp256k1::PublicKey;
2060/// # use lightning::ln::channelmanager::AChannelManager;
2061/// # use lightning::events::{Event, EventsProvider};
2062/// #
2063/// # trait Wallet {
2064/// #     fn create_funding_transaction(
2065/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
2066/// #     ) -> Transaction;
2067/// # }
2068/// #
2069/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
2070/// # let channel_manager = channel_manager.get_cm();
2071/// let value_sats = 1_000_000;
2072/// let push_msats = 10_000_000;
2073/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
2074///     Ok(channel_id) => println!("Opening channel {}", channel_id),
2075///     Err(e) => println!("Error opening channel: {:?}", e),
2076/// }
2077///
2078/// // On the event processing thread once the peer has responded
2079/// channel_manager.process_pending_events(&|event| {
2080///     match event {
2081///         Event::FundingGenerationReady {
2082///             temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
2083///             user_channel_id, ..
2084///         } => {
2085///             assert_eq!(user_channel_id, 42);
2086///             let funding_transaction = wallet.create_funding_transaction(
2087///                 channel_value_satoshis, output_script
2088///             );
2089///             match channel_manager.funding_transaction_generated(
2090///                 temporary_channel_id, counterparty_node_id, funding_transaction
2091///             ) {
2092///                 Ok(()) => println!("Funding channel {}", temporary_channel_id),
2093///                 Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
2094///             }
2095///         },
2096///         Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
2097///             assert_eq!(user_channel_id, 42);
2098///             println!(
2099///                 "Channel {} (formerly {}) is now pending (funding transaction has been broadcasted)",
2100///                 channel_id, former_temporary_channel_id.unwrap()
2101///             );
2102///         },
2103///         Event::ChannelReady { channel_id, user_channel_id, .. } => {
2104///             assert_eq!(user_channel_id, 42);
2105///             println!("Channel {} ready", channel_id);
2106///         },
2107///         // ...
2108///     #     _ => {},
2109///     }
2110///     Ok(())
2111/// });
2112/// # }
2113/// ```
2114///
2115/// ## Accepting Channels
2116///
2117/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
2118/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
2119/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
2120///
2121/// ```
2122/// # use bitcoin::secp256k1::PublicKey;
2123/// # use lightning::ln::channelmanager::AChannelManager;
2124/// # use lightning::events::{Event, EventsProvider};
2125/// #
2126/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
2127/// #     // ...
2128/// #     unimplemented!()
2129/// # }
2130/// #
2131/// # fn example<T: AChannelManager>(channel_manager: T) {
2132/// # let channel_manager = channel_manager.get_cm();
2133/// # let error_message = "Channel force-closed";
2134/// channel_manager.process_pending_events(&|event| {
2135///     match event {
2136///         Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
2137///             if !is_trusted(counterparty_node_id) {
2138///                 match channel_manager.force_close_broadcasting_latest_txn(
2139///                     &temporary_channel_id, &counterparty_node_id, error_message.to_string()
2140///                 ) {
2141///                     Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
2142///                     Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
2143///                 }
2144///                 return Ok(());
2145///             }
2146///
2147///             let user_channel_id = 43;
2148///             match channel_manager.accept_inbound_channel(
2149///                 &temporary_channel_id, &counterparty_node_id, user_channel_id, None
2150///             ) {
2151///                 Ok(()) => println!("Accepting channel {}", temporary_channel_id),
2152///                 Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
2153///             }
2154///         },
2155///         // ...
2156///     #     _ => {},
2157///     }
2158///     Ok(())
2159/// });
2160/// # }
2161/// ```
2162///
2163/// ## Closing Channels
2164///
2165/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
2166/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
2167/// lower fees and immediate access to funds. However, the latter may be necessary if the
2168/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
2169/// once the channel has been closed successfully.
2170///
2171/// ```
2172/// # use bitcoin::secp256k1::PublicKey;
2173/// # use lightning::ln::types::ChannelId;
2174/// # use lightning::ln::channelmanager::AChannelManager;
2175/// # use lightning::events::{Event, EventsProvider};
2176/// #
2177/// # fn example<T: AChannelManager>(
2178/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
2179/// # ) {
2180/// # let channel_manager = channel_manager.get_cm();
2181/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
2182///     Ok(()) => println!("Closing channel {}", channel_id),
2183///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
2184/// }
2185///
2186/// // On the event processing thread
2187/// channel_manager.process_pending_events(&|event| {
2188///     match event {
2189///         Event::ChannelClosed { channel_id, user_channel_id, ..  } => {
2190///             assert_eq!(user_channel_id, 42);
2191///             println!("Channel {} closed", channel_id);
2192///         },
2193///         // ...
2194///     #     _ => {},
2195///     }
2196///     Ok(())
2197/// });
2198/// # }
2199/// ```
2200///
2201/// # Payments
2202///
2203/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
2204/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
2205/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
2206/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
2207/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry failed
2208/// HTLCs.
2209///
2210/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
2211/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
2212/// for a payment will be retried according to the payment's [`Retry`] strategy or until
2213/// [`abandon_payment`] is called.
2214///
2215/// ## BOLT 11 Invoices
2216///
2217/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. However, in order to
2218/// construct a [`Bolt11Invoice`] that is compatible with [`ChannelManager`], use
2219/// [`create_bolt11_invoice`]. This method serves as a convenience for building invoices with the
2220/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
2221/// own [`PaymentHash`], override the appropriate [`Bolt11InvoiceParameters`], which is equivalent
2222/// to using [`create_inbound_payment_for_hash`].
2223///
2224/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
2225/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
2226/// an [`Event::PaymentClaimed`].
2227///
2228/// ```
2229/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2230/// # use lightning::ln::channelmanager::{AChannelManager, Bolt11InvoiceParameters};
2231/// #
2232/// # fn example<T: AChannelManager>(channel_manager: T) {
2233/// # let channel_manager = channel_manager.get_cm();
2234/// let params = Bolt11InvoiceParameters {
2235///     amount_msats: Some(10_000_000),
2236///     invoice_expiry_delta_secs: Some(3600),
2237///     ..Default::default()
2238/// };
2239/// let invoice = match channel_manager.create_bolt11_invoice(params) {
2240///     Ok(invoice) => {
2241///         println!("Creating invoice with payment hash {}", invoice.payment_hash());
2242///         invoice
2243///     },
2244///     Err(e) => panic!("Error creating invoice: {}", e),
2245/// };
2246///
2247/// // On the event processing thread
2248/// channel_manager.process_pending_events(&|event| {
2249///     match event {
2250///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2251///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
2252///                 assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
2253///                 println!("Claiming payment {}", payment_hash);
2254///                 channel_manager.claim_funds(payment_preimage);
2255///             },
2256///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
2257///                 println!("Unknown payment hash: {}", payment_hash);
2258///             },
2259///             PaymentPurpose::SpontaneousPayment(payment_preimage) => {
2260///                 assert_ne!(payment_hash.0, invoice.payment_hash().as_ref());
2261///                 println!("Claiming spontaneous payment {}", payment_hash);
2262///                 channel_manager.claim_funds(payment_preimage);
2263///             },
2264///             // ...
2265/// #           _ => {},
2266///         },
2267///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2268///             assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
2269///             println!("Claimed {} msats", amount_msat);
2270///         },
2271///         // ...
2272/// #       _ => {},
2273///     }
2274///     Ok(())
2275/// });
2276/// # }
2277/// ```
2278///
2279/// ```
2280/// # use bitcoin::hashes::Hash;
2281/// # use lightning::events::{Event, EventsProvider};
2282/// # use lightning::types::payment::PaymentHash;
2283/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2284/// # use lightning::routing::router::RouteParametersConfig;
2285/// # use lightning_invoice::Bolt11Invoice;
2286/// #
2287/// # fn example<T: AChannelManager>(
2288/// #     channel_manager: T, invoice: &Bolt11Invoice, route_params_config: RouteParametersConfig,
2289/// #     retry: Retry
2290/// # ) {
2291/// # let channel_manager = channel_manager.get_cm();
2292/// # let payment_id = PaymentId([42; 32]);
2293/// # let payment_hash = PaymentHash((*invoice.payment_hash()).to_byte_array());
2294/// match channel_manager.pay_for_bolt11_invoice(
2295///     invoice, payment_id, None, route_params_config, retry
2296/// ) {
2297///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
2298///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
2299/// }
2300///
2301/// let expected_payment_id = payment_id;
2302/// let expected_payment_hash = payment_hash;
2303/// assert!(
2304///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2305///         details,
2306///         RecentPaymentDetails::Pending {
2307///             payment_id: expected_payment_id,
2308///             payment_hash: expected_payment_hash,
2309///             ..
2310///         }
2311///     )).is_some()
2312/// );
2313///
2314/// // On the event processing thread
2315/// channel_manager.process_pending_events(&|event| {
2316///     match event {
2317///         Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
2318///         Event::PaymentFailed { payment_hash: Some(payment_hash), .. } =>
2319///             println!("Failed paying {}", payment_hash),
2320///         // ...
2321///     #     _ => {},
2322///     }
2323///     Ok(())
2324/// });
2325/// # }
2326/// ```
2327///
2328/// ## BOLT 12 Offers
2329///
2330/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
2331/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
2332/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
2333/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
2334/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
2335/// stateless just as BOLT 11 invoices are.
2336///
2337/// ```
2338/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2339/// # use lightning::ln::channelmanager::AChannelManager;
2340/// # use lightning::offers::parse::Bolt12SemanticError;
2341/// # use lightning::routing::router::RouteParametersConfig;
2342/// #
2343/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
2344/// # let channel_manager = channel_manager.get_cm();
2345/// let offer = channel_manager
2346///     .create_offer_builder()?
2347/// # ;
2348/// # // Needed for compiling for c_bindings
2349/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
2350/// # let offer = builder
2351///     .description("coffee".to_string())
2352///     .amount_msats(10_000_000)
2353///     .build()?;
2354/// let bech32_offer = offer.to_string();
2355///
2356/// // On the event processing thread
2357/// channel_manager.process_pending_events(&|event| {
2358///     match event {
2359///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2360///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
2361///                 println!("Claiming payment {}", payment_hash);
2362///                 channel_manager.claim_funds(payment_preimage);
2363///             },
2364///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
2365///                 println!("Unknown payment hash: {}", payment_hash);
2366///             }
2367/// #           _ => {},
2368///         },
2369///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2370///             println!("Claimed {} msats", amount_msat);
2371///         },
2372///         // ...
2373///     #     _ => {},
2374///     }
2375///     Ok(())
2376/// });
2377/// # Ok(())
2378/// # }
2379/// ```
2380///
2381/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
2382/// and pays the [`Bolt12Invoice`] response.
2383///
2384/// ```
2385/// # use lightning::events::{Event, EventsProvider};
2386/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails};
2387/// # use lightning::offers::offer::Offer;
2388/// #
2389/// # fn example<T: AChannelManager>(
2390/// #     channel_manager: T, offer: &Offer, amount_msats: Option<u64>,
2391/// # ) {
2392/// # let channel_manager = channel_manager.get_cm();
2393/// let payment_id = PaymentId([42; 32]);
2394/// match channel_manager.pay_for_offer(
2395///     offer, amount_msats, payment_id, Default::default(),
2396/// ) {
2397///     Ok(()) => println!("Requesting invoice for offer"),
2398///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
2399/// }
2400///
2401/// // First the payment will be waiting on an invoice
2402/// let expected_payment_id = payment_id;
2403/// assert!(
2404///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2405///         details,
2406///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2407///     )).is_some()
2408/// );
2409///
2410/// // Once the invoice is received, a payment will be sent
2411/// assert!(
2412///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2413///         details,
2414///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2415///     )).is_some()
2416/// );
2417///
2418/// // On the event processing thread
2419/// channel_manager.process_pending_events(&|event| {
2420///     match event {
2421///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2422///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2423///         // ...
2424///     #     _ => {},
2425///     }
2426///     Ok(())
2427/// });
2428/// # }
2429/// ```
2430///
2431/// ## BOLT 12 Refunds
2432///
2433/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
2434/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
2435/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
2436/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
2437///
2438/// ```
2439/// # use core::time::Duration;
2440/// # use lightning::events::{Event, EventsProvider};
2441/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2442/// # use lightning::offers::parse::Bolt12SemanticError;
2443/// # use lightning::routing::router::RouteParametersConfig;
2444/// #
2445/// # fn example<T: AChannelManager>(
2446/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
2447/// #     route_params_config: RouteParametersConfig
2448/// # ) -> Result<(), Bolt12SemanticError> {
2449/// # let channel_manager = channel_manager.get_cm();
2450/// let payment_id = PaymentId([42; 32]);
2451/// let refund = channel_manager
2452///     .create_refund_builder(
2453///         amount_msats, absolute_expiry, payment_id, retry, route_params_config
2454///     )?
2455/// # ;
2456/// # // Needed for compiling for c_bindings
2457/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
2458/// # let refund = builder
2459///     .description("coffee".to_string())
2460///     .payer_note("refund for order 1234".to_string())
2461///     .build()?;
2462/// let bech32_refund = refund.to_string();
2463///
2464/// // First the payment will be waiting on an invoice
2465/// let expected_payment_id = payment_id;
2466/// assert!(
2467///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2468///         details,
2469///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2470///     )).is_some()
2471/// );
2472///
2473/// // Once the invoice is received, a payment will be sent
2474/// assert!(
2475///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2476///         details,
2477///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2478///     )).is_some()
2479/// );
2480///
2481/// // On the event processing thread
2482/// channel_manager.process_pending_events(&|event| {
2483///     match event {
2484///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2485///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2486///         // ...
2487///     #     _ => {},
2488///     }
2489///     Ok(())
2490/// });
2491/// # Ok(())
2492/// # }
2493/// ```
2494///
2495/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
2496/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
2497///
2498/// ```
2499/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2500/// # use lightning::ln::channelmanager::AChannelManager;
2501/// # use lightning::offers::refund::Refund;
2502/// #
2503/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
2504/// # let channel_manager = channel_manager.get_cm();
2505/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
2506///     Ok(invoice) => {
2507///         let payment_hash = invoice.payment_hash();
2508///         println!("Requesting refund payment {}", payment_hash);
2509///         payment_hash
2510///     },
2511///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
2512/// };
2513///
2514/// // On the event processing thread
2515/// channel_manager.process_pending_events(&|event| {
2516///     match event {
2517///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2518///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
2519///                 assert_eq!(payment_hash, known_payment_hash);
2520///                 println!("Claiming payment {}", payment_hash);
2521///                 channel_manager.claim_funds(payment_preimage);
2522///             },
2523///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
2524///                 println!("Unknown payment hash: {}", payment_hash);
2525///             },
2526///             // ...
2527/// #           _ => {},
2528///         },
2529///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2530///             assert_eq!(payment_hash, known_payment_hash);
2531///             println!("Claimed {} msats", amount_msat);
2532///         },
2533///         // ...
2534/// #       _ => {},
2535///     }
2536///     Ok(())
2537/// });
2538/// # }
2539/// ```
2540///
2541/// # Persistence
2542///
2543/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
2544/// all peers during write/read (though does not modify this instance, only the instance being
2545/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
2546/// called [`funding_transaction_generated`] for outbound channels) being closed.
2547///
2548/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
2549/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
2550/// [`ChannelMonitorUpdate`] before returning from
2551/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
2552/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
2553/// `ChannelManager` operations from occurring during the serialization process). If the
2554/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
2555/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
2556/// will be lost (modulo on-chain transaction fees).
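///
/// For example, the serialized manager can be obtained as a byte vector and handed to whatever
/// storage backend you use (a minimal sketch; in practice [`lightning-background-processor`]
/// together with a [`KVStoreSync`] implementation typically handles this for you):
///
/// ```
/// # use lightning::ln::channelmanager::AChannelManager;
/// # use lightning::util::ser::Writeable;
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// # let channel_manager = channel_manager.get_cm();
/// // Serializes all channel and payment-tracking state; write these bytes durably before
/// // considering the node shut down cleanly.
/// let encoded_manager = channel_manager.encode();
/// # let _ = encoded_manager;
/// # }
/// ```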
2557///
2558/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
2559/// tells you the last block hash which was connected. You should sync the manager to the current chain tip before using it.
2560/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
2561///
2562/// # `ChannelUpdate` Messages
2563///
2564/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
2565/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
2566/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
2567/// offline for a full minute. In order to track this, you must call
2568/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
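///
/// For example (a minimal sketch; [`lightning-background-processor`] normally drives this timer
/// for you):
///
/// ```
/// # use lightning::ln::channelmanager::AChannelManager;
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// # let channel_manager = channel_manager.get_cm();
/// // Called roughly once per minute from a background timer task.
/// channel_manager.timer_tick_occurred();
/// # }
/// ```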
2569///
2570/// # DoS Mitigation
2571///
2572/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
2573/// inbound channels without confirmed funding transactions. This may result in nodes which we do
2574/// not have a channel with being unable to connect to us or open new channels with us if we have
2575/// many peers with unfunded channels.
2576///
2577/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
2578/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
2579/// never limited. Please ensure you limit the count of such channels yourself.
2580///
2581/// # Type Aliases
2582///
2583/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
2584/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
2585/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
2586/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
2587/// you're using lightning-net-tokio.
2588///
2589/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2590/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
2591/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
2592/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
2593/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
2594/// [`timer_tick_occurred`]: Self::timer_tick_occurred
2595/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
2596/// [`KVStoreSync`]: crate::util::persist::KVStoreSync
2597/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
2598/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
2599/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
2600/// [`lightning-background-processor`]: https://docs.rs/lightning-background-processor/latest/lightning_background_processor
2601/// [`list_channels`]: Self::list_channels
2602/// [`list_usable_channels`]: Self::list_usable_channels
2603/// [`create_channel`]: Self::create_channel
2604/// [`close_channel`]: Self::close_channel
2605/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
2606/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
2607/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
2608/// [`list_recent_payments`]: Self::list_recent_payments
2609/// [`abandon_payment`]: Self::abandon_payment
2610/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
2611/// [`create_bolt11_invoice`]: Self::create_bolt11_invoice
2612/// [`create_inbound_payment`]: Self::create_inbound_payment
2613/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
2614/// [`claim_funds`]: Self::claim_funds
2615/// [`send_payment`]: Self::send_payment
2616/// [`offers`]: crate::offers
2617/// [`create_offer_builder`]: Self::create_offer_builder
2618/// [`pay_for_offer`]: Self::pay_for_offer
2619/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
2620/// [`create_refund_builder`]: Self::create_refund_builder
2621/// [`request_refund_payment`]: Self::request_refund_payment
2622/// [`peer_disconnected`]: msgs::BaseMessageHandler::peer_disconnected
2623/// [`funding_created`]: msgs::FundingCreated
2624/// [`funding_transaction_generated`]: Self::funding_transaction_generated
2625/// [`BlockHash`]: bitcoin::hash_types::BlockHash
2626/// [`update_channel`]: chain::Watch::update_channel
2627/// [`ChannelUpdate`]: msgs::ChannelUpdate
2628/// [`read`]: ReadableArgs::read
2629//
2630// Lock order:
2631// The tree structure below illustrates the lock order requirements for the different locks of the
2632// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
2633// and should then be taken in the order of the lowest to the highest level in the tree.
2634// Note that locks on different branches shall not be taken at the same time, as doing so will
2635// create a new lock order for those specific locks in the order they were taken.
2636//
2637// Lock order tree:
2638//
2639// `pending_offers_messages`
2640//
2641// `pending_async_payments_messages`
2642//
2643// `total_consistency_lock`
2644//  |
2645//  |__`forward_htlcs`
2646//  |
2647//  |__`pending_intercepted_htlcs`
2648//  |
2649//  |__`decode_update_add_htlcs`
2650//  |
2651//  |__`per_peer_state`
2652//      |
2653//      |__`claimable_payments`
2654//      |
2655//      |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
2656//         |
2657//         |__`peer_state`
2658//            |
2659//            |__`short_to_chan_info`
2660//            |
2661//            |__`outbound_scid_aliases`
2662//            |
2663//            |__`best_block`
2664//            |
2665//            |__`pending_events`
2666//               |
2667//               |__`pending_background_events`
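//
// For example, per the tree above, it is fine to take `total_consistency_lock` (read), then
// `per_peer_state` (read), then a single `peer_state` mutex, in that order, since they lie on a
// single branch. By contrast, `forward_htlcs` and `pending_intercepted_htlcs` sit on sibling
// branches and must not be held at the same time.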
2668//
2669pub struct ChannelManager<
2670	M: Deref,
2671	T: Deref,
2672	ES: Deref,
2673	NS: Deref,
2674	SP: Deref,
2675	F: Deref,
2676	R: Deref,
2677	MR: Deref,
2678	L: Deref,
2679> where
2680	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2681	T::Target: BroadcasterInterface,
2682	ES::Target: EntropySource,
2683	NS::Target: NodeSigner,
2684	SP::Target: SignerProvider,
2685	F::Target: FeeEstimator,
2686	R::Target: Router,
2687	MR::Target: MessageRouter,
2688	L::Target: Logger,
2689{
2690	config: RwLock<UserConfig>,
2691	chain_hash: ChainHash,
2692	fee_estimator: LowerBoundedFeeEstimator<F>,
2693	chain_monitor: M,
2694	tx_broadcaster: T,
2695	router: R,
2696
2697	#[cfg(test)]
2698	pub(super) flow: OffersMessageFlow<MR, L>,
2699	#[cfg(not(test))]
2700	flow: OffersMessageFlow<MR, L>,
2701
2702	/// See `ChannelManager` struct-level documentation for lock order requirements.
2703	#[cfg(any(test, feature = "_test_utils"))]
2704	pub(super) best_block: RwLock<BestBlock>,
2705	#[cfg(not(any(test, feature = "_test_utils")))]
2706	/// See `ChannelManager` struct-level documentation for lock order requirements.
2707	best_block: RwLock<BestBlock>,
2708	pub(super) secp_ctx: Secp256k1<secp256k1::All>,
2709
2710	/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
2711	/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
2712	/// (if the channel has been force-closed), however we track them here to prevent duplicative
2713	/// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
2714	/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
2715	/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
2716	/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
2717	/// after reloading from disk while replaying blocks against ChannelMonitors.
2718	///
2719	/// See `PendingOutboundPayment` documentation for more info.
2720	///
2721	/// See `ChannelManager` struct-level documentation for lock order requirements.
2722	pending_outbound_payments: OutboundPayments<L>,
2723
2724	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
2725	///
2726	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
2727	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
2728	/// and via the classic SCID.
2729	///
2730	/// Note that no consistency guarantees are made about the existence of a channel with the
2731	/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
2732	///
2733	/// See `ChannelManager` struct-level documentation for lock order requirements.
2734	#[cfg(test)]
2735	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2736	#[cfg(not(test))]
2737	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2738	/// Storage for HTLCs that have been intercepted.
2739	///
2740	/// These HTLCs fall into two categories:
2741	/// 1. HTLCs that are bubbled up to the user and held until the invocation of
2742	///    [`ChannelManager::forward_intercepted_htlc`] or [`ChannelManager::fail_intercepted_htlc`]
2743	///    (or timeout)
2744	/// 2. HTLCs that are being held on behalf of an often-offline sender until receipt of a
2745	///    [`ReleaseHeldHtlc`] onion message from an often-offline recipient
2746	///
2747	/// See `ChannelManager` struct-level documentation for lock order requirements.
2748	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
2749
2750	/// Outbound SCID Alias -> pending `update_add_htlc`s to decode.
2751	/// We use the scid alias because regular scids may change if a splice occurs.
2752	///
2753	/// Note that no consistency guarantees are made about the existence of a channel with the
2754	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
2755	///
2756	/// See `ChannelManager` struct-level documentation for lock order requirements.
2757	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
2758
2759	/// The sets of payments which are claimable or currently being claimed. See
2760	/// [`ClaimablePayments`]' individual field docs for more info.
2761	///
2762	/// See `ChannelManager` struct-level documentation for lock order requirements.
2763	claimable_payments: Mutex<ClaimablePayments>,
2764
2765	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
2766	/// and some closed channels which reached a usable state prior to being closed. This is used
2767	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
2768	/// active channel list on load.
2769	///
2770	/// See `ChannelManager` struct-level documentation for lock order requirements.
2771	outbound_scid_aliases: Mutex<HashSet<u64>>,
2772
2773	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
2774	///
2775	/// Outbound SCID aliases are added here once the channel is available for normal use, with
2776	/// SCIDs being added once the funding transaction is confirmed at the channel's required
2777	/// confirmation depth.
2778	///
2779	/// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
2780	/// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
2781	/// channel with the `channel_id` in our other maps.
2782	///
2783	/// See `ChannelManager` struct-level documentation for lock order requirements.
2784	#[cfg(test)]
2785	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2786	#[cfg(not(test))]
2787	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2788
2789	our_network_pubkey: PublicKey,
2790
2791	inbound_payment_key: inbound_payment::ExpandedKey,
2792
2793	/// LDK puts the [fake scids] that it generates into namespaces, to identify the type of an
2794	/// incoming payment. To make it harder for a third-party to identify the type of a payment,
2795	/// we encrypt the namespace identifier using these bytes.
2796	///
2797	/// [fake scids]: crate::util::scid_utils::fake_scid
2798	fake_scid_rand_bytes: [u8; 32],
2799
2800	/// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
2801	/// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
2802	/// keeping additional state.
2803	probing_cookie_secret: [u8; 32],
2804
2805	/// When generating [`PaymentId`]s for inbound payments, we HMAC the HTLCs with this secret.
2806	inbound_payment_id_secret: [u8; 32],
2807
2808	/// The highest block timestamp we've seen, which is usually a good guess at the current time.
2809	/// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
2810	/// very far in the past, and can only ever be up to two hours in the future.
2811	highest_seen_timestamp: AtomicUsize,
2812
2813	/// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
2814	/// basis, as well as the peer's latest features.
2815	///
2816	/// If we are connected to a peer we always at least have an entry here, even if no channels
2817	/// are currently open with that peer.
2818	///
2819	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
2820	/// operate on the inner value freely. This allows for parallel per-peer operation on
2821	/// channels.
2822	///
2823	/// Note that the same thread must never acquire two inner `PeerState` locks at the same time.
2824	///
2825	/// See `ChannelManager` struct-level documentation for lock order requirements.
2826	#[cfg(not(any(test, feature = "_test_utils")))]
2827	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2828	#[cfg(any(test, feature = "_test_utils"))]
2829	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2830
2831	/// We only support using one of [`ChannelMonitorUpdateStatus::InProgress`] and
2832	/// [`ChannelMonitorUpdateStatus::Completed`] without restarting. Because the API does not
2833	/// otherwise directly enforce this, we enforce it in non-test builds here by storing which one
2834	/// is in use.
2835	#[cfg(not(any(test, feature = "_externalize_tests")))]
2836	monitor_update_type: AtomicUsize,
2837
2838	/// The set of events which we need to give to the user to handle. In some cases an event may
2839	/// require some further action after the user handles it (currently only blocking a monitor
2840	/// update from being handed to the user to ensure the included changes to the channel state
2841	/// are handled by the user before they're persisted durably to disk). In that case, the second
2842	/// element in the tuple is set to `Some` with further details of the action.
2843	///
2844	/// Note that events MUST NOT be removed from pending_events after deserialization, as they
2845	/// could be in the middle of being processed without the direct mutex held.
2846	///
2847	/// See `ChannelManager` struct-level documentation for lock order requirements.
2848	#[cfg(not(any(test, feature = "_test_utils")))]
2849	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2850	#[cfg(any(test, feature = "_test_utils"))]
2851	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2852
2853	/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
2854	pending_events_processor: AtomicBool,
2855
2856	/// A simple atomic flag to ensure only one task at a time can be processing HTLC forwards via
2857	/// [`Self::process_pending_htlc_forwards`].
2858	pending_htlc_forwards_processor: AtomicBool,
2859
2860	/// If we are running during init (either directly during the deserialization method or in
2861	/// block connection methods which run after deserialization but before normal operation) we
2862	/// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
2863	/// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
2864	/// [`ChainMonitor`] and thus attempting to update it will fail or panic.
2865	///
2866	/// Thus, we place them here to be handled as soon as possible once we are running normally.
2867	///
2868	/// See `ChannelManager` struct-level documentation for lock order requirements.
2869	///
2870	/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2871	pending_background_events: Mutex<Vec<BackgroundEvent>>,
2872	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
2873	/// Essentially just when we're serializing ourselves out.
2874	/// Taken first everywhere where we are making changes before any other locks.
2875	/// When acquiring this lock in read mode, rather than acquiring it directly, call
2876	/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
2877	/// Notifier the lock contains sends out a notification when the lock is released.
2878	total_consistency_lock: RwLock<()>,
2879	/// Tracks the progress of channels going through batch funding by whether funding_signed was
2880	/// received and the monitor has been persisted.
2881	///
2882	/// This information does not need to be persisted as funding nodes can forget
2883	/// unfunded channels upon disconnection.
2884	funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
2885
2886	background_events_processed_since_startup: AtomicBool,
2887
2888	event_persist_notifier: Notifier,
2889	needs_persist_flag: AtomicBool,
2890
2891	/// Tracks the message events that are to be broadcasted when we are connected to some peer.
2892	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
2893
2894	/// We only want to force-close our channels on peers based on stale feerates when we're
2895	/// confident the feerate on the channel is *really* stale, not one that only recently became stale.
2896	/// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
2897	/// (after startup completed) here, and only force-close when channels have a lower feerate
2898	/// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
2899	///
2900	/// We only keep this in memory as we assume any feerates we receive immediately after startup
2901	/// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
2902	/// actions for a day anyway.
2903	///
2904	/// The first element in the pair is the
2905	/// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
2906	/// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
2907	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
2908
2909	#[cfg(feature = "_test_utils")]
2910	/// In testing, it is useful to be able to forge a name -> offer mapping so that we can pay an
2911	/// offer generated in the test.
2912	///
2913	/// This allows for doing so, validating proofs as normal, but, if they pass, replacing the
2914	/// offer they resolve to with the given one.
2915	pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,
2916
2917	#[cfg(test)]
2918	pub(super) entropy_source: ES,
2919	#[cfg(not(test))]
2920	entropy_source: ES,
2921	node_signer: NS,
2922	#[cfg(test)]
2923	pub(super) signer_provider: SP,
2924	#[cfg(not(test))]
2925	signer_provider: SP,
2926
2927	logger: L,
2928}
2929
2930/// Chain-related parameters used to construct a new `ChannelManager`.
2931///
2932/// Typically, the block-specific parameters are derived from the best block hash for the network,
2933/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
2934/// are not needed when deserializing a previously constructed `ChannelManager`.
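///
/// For example, a brand-new node might be constructed starting from the network's genesis block
/// (a minimal sketch; nodes that have already synced should use their actual chain tip instead):
///
/// ```
/// # use bitcoin::network::Network;
/// # use lightning::chain::BestBlock;
/// # use lightning::ln::channelmanager::ChainParameters;
/// let params = ChainParameters {
///     network: Network::Bitcoin,
///     best_block: BestBlock::from_network(Network::Bitcoin),
/// };
/// # let _ = params;
/// ```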
2935#[derive(Clone, Copy, PartialEq)]
2936pub struct ChainParameters {
2937	/// The network for determining the `chain_hash` in Lightning messages.
2938	pub network: Network,
2939
2940	/// The hash and height of the latest block successfully connected.
2941	///
2942	/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
2943	pub best_block: BestBlock,
2944}
2945
2946#[derive(Copy, Clone, PartialEq)]
2947#[must_use]
2948enum NotifyOption {
2949	DoPersist,
2950	SkipPersistHandleEvents,
2951	SkipPersistNoEvents,
2952}
2953
2954/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
2955/// desirable to notify any listeners on `await_persistable_update_timeout`/
2956/// `await_persistable_update` when new updates are available for persistence. Therefore, this
2957/// struct is responsible for locking the total consistency lock and, upon going out of scope,
2958/// sending the aforementioned notification (since the lock being released indicates that the
2959/// updates are ready for persistence).
2960///
2961/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
2962/// notify or not based on whether relevant changes have been made, providing a closure to
2963/// `optionally_notify` which returns a `NotifyOption`.
2964struct PersistenceNotifierGuard<'a, F: FnOnce() -> NotifyOption> {
2965	event_persist_notifier: &'a Notifier,
2966	needs_persist_flag: &'a AtomicBool,
2967	// Always `Some` once initialized, but tracked as an `Option` to obtain the closure by value in
2968	// [`PersistenceNotifierGuard::drop`].
2969	should_persist: Option<F>,
2970	// We hold onto this result so the lock doesn't get released immediately.
2971	_read_guard: RwLockReadGuard<'a, ()>,
2972}
2973
2974// We don't care what the concrete F is here, it's unused
2975impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> {
2976	/// Notifies any waiters and indicates that we need to persist, in addition to possibly having
2977	/// events to handle.
2978	///
2979	/// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
2980	/// other cases where losing the changes on restart may result in a force-close or otherwise
2981	/// isn't ideal.
2982	fn notify_on_drop<C: AChannelManager>(
2983		cm: &'a C,
2984	) -> PersistenceNotifierGuard<'a, impl FnOnce() -> NotifyOption> {
2985		Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
2986	}
2987
2988	fn optionally_notify<F: FnOnce() -> NotifyOption, C: AChannelManager>(
2989		cm: &'a C, persist_check: F,
2990	) -> PersistenceNotifierGuard<'a, impl FnOnce() -> NotifyOption> {
2991		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2992		let force_notify = cm.get_cm().process_background_events();
2993
2994		PersistenceNotifierGuard {
2995			event_persist_notifier: &cm.get_cm().event_persist_notifier,
2996			needs_persist_flag: &cm.get_cm().needs_persist_flag,
2997			should_persist: Some(move || {
2998				// Pick the "most" action between `persist_check` and the background events
2999				// processing and return that.
3000				let notify = persist_check();
3001				match (notify, force_notify) {
3002					(NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
3003					(_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
3004					(NotifyOption::SkipPersistHandleEvents, _) => {
3005						NotifyOption::SkipPersistHandleEvents
3006					},
3007					(_, NotifyOption::SkipPersistHandleEvents) => {
3008						NotifyOption::SkipPersistHandleEvents
3009					},
3010					_ => NotifyOption::SkipPersistNoEvents,
3011				}
3012			}),
3013			_read_guard: read_guard,
3014		}
3015	}
3016
3017	/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
3018	/// [`ChannelManager::process_background_events`] MUST be called first (or
3019	/// [`Self::optionally_notify`] used).
3020	fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>(
3021		cm: &'a C, persist_check: F,
3022	) -> PersistenceNotifierGuard<'a, F> {
3023		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
3024
3025		PersistenceNotifierGuard {
3026			event_persist_notifier: &cm.get_cm().event_persist_notifier,
3027			needs_persist_flag: &cm.get_cm().needs_persist_flag,
3028			should_persist: Some(persist_check),
3029			_read_guard: read_guard,
3030		}
3031	}
3032}
3033
3034impl<'a, F: FnOnce() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
3035	fn drop(&mut self) {
3036		let should_persist = match self.should_persist.take() {
3037			Some(should_persist) => should_persist,
3038			None => {
3039				debug_assert!(false);
3040				return;
3041			},
3042		};
3043		match should_persist() {
3044			NotifyOption::DoPersist => {
3045				self.needs_persist_flag.store(true, Ordering::Release);
3046				self.event_persist_notifier.notify()
3047			},
3048			NotifyOption::SkipPersistHandleEvents => self.event_persist_notifier.notify(),
3049			NotifyOption::SkipPersistNoEvents => {},
3050		}
3051	}
3052}
3053
3054/// The amount of time in blocks we require our counterparty to wait before claiming their money
3055/// (i.e., the window in which we, or our watchtower, must check for them having broadcast a theft transaction).
3056///
3057/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
3058///
3059/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
3060pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
3061/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
3062/// the maximum required amount in lnd as of March 2021.
3063pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
3064
3065/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
3066/// HTLC's CLTV. The current default represents roughly eight hours of blocks at six blocks/hour.
3067///
3068/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
3069///
3070/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
3071// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
3072// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
3073// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
3074// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
3075pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 8;
3076// This should be long enough to allow a payment path drawn across multiple routing hops with substantial
3077// `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
3078// in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
3079// scale them up to suit its security policy. At the network-level, we shouldn't constrain them too much,
3080// while avoiding to introduce a DoS vector. Further, a low CTLV_FAR_FAR_AWAY could be a source of
3081// routing failure for any HTLC sender picking up an LDK node among the first hops.
3082pub(crate) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
3083
3084/// Minimum CLTV difference between the current block height and received inbound payments.
3085/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least
3086/// this value.
3087// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
3088// any payments to succeed. Further, we don't want payments to fail if a block was found while
3089// a payment was being routed, so we add an extra block to be safe.
3090pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
3091
3092// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get everything on chain and locked
3093// in with enough time left to fail the corresponding HTLC back to our inbound edge before they
3094// force-close on us.
3095// In other words, if the next-hop peer fails HTLC LATENCY_GRACE_PERIOD_BLOCKS after our
3096// CLTV_CLAIM_BUFFER (because that's how many blocks we allow them after expiry), we'll still have
3097// 2*MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY left to get two transactions on chain and the second
3098// fully locked in before the peer force-closes on us (LATENCY_GRACE_PERIOD_BLOCKS before the
3099// expiry, i.e. assuming the peer force-closes right at the expiry and we're behind by
3100// LATENCY_GRACE_PERIOD_BLOCKS).
3101const _CHECK_CLTV_EXPIRY_SANITY: () = assert!(
3102	MIN_CLTV_EXPIRY_DELTA as u32
3103		>= 2 * LATENCY_GRACE_PERIOD_BLOCKS + 2 * MAX_BLOCKS_FOR_CONF + ANTI_REORG_DELAY
3104);
3105
3106// Check that our MIN_CLTV_EXPIRY_DELTA gives us enough time to get the HTLC preimage back to our
3107// counterparty if the outbound edge gives us the preimage only one block before we'd force-close
3108// the channel.
3109// ie they provide the preimage LATENCY_GRACE_PERIOD_BLOCKS - 1 after the HTLC expires, then we
3110// pass the preimage back, which takes LATENCY_GRACE_PERIOD_BLOCKS to complete, and we want to make
3111// sure this all happens at least N blocks before the inbound HTLC expires (where N is the
3112// counterparty's CLTV_CLAIM_BUFFER or equivalent).
3113const _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER: u32 = 6 * 6;
3114
3115const _CHECK_COUNTERPARTY_REALISTIC: () =
3116	assert!(_ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER >= CLTV_CLAIM_BUFFER);
3117
3118const _CHECK_CLTV_EXPIRY_OFFCHAIN: () = assert!(
3119	MIN_CLTV_EXPIRY_DELTA as u32
3120		>= 2 * LATENCY_GRACE_PERIOD_BLOCKS - 1 + _ASSUMED_COUNTERPARTY_CLTV_CLAIM_BUFFER
3121);
3122
3123/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
3124pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
3125
3126/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
3127/// until we mark the channel disabled and gossip the update.
3128pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
3129
3130/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is connected until
3131/// we mark the channel enabled and gossip the update.
3132pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
3133
3134/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
3135/// (inbound) ones. The number of peers with unfunded channels is limited separately in
3136/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
3137pub(super) const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
3138
3139/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
3140/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
3141pub(super) const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
3142
3143/// The maximum allowed size for peer storage, in bytes.
3144///
3145/// This constant defines the upper limit for the size of data
3146/// that can be stored for a peer. It is set to 1024 bytes (1 kilobyte)
3147/// to prevent excessive resource consumption.
3148#[cfg(not(test))]
3149const MAX_PEER_STORAGE_SIZE: usize = 1024;
3150
3151/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
3152/// many peers we reject new (inbound) connections.
3153const MAX_NO_CHANNEL_PEERS: usize = 250;
3154
3155/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
3156/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
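///
/// For example (a minimal sketch):
///
/// ```
/// # use lightning::ln::channelmanager::{AChannelManager, RecentPaymentDetails};
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// # let channel_manager = channel_manager.get_cm();
/// for payment in channel_manager.list_recent_payments() {
///     match payment {
///         RecentPaymentDetails::Pending { payment_id, .. } => {
///             println!("Payment {} is still in flight", payment_id);
///         },
///         RecentPaymentDetails::Abandoned { payment_id, .. } => {
///             println!("Payment {} was abandoned", payment_id);
///         },
///         _ => {},
///     }
/// }
/// # }
/// ```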
3157#[derive(Debug, PartialEq)]
3158pub enum RecentPaymentDetails {
3159	/// When an invoice was requested and thus a payment has not yet been sent.
3160	AwaitingInvoice {
3161		/// A user-provided identifier in [`ChannelManager::pay_for_offer`] used to uniquely identify a
3162		/// payment and ensure idempotency in LDK.
3163		payment_id: PaymentId,
3164	},
3165	/// When a payment is still being sent and awaiting successful delivery.
3166	Pending {
3167		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
3168		/// identify a payment and ensure idempotency in LDK.
3169		///
3170		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
3171		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
3172		payment_id: PaymentId,
3173		/// Hash of the payment that is currently being sent but has yet to be fulfilled or
3174		/// abandoned.
3175		payment_hash: PaymentHash,
3176		/// Total amount (in msat, excluding fees) across all paths for this payment,
3177		/// not just the amount currently inflight.
3178		total_msat: u64,
3179	},
3180	/// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
3181	/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
3182	/// payment is removed from tracking.
3183	Fulfilled {
3184		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
3185		/// identify a payment and ensure idempotency in LDK.
3186		///
3187		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
3188		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
3189		payment_id: PaymentId,
3190		/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
3191		/// made before LDK version 0.0.104.
3192		payment_hash: Option<PaymentHash>,
3193	},
3194	/// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
3195	/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
3196	/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
3197	Abandoned {
3198		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
3199		/// identify a payment and ensure idempotency in LDK.
3200		///
3201		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
3202		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
3203		payment_id: PaymentId,
3204		/// Hash of the payment that we have given up trying to send.
3205		payment_hash: PaymentHash,
3206	},
3207}
3208
3209/// Route hints used in constructing invoices for [phantom node payments].
3210///
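/// For example, each node participating in the phantom payment would collect its hints roughly as
/// follows (a minimal sketch; the hints from all participating nodes are then combined when
/// building the invoice, e.g. via the utilities in the `lightning-invoice` crate):
///
/// ```
/// # use lightning::ln::channelmanager::AChannelManager;
/// # fn example<T: AChannelManager>(channel_manager: T) {
/// # let channel_manager = channel_manager.get_cm();
/// let hints = channel_manager.get_phantom_route_hints();
/// # let _ = hints;
/// # }
/// ```
///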
3211/// [phantom node payments]: crate::sign::PhantomKeysManager
3212#[derive(Clone)]
3213pub struct PhantomRouteHints {
3214	/// The list of channels to be included in the invoice route hints.
3215	pub channels: Vec<ChannelDetails>,
3216	/// A fake scid used for representing the phantom node's fake channel in generating the invoice
3217	/// route hints.
3218	pub phantom_scid: u64,
3219	/// The pubkey of the real backing node that would ultimately receive the payment.
3220	pub real_node_pubkey: PublicKey,
3221}
3222
3223#[rustfmt::skip]
3224macro_rules! handle_error {
3225	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
3226		// In testing, ensure there are no deadlocks where the lock is already held upon
3227		// entering the macro.
3228		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
3229		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
3230
3231		match $internal {
3232			Ok(msg) => Ok(msg),
3233			Err(MsgHandleErrInternal { err, shutdown_finish, tx_abort, .. }) => {
3234				let mut msg_event = None;
3235
3236				if let Some((shutdown_res, update_option)) = shutdown_finish {
3237					let counterparty_node_id = shutdown_res.counterparty_node_id;
3238					let channel_id = shutdown_res.channel_id;
3239					let logger = WithContext::from(
3240						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
3241					);
3242					log_error!(logger, "Closing channel: {}", err.err);
3243
3244					$self.finish_close_channel(shutdown_res);
3245					if let Some(update) = update_option {
3246						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
3247						pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
3248							msg: update
3249						});
3250					}
3251				} else {
3252					log_error!($self.logger, "Got non-closing error: {}", err.err);
3253				}
3254
3255				if let msgs::ErrorAction::IgnoreError = err.action {
3256					if let Some(tx_abort) = tx_abort {
3257						msg_event = Some(MessageSendEvent::SendTxAbort {
3258							node_id: $counterparty_node_id,
3259							msg: tx_abort,
3260						});
3261					}
3262				} else {
3263					msg_event = Some(MessageSendEvent::HandleError {
3264						node_id: $counterparty_node_id,
3265						action: err.action.clone()
3266					});
3267				}
3268
3269				if let Some(msg_event) = msg_event {
3270					let per_peer_state = $self.per_peer_state.read().unwrap();
3271					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
3272						let mut peer_state = peer_state_mutex.lock().unwrap();
3273						if peer_state.is_connected {
3274							peer_state.pending_msg_events.push(msg_event);
3275						}
3276					}
3277				}
3278
3279				// Return the error in case a higher-level API needs one
3280				Err(err)
3281			},
3282		}
3283	} };
3284}
3285
3286/// Do not call this directly, use `convert_channel_err` instead.
3287#[rustfmt::skip]
3288macro_rules! locked_close_channel {
3289	($self: ident, $chan_context: expr, UNFUNDED) => {{
3290		$self.short_to_chan_info.write().unwrap().remove(&$chan_context.outbound_scid_alias());
3291		// If the channel was never confirmed on-chain prior to its closure, remove the
3292		// outbound SCID alias we used for it from the collision-prevention set. While we
3293		// generally want to avoid ever re-using an outbound SCID alias across all channels, we
3294		// also don't want a counterparty to be able to trivially cause a memory leak by simply
3295		// opening a million channels with us which are closed before we ever reach the funding
3296		// stage.
3297		let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$chan_context.outbound_scid_alias());
3298		debug_assert!(alias_removed);
3299	}};
3300	($self: ident, $peer_state: expr, $funded_chan: expr, $shutdown_res_mut: expr, FUNDED) => {{
3301		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
3302			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
3303				$funded_chan.context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
3304		}
3305		// If there's a possibility that we need to generate further monitor updates for this
3306		// channel, we need to store the last update_id of it. However, we don't want to insert
3307		// into the map (which prevents the `PeerState` from being cleaned up) for channels that
3308		// never even got confirmations (which would open us up to DoS attacks).
3309		let update_id = $funded_chan.context.get_latest_monitor_update_id();
3310		if $funded_chan.funding.get_funding_tx_confirmation_height().is_some() || $funded_chan.context.minimum_depth(&$funded_chan.funding) == Some(0) || update_id > 1 {
3311			let chan_id = $funded_chan.context.channel_id();
3312			$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
3313		}
3314		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3315		if let Some(short_id) = $funded_chan.funding.get_short_channel_id() {
3316			short_to_chan_info.remove(&short_id);
3317		} else {
3318			// If the channel was never confirmed on-chain prior to its closure, remove the
3319			// outbound SCID alias we used for it from the collision-prevention set. While we
3320			// generally want to avoid ever re-using an outbound SCID alias across all channels, we
3321			// also don't want a counterparty to be able to trivially cause a memory leak by simply
3322			// opening a million channels with us which are closed before we ever reach the funding
3323			// stage.
3324			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$funded_chan.context.outbound_scid_alias());
3325			debug_assert!(alias_removed);
3326		}
3327		short_to_chan_info.remove(&$funded_chan.context.outbound_scid_alias());
3328		for scid in $funded_chan.context.historical_scids() {
3329			short_to_chan_info.remove(scid);
3330		}
3331	}}
3332}
3333
3334/// When a channel is removed, two things need to happen:
3335/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action,
3336/// (b) [`handle_error`] needs to be called without holding any locks (except
3337///     [`ChannelManager::total_consistency_lock`]), which then calls
3338///     [`ChannelManager::finish_close_channel`].
3339///
3340/// Note that this step can be skipped if the channel was never opened (through the creation of a
3341/// [`ChannelMonitor`]/channel funding transaction) to begin with.
3342///
3343/// Returns `(boolean indicating if we should remove the Channel object from memory, a mapped
3344/// error)`, except in the `COOP_CLOSED` case, where the bool is elided (it is always implicitly
3345/// true).
3346#[rustfmt::skip]
3347macro_rules! convert_channel_err {
3348	($self: ident, $peer_state: expr, $err: expr, $chan: expr, $close: expr, $locked_close: expr, $channel_id: expr, _internal) => { {
3349		match $err {
3350			ChannelError::Warn(msg) => {
3351				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id))
3352			},
3353			ChannelError::WarnAndDisconnect(msg) => {
3354				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::WarnAndDisconnect(msg), $channel_id))
3355			},
3356			ChannelError::Ignore(msg) => {
3357				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id))
3358			},
3359			ChannelError::Abort(reason) => {
3360				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Abort(reason), $channel_id))
3361			},
3362			ChannelError::Close((msg, reason)) => {
3363				let (mut shutdown_res, chan_update) = $close(reason);
3364				let logger = WithChannelContext::from(&$self.logger, &$chan.context(), None);
3365				log_error!(logger, "Closed channel {} due to close-required error: {}", $channel_id, msg);
3366				$locked_close(&mut shutdown_res, $chan);
3367				let err =
3368					MsgHandleErrInternal::from_finish_shutdown(msg, $channel_id, shutdown_res, chan_update);
3369				(true, err)
3370			},
3371			ChannelError::SendError(msg) => {
3372				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::SendError(msg), $channel_id))
3373			},
3374		}
3375	} };
3376	($self: ident, $peer_state: expr, $shutdown_result: expr, $funded_channel: expr, COOP_CLOSED) => { {
3377		let chan_id = $funded_channel.context.channel_id();
3378		let reason = ChannelError::Close(("Coop Closed".to_owned(), $shutdown_result.closure_reason.clone()));
3379		let do_close = |_| {
3380			(
3381				$shutdown_result,
3382				$self.get_channel_update_for_broadcast(&$funded_channel).ok(),
3383			)
3384		};
3385		let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| {
3386			locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED);
3387		};
3388		let (close, mut err) =
3389			convert_channel_err!($self, $peer_state, reason, $funded_channel, do_close, locked_close, chan_id, _internal);
3390		err.dont_send_error_message();
3391		debug_assert!(close);
3392		err
3393	} };
3394	($self: ident, $peer_state: expr, $err: expr, $funded_channel: expr, FUNDED_CHANNEL) => { {
3395		let chan_id = $funded_channel.context.channel_id();
3396		let mut do_close = |reason| {
3397			(
3398				$funded_channel.force_shutdown(reason),
3399				$self.get_channel_update_for_broadcast(&$funded_channel).ok(),
3400			)
3401		};
3402		let mut locked_close = |shutdown_res_mut: &mut ShutdownResult, funded_channel: &mut FundedChannel<_>| {
3403			locked_close_channel!($self, $peer_state, funded_channel, shutdown_res_mut, FUNDED);
3404		};
3405		convert_channel_err!($self, $peer_state, $err, $funded_channel, do_close, locked_close, chan_id, _internal)
3406	} };
3407	($self: ident, $peer_state: expr, $err: expr, $channel: expr, UNFUNDED_CHANNEL) => { {
3408		let chan_id = $channel.context().channel_id();
3409		let mut do_close = |reason| { ($channel.force_shutdown(reason), None) };
3410		let locked_close = |_, chan: &mut Channel<_>| { locked_close_channel!($self, chan.context(), UNFUNDED); };
3411		convert_channel_err!($self, $peer_state, $err, $channel, do_close, locked_close, chan_id, _internal)
3412	} };
3413	($self: ident, $peer_state: expr, $err: expr, $channel: expr) => {
3414		match $channel.as_funded_mut() {
3415			Some(funded_channel) => {
3416				convert_channel_err!($self, $peer_state, $err, funded_channel, FUNDED_CHANNEL)
3417			},
3418			None => {
3419				convert_channel_err!($self, $peer_state, $err, $channel, UNFUNDED_CHANNEL)
3420			},
3421		}
3422	};
3423}
3424
3425macro_rules! break_channel_entry {
3426	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3427		match $res {
3428			Ok(res) => res,
3429			Err(e) => {
3430				let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut());
3431				if drop {
3432					$entry.remove_entry();
3433				}
3434				break Err(res);
3435			},
3436		}
3437	};
3438}
3439
3440macro_rules! try_channel_entry {
3441	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3442		match $res {
3443			Ok(res) => res,
3444			Err(e) => {
3445				let (drop, res) = convert_channel_err!($self, $peer_state, e, $entry.get_mut());
3446				if drop {
3447					$entry.remove_entry();
3448				}
3449				return Err(res);
3450			},
3451		}
3452	};
3453}
3454
3455macro_rules! send_channel_ready {
3456	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
3457		if $channel.context.is_connected() {
3458			$pending_msg_events.push(MessageSendEvent::SendChannelReady {
3459				node_id: $channel.context.get_counterparty_node_id(),
3460				msg: $channel_ready_msg,
3461			});
3462		}
3463		// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
3464		// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
3465		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3466		let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3467		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3468			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3469		insert_short_channel_id!(short_to_chan_info, $channel);
3470	}}
3471}
3472
3473macro_rules! insert_short_channel_id {
3474	($short_to_chan_info: ident, $channel: expr) => {{
3475		if let Some(real_scid) = $channel.funding.get_short_channel_id() {
3476			let scid_insert = $short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3477			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3478				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3479		}
3480	}}
3481}
3482
3483macro_rules! emit_funding_tx_broadcast_safe_event {
3484	($locked_events: expr, $channel: expr, $funding_txo: expr) => {
3485		if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
3486			$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
3487				channel_id: $channel.context.channel_id(),
3488				user_channel_id: $channel.context.get_user_id(),
3489				funding_txo: $funding_txo,
3490				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3491				former_temporary_channel_id: $channel.context.temporary_channel_id()
3492					.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
3493			}, None));
3494			$channel.context.set_funding_tx_broadcast_safe_event_emitted();
3495		}
3496	}
3497}
3498
3499macro_rules! emit_channel_pending_event {
3500	($locked_events: expr, $channel: expr) => {
3501		if $channel.context.should_emit_channel_pending_event() {
3502			let funding_txo = $channel.funding.get_funding_txo().unwrap();
3503			let funding_redeem_script =
3504				Some($channel.funding.channel_transaction_parameters.make_funding_redeemscript());
3505			$locked_events.push_back((
3506				events::Event::ChannelPending {
3507					channel_id: $channel.context.channel_id(),
3508					former_temporary_channel_id: $channel.context.temporary_channel_id(),
3509					counterparty_node_id: $channel.context.get_counterparty_node_id(),
3510					user_channel_id: $channel.context.get_user_id(),
3511					funding_txo: funding_txo.into_bitcoin_outpoint(),
3512					channel_type: Some($channel.funding.get_channel_type().clone()),
3513					funding_redeem_script,
3514				},
3515				None,
3516			));
3517			$channel.context.set_channel_pending_event_emitted();
3518		}
3519	};
3520}
3521
3522macro_rules! emit_initial_channel_ready_event {
3523	($locked_events: expr, $channel: expr) => {
3524		if $channel.context.should_emit_initial_channel_ready_event() {
3525			debug_assert!($channel.context.channel_pending_event_emitted());
3526			$locked_events.push_back((
3527				events::Event::ChannelReady {
3528					channel_id: $channel.context.channel_id(),
3529					user_channel_id: $channel.context.get_user_id(),
3530					counterparty_node_id: $channel.context.get_counterparty_node_id(),
3531					funding_txo: $channel
3532						.funding
3533						.get_funding_txo()
3534						.map(|outpoint| outpoint.into_bitcoin_outpoint()),
3535					channel_type: $channel.funding.get_channel_type().clone(),
3536				},
3537				None,
3538			));
3539			$channel.context.set_initial_channel_ready_event_emitted();
3540		}
3541	};
3542}
3543
3544/// Handles the completion steps for when a [`ChannelMonitorUpdate`] is applied to a live channel.
3545///
3546/// Generally, you should not add new direct calls to this; rather, rely on
3547/// `handle_new_monitor_update` or [`ChannelManager::channel_monitor_updated`] to call it for you.
3548///
3549/// Requires that the in-flight monitor update set for this channel is empty!
3550macro_rules! handle_monitor_update_completion {
3551	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => {{
3552		let channel_id = $chan.context.channel_id();
3553		let outbound_scid_alias = $chan.context().outbound_scid_alias();
3554		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3555
3556		#[cfg(debug_assertions)]
3557		{
3558			let in_flight_updates =
3559				$peer_state.in_flight_monitor_updates.get(&channel_id);
3560			assert!(in_flight_updates.map(|(_, updates)| updates.is_empty()).unwrap_or(true));
3561			assert!($chan.is_awaiting_monitor_update());
3562		}
3563
3564		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3565
3566		let update_actions = $peer_state.monitor_update_blocked_actions
3567			.remove(&channel_id).unwrap_or(Vec::new());
3568
3569		if $chan.blocked_monitor_updates_pending() != 0 {
3570			mem::drop($peer_state_lock);
3571			mem::drop($per_peer_state_lock);
3572
3573			log_debug!(logger, "Channel has blocked monitor updates, completing update actions but leaving channel blocked");
3574			$self.handle_monitor_update_completion_actions(update_actions);
3575		} else {
3576			log_debug!(logger, "Channel is open and awaiting update, resuming it");
3577			let mut updates = $chan.monitor_updating_restored(
3578				&&logger,
3579				&$self.node_signer,
3580				$self.chain_hash,
3581				&*$self.config.read().unwrap(),
3582				$self.best_block.read().unwrap().height,
3583				|htlc_id| {
3584					$self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &counterparty_node_id)
3585				},
3586			);
3587			let channel_update = if updates.channel_ready.is_some()
3588				&& $chan.context.is_usable()
3589				&& $peer_state.is_connected
3590			{
3591				// We only send a channel_update in the case where we are just now sending a
3592				// channel_ready and the channel is in a usable state. We may re-send a
3593				// channel_update later through the announcement_signatures process for public
3594				// channels, but there's no reason not to just inform our counterparty of our fees
3595				// now.
3596				if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
3597					Some(MessageSendEvent::SendChannelUpdate { node_id: counterparty_node_id, msg })
3598				} else {
3599					None
3600				}
3601			} else {
3602				None
3603			};
3604
3605			let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
3606				&mut $peer_state.pending_msg_events,
3607				$chan,
3608				updates.raa,
3609				updates.commitment_update,
3610				updates.commitment_order,
3611				updates.accepted_htlcs,
3612				updates.pending_update_adds,
3613				updates.funding_broadcastable,
3614				updates.channel_ready,
3615				updates.announcement_sigs,
3616				updates.tx_signatures,
3617				None,
3618				updates.channel_ready_order,
3619			);
3620			if let Some(upd) = channel_update {
3621				$peer_state.pending_msg_events.push(upd);
3622			}
3623
3624			let unbroadcasted_batch_funding_txid =
3625				$chan.context.unbroadcasted_batch_funding_txid(&$chan.funding);
3626			core::mem::drop($peer_state_lock);
3627			core::mem::drop($per_peer_state_lock);
3628
3629			// If the channel belongs to a batch funding transaction, the progress of the batch
3630			// should be updated as we have received funding_signed and persisted the monitor.
3631			if let Some(txid) = unbroadcasted_batch_funding_txid {
3632				let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
3633				let mut batch_completed = false;
3634				if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
3635					let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
3636						*chan_id == channel_id &&
3637						*pubkey == counterparty_node_id
3638					));
3639					if let Some(channel_state) = channel_state {
3640						channel_state.2 = true;
3641					} else {
3642						debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
3643					}
3644					batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
3645				} else {
3646					debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
3647				}
3648
3649				// When all channels in a batched funding transaction have become ready, it is not necessary
3650				// to track the progress of the batch anymore and the state of the channels can be updated.
3651				if batch_completed {
3652					let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
3653					let per_peer_state = $self.per_peer_state.read().unwrap();
3654					let mut batch_funding_tx = None;
3655					for (channel_id, counterparty_node_id, _) in removed_batch_state {
3656						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
3657							let mut peer_state = peer_state_mutex.lock().unwrap();
3658							if let Some(funded_chan) = peer_state.channel_by_id
3659								.get_mut(&channel_id)
3660								.and_then(Channel::as_funded_mut)
3661							{
3662								batch_funding_tx = batch_funding_tx.or_else(|| funded_chan.context.unbroadcasted_funding(&funded_chan.funding));
3663								funded_chan.set_batch_ready();
3664								let mut pending_events = $self.pending_events.lock().unwrap();
3665								emit_channel_pending_event!(pending_events, funded_chan);
3666							}
3667						}
3668					}
3669					if let Some(tx) = batch_funding_tx {
3670						log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
3671						$self.tx_broadcaster.broadcast_transactions(&[&tx]);
3672					}
3673				}
3674			}
3675
3676			$self.handle_monitor_update_completion_actions(update_actions);
3677
3678			if let Some(forwards) = htlc_forwards {
3679				$self.forward_htlcs(&mut [forwards][..]);
3680			}
3681			if let Some(decode) = decode_update_add_htlcs {
3682				$self.push_decode_update_add_htlcs(decode);
3683			}
3684			$self.finalize_claims(updates.finalized_claimed_htlcs);
3685			for failure in updates.failed_htlcs.drain(..) {
3686				let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
3687				$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver, None);
3688			}
3689		}
3690	}};
3691}
3692
3693macro_rules! handle_new_monitor_update {
3694	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
3695		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
3696		match $update_res {
3697			ChannelMonitorUpdateStatus::UnrecoverableError => {
3698				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
3699				log_error!($logger, "{}", err_str);
3700				panic!("{}", err_str);
3701			},
3702			ChannelMonitorUpdateStatus::InProgress => {
3703				#[cfg(not(any(test, feature = "_externalize_tests")))]
3704				if $self.monitor_update_type.swap(1, Ordering::Relaxed) == 2 {
3705					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
3706				}
3707				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
3708					$channel_id);
3709				false
3710			},
3711			ChannelMonitorUpdateStatus::Completed => {
3712				#[cfg(not(any(test, feature = "_externalize_tests")))]
3713				if $self.monitor_update_type.swap(2, Ordering::Relaxed) == 1 {
3714					panic!("Cannot use both ChannelMonitorUpdateStatus modes InProgress and Completed without restart");
3715				}
3716				$completed;
3717				true
3718			},
3719		}
3720	} };
3721	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
3722		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3723		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
3724			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
3725	};
3726	(
3727		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
3728		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
3729		_internal_outer, $completed: expr
3730	) => { {
3731		$in_flight_updates = &mut $peer_state.in_flight_monitor_updates.entry($chan_id)
3732			.or_insert_with(|| ($funding_txo, Vec::new())).1;
3733		// During startup, we push monitor updates as background events through to here in
3734		// order to replay updates that were in-flight when we shut down. Thus, we have to
3735		// filter for uniqueness here.
3736		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
3737			.unwrap_or_else(|| {
3738				$in_flight_updates.push($update);
3739				$in_flight_updates.len() - 1
3740			});
3741		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
3742			let update_res = $self.chain_monitor.update_channel($chan_id, &$in_flight_updates[$update_idx]);
3743			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3744		} else {
3745			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
3746			// fail to persist it. This is a fairly safe assumption, however, since anything we do
3747			// during the startup sequence should be replayed exactly if we immediately crash.
3748			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
3749				counterparty_node_id: $counterparty_node_id,
3750				funding_txo: $funding_txo,
3751				channel_id: $chan_id,
3752				update: $in_flight_updates[$update_idx].clone(),
3753			};
3754			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
3755			// `pending_background_events` to avoid a race condition during
3756			// `pending_background_events` processing where we complete one
3757			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
3758			// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
3759			// run post-completion actions.
3760			// We could work around that with some effort, but it's simpler to just track updates
3761			// twice.
3762			$self.pending_background_events.lock().unwrap().push(event);
3763			false
3764		}
3765	} };
3766	(
3767		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
3768		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
3769	) => { {
3770		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
3771		let chan_id = $chan_context.channel_id();
3772		let counterparty_node_id = $chan_context.get_counterparty_node_id();
3773		let in_flight_updates;
3774		let idx;
3775		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3776			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3777			{
3778				let _ = in_flight_updates.remove(idx);
3779			})
3780	} };
3781	(
3782		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3783		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3784	) => { {
3785		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
3786		let in_flight_updates;
3787		let idx;
3788		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
3789			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
3790			{
3791				let _ = in_flight_updates.remove(idx);
3792				if in_flight_updates.is_empty() {
3793					let update_actions = $peer_state.monitor_update_blocked_actions
3794						.remove(&$channel_id).unwrap_or(Vec::new());
3795
3796					mem::drop($peer_state_lock);
3797					mem::drop($per_peer_state_lock);
3798
3799					$self.handle_monitor_update_completion_actions(update_actions);
3800				}
3801			})
3802	} };
3803	(
3804		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3805		$per_peer_state_lock: expr, $chan: expr
3806	) => { {
3807		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3808		let chan_id = $chan.context.channel_id();
3809		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3810		let in_flight_updates;
3811		let idx;
3812		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3813			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3814			{
3815				let _ = in_flight_updates.remove(idx);
3816				if in_flight_updates.is_empty() {
3817					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
3818				}
3819			})
3820	} };
3821}
3822
3823#[rustfmt::skip]
3824macro_rules! process_events_body {
3825	($self: expr, $event_to_handle: expr, $handle_event: expr) => {
3826		let mut handling_failed = false;
3827		let mut processed_all_events = false;
3828		while !handling_failed && !processed_all_events {
3829			if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
3830				return;
3831			}
3832
3833			let mut result;
3834
3835			{
3836				// We'll acquire our total consistency lock so that we can be sure no other
3837				// persists happen while processing monitor events.
3838				let _read_guard = $self.total_consistency_lock.read().unwrap();
3839
3840				// Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
3841				// ensure any startup-generated background events are handled first.
3842				result = $self.process_background_events();
3843
3844				// TODO: This behavior should be documented. It's unintuitive that we query
3845				// ChannelMonitors when clearing other events.
3846				if $self.process_pending_monitor_events() {
3847					result = NotifyOption::DoPersist;
3848				}
3849			}
3850
3851			let pending_events = $self.pending_events.lock().unwrap().clone();
3852			if !pending_events.is_empty() {
3853				result = NotifyOption::DoPersist;
3854			}
3855
3856			let mut post_event_actions = Vec::new();
3857
3858			let mut num_handled_events = 0;
3859			for (event, action_opt) in pending_events {
3860				log_trace!($self.logger, "Handling event {:?}...", event);
3861				$event_to_handle = event;
3862				let event_handling_result = $handle_event;
3863				log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
3864				match event_handling_result {
3865					Ok(()) => {
3866						if let Some(action) = action_opt {
3867							post_event_actions.push(action);
3868						}
3869						num_handled_events += 1;
3870					}
3871					Err(_e) => {
3872						// If we encounter an error we stop handling events and make sure to replay
3873						// any unhandled events on the next invocation.
3874						handling_failed = true;
3875						break;
3876					}
3877				}
3878			}
3879
3880			{
3881				let mut pending_events = $self.pending_events.lock().unwrap();
3882				pending_events.drain(..num_handled_events);
3883				processed_all_events = pending_events.is_empty();
3884				// Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
3885				// updated here with the `pending_events` lock acquired.
3886				$self.pending_events_processor.store(false, Ordering::Release);
3887			}
3888
3889			if !post_event_actions.is_empty() {
3890				$self.handle_post_event_actions(post_event_actions);
3891				// If we had some actions, go around again as we may have more events now
3892				processed_all_events = false;
3893			}
3894
3895			match result {
3896				NotifyOption::DoPersist => {
3897					$self.needs_persist_flag.store(true, Ordering::Release);
3898					$self.event_persist_notifier.notify();
3899				},
3900				NotifyOption::SkipPersistHandleEvents =>
3901					$self.event_persist_notifier.notify(),
3902				NotifyOption::SkipPersistNoEvents => {},
3903			}
3904		}
3905	}
3906}
3907
3908impl<
3909		M: Deref,
3910		T: Deref,
3911		ES: Deref,
3912		NS: Deref,
3913		SP: Deref,
3914		F: Deref,
3915		R: Deref,
3916		MR: Deref,
3917		L: Deref,
3918	> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
3919where
3920	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
3921	T::Target: BroadcasterInterface,
3922	ES::Target: EntropySource,
3923	NS::Target: NodeSigner,
3924	SP::Target: SignerProvider,
3925	F::Target: FeeEstimator,
3926	R::Target: Router,
3927	MR::Target: MessageRouter,
3928	L::Target: Logger,
3929{
3930	/// Constructs a new `ChannelManager` to hold several channels and route between them.
3931	///
3932	/// The current time or latest block header time can be provided as the `current_timestamp`.
3933	///
3934	/// This is the main "logic hub" for all channel-related actions, and implements
3935	/// [`ChannelMessageHandler`].
3936	///
3937	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
3938	///
3939	/// Users need to notify the new `ChannelManager` when a new block is connected or
3940	/// disconnected using its [`block_connected`] and [`blocks_disconnected`] methods, starting
3941	/// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
3942	/// more details.
3943	///
3944	/// [`block_connected`]: chain::Listen::block_connected
3945	/// [`blocks_disconnected`]: chain::Listen::blocks_disconnected
3946	/// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
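	///
	/// # Example
	///
	/// A minimal, illustrative sketch of constructing a `ChannelManager`. All of the arguments
	/// (`fee_estimator`, `chain_monitor`, `broadcaster`, `router`, `message_router`, `logger`,
	/// `entropy_source`, `node_signer`, `signer_provider`, `config`, `chain_params` and
	/// `current_timestamp`) are assumed to have been constructed elsewhere so as to satisfy the
	/// corresponding trait bounds:
	///
	/// ```ignore
	/// // All arguments below are assumed to exist; see the trait bounds on `ChannelManager`.
	/// let channel_manager = ChannelManager::new(
	/// 	fee_estimator, chain_monitor, broadcaster, router, message_router, logger,
	/// 	entropy_source, node_signer, signer_provider, config, chain_params, current_timestamp,
	/// );
	/// ```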
3947	#[rustfmt::skip]
3948	pub fn new(
3949		fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
3950		entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
3951		params: ChainParameters, current_timestamp: u32,
3952	) -> Self
3953	where
3954		L: Clone,
3955	{
3956		let mut secp_ctx = Secp256k1::new();
3957		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
3958
3959		let expanded_inbound_key = node_signer.get_expanded_key();
3960		let our_network_pubkey = node_signer.get_node_id(Recipient::Node).unwrap();
3961
3962		let flow = OffersMessageFlow::new(
3963			ChainHash::using_genesis_block(params.network), params.best_block,
3964			our_network_pubkey, current_timestamp, expanded_inbound_key,
3965			node_signer.get_receive_auth_key(), secp_ctx.clone(), message_router, logger.clone(),
3966		);
3967
3968		ChannelManager {
3969			config: RwLock::new(config),
3970			chain_hash: ChainHash::using_genesis_block(params.network),
3971			fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
3972			chain_monitor,
3973			tx_broadcaster,
3974			router,
3975			flow,
3976
3977			best_block: RwLock::new(params.best_block),
3978
3979			outbound_scid_aliases: Mutex::new(new_hash_set()),
3980			pending_outbound_payments: OutboundPayments::new(new_hash_map(), logger.clone()),
3981			forward_htlcs: Mutex::new(new_hash_map()),
3982			decode_update_add_htlcs: Mutex::new(new_hash_map()),
3983			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
3984			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
3985			short_to_chan_info: FairRwLock::new(new_hash_map()),
3986
3987			our_network_pubkey,
3988			secp_ctx,
3989
3990			inbound_payment_key: expanded_inbound_key,
3991			fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
3992
3993			probing_cookie_secret: entropy_source.get_secure_random_bytes(),
3994			inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),
3995
3996			highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
3997
3998			per_peer_state: FairRwLock::new(new_hash_map()),
3999
4000			#[cfg(not(any(test, feature = "_externalize_tests")))]
4001			monitor_update_type: AtomicUsize::new(0),
4002
4003			pending_events: Mutex::new(VecDeque::new()),
4004			pending_events_processor: AtomicBool::new(false),
4005			pending_htlc_forwards_processor: AtomicBool::new(false),
4006			pending_background_events: Mutex::new(Vec::new()),
4007			total_consistency_lock: RwLock::new(()),
4008			background_events_processed_since_startup: AtomicBool::new(false),
4009			event_persist_notifier: Notifier::new(),
4010			needs_persist_flag: AtomicBool::new(false),
4011			funding_batch_states: Mutex::new(BTreeMap::new()),
4012
4013			pending_broadcast_messages: Mutex::new(Vec::new()),
4014
4015			last_days_feerates: Mutex::new(VecDeque::new()),
4016
4017			entropy_source,
4018			node_signer,
4019			signer_provider,
4020
4021			logger,
4022
4023			#[cfg(feature = "_test_utils")]
4024			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
4025		}
4026	}
4027
4028	/// Gets the current [`UserConfig`] which controls some global behavior and includes the
4029	/// default configuration applied to all new channels.
4030	pub fn get_current_config(&self) -> UserConfig {
4031		self.config.read().unwrap().clone()
4032	}
4033
4034	/// Updates the current [`UserConfig`] which controls some global behavior and includes the
4035	/// default configuration applied to all new channels.
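	///
	/// # Example
	///
	/// An illustrative sketch of a read-modify-write via [`Self::get_current_config`]; the
	/// `accept_forwards_to_priv_channels` field is used here purely as an example:
	///
	/// ```ignore
	/// // Read the current config, tweak one field, and write it back.
	/// let mut config = channel_manager.get_current_config();
	/// config.accept_forwards_to_priv_channels = true;
	/// channel_manager.set_current_config(config);
	/// ```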
4036	pub fn set_current_config(&self, new_config: UserConfig) {
4037		*self.config.write().unwrap() = new_config;
4038	}
4039
4040	#[cfg(test)]
4041	pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
4042		self.create_and_insert_outbound_scid_alias()
4043	}
4044
4045	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
4046		let height = self.best_block.read().unwrap().height;
4047		let mut outbound_scid_alias = 0;
4048		let mut i = 0;
4049		loop {
4050			// When fuzzing, ChaCha20 doesn't use the key at all, so we'd always get the same alias
4051			if cfg!(fuzzing) {
4052				outbound_scid_alias += 1;
4053			} else {
4054				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(
4055					height,
4056					&self.chain_hash,
4057					&self.fake_scid_rand_bytes,
4058					&self.entropy_source,
4059				);
4060			}
4061			if outbound_scid_alias != 0
4062				&& self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias)
4063			{
4064				break;
4065			}
4066			i += 1;
4067			if i > 1_000_000 {
4068				panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels)");
4069			}
4070		}
4071		outbound_scid_alias
4072	}
4073
4074	/// Creates a new outbound channel to the given remote node and with the given value.
4075	///
4076	/// `user_channel_id` will be provided back as in
4077	/// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
4078	/// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
4079	/// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it
4080	/// is simply copied to events and otherwise ignored.
4081	///
4082	/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
4083	/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
4084	///
4085	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
4086	/// generate a shutdown scriptpubkey or destination script set by
4087	/// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
4088	///
4089	/// Note that we do not check if you are currently connected to the given peer. If no
4090	/// connection is available, the outbound `open_channel` message may fail to send, resulting in
4091	/// the channel eventually being silently forgotten (dropped on reload).
4092	///
4093	/// If `temporary_channel_id` is specified, it will be used as the temporary channel ID of the
4094	/// channel. Otherwise, a random one will be generated for you.
4095	///
4096	/// Returns the new Channel's temporary `channel_id`. This ID will appear as
4097	/// [`Event::FundingGenerationReady::temporary_channel_id`] and in
4098	/// [`ChannelDetails::channel_id`] until after
4099	/// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
4100	/// one derived from the funding transaction's TXID. If the counterparty rejects the channel
4101	/// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
4102	///
4103	/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
4104	/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
4105	/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
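	///
	/// # Example
	///
	/// An illustrative sketch opening a 100,000 satoshi channel with nothing pushed to the
	/// counterparty; `channel_manager` and the peer's `node_id` are assumed to exist, and `42` is
	/// an arbitrary `user_channel_id`:
	///
	/// ```ignore
	/// // `node_id` must be a currently-connected peer for the open to make progress.
	/// let temporary_channel_id =
	/// 	channel_manager.create_channel(node_id, 100_000, 0, 42, None, None)?;
	/// ```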
4106	#[rustfmt::skip]
4107	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
4108		if channel_value_satoshis < 1000 {
4109			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
4110		}
4111
4112		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4113		// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
4114		debug_assert!(&self.total_consistency_lock.try_write().is_err());
4115
4116		let per_peer_state = self.per_peer_state.read().unwrap();
4117
4118		let peer_state_mutex = per_peer_state.get(&their_network_key)
4119			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
4120
4121		let mut peer_state = peer_state_mutex.lock().unwrap();
4122		if !peer_state.is_connected {
4123			return Err(APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) });
4124		}
4125
4126		if let Some(temporary_channel_id) = temporary_channel_id {
4127			if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
4128				return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
4129			}
4130		}
4131
4132		let mut channel = {
4133			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
4134			let their_features = &peer_state.latest_features;
4135			let config = self.config.read().unwrap();
4136			let config = if let Some(config) = &override_config {
4137				config
4138			} else {
4139				&*config
4140			};
4141			match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
4142				their_features, channel_value_satoshis, push_msat, user_channel_id, config,
4143				self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
4144			{
4145				Ok(res) => res,
4146				Err(e) => {
4147					self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
4148					return Err(e);
4149				},
4150			}
4151		};
4152		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
4153		let res = channel.get_open_channel(self.chain_hash, &&logger);
4154
4155		let temporary_channel_id = channel.context.channel_id();
4156		match peer_state.channel_by_id.entry(temporary_channel_id) {
4157			hash_map::Entry::Occupied(_) => {
4158				if cfg!(fuzzing) {
4159					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
4160				} else {
4161					panic!("RNG is bad???");
4162				}
4163			},
4164			hash_map::Entry::Vacant(entry) => { entry.insert(Channel::from(channel)); }
4165		}
4166
4167		if let Some(msg) = res {
4168			peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel {
4169				node_id: their_network_key,
4170				msg,
4171			});
4172		}
4173		Ok(temporary_channel_id)
4174	}
4175
4176	fn list_funded_channels_with_filter<
4177		Fn: FnMut(&(&InitFeatures, &ChannelId, &Channel<SP>)) -> bool,
4178	>(
4179		&self, mut f: Fn,
4180	) -> Vec<ChannelDetails> {
4181		// Allocate our best estimate of the number of channels we have in the `res`
4182		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
4183		// a scid or a scid alias. Therefore reallocations may still occur, but they are
4184		// unlikely as the `short_to_chan_info` map often contains 2 entries for
4185		// the same channel.
4186		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
4187		{
4188			let best_block_height = self.best_block.read().unwrap().height;
4189			let per_peer_state = self.per_peer_state.read().unwrap();
4190			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
4191				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4192				let peer_state = &mut *peer_state_lock;
4193				// Only `Channels` in the `Channel::Funded` phase can be considered funded.
4194				let filtered_chan_by_id = peer_state
4195					.channel_by_id
4196					.iter()
4197					.map(|(cid, c)| (&peer_state.latest_features, cid, c))
4198					.filter(|(_, _, chan)| chan.is_funded())
4199					.filter(|v| f(v));
4200				res.extend(filtered_chan_by_id.map(|(_, _channel_id, channel)| {
4201					ChannelDetails::from_channel(
4202						channel,
4203						best_block_height,
4204						peer_state.latest_features.clone(),
4205						&self.fee_estimator,
4206					)
4207				}));
4208			}
4209		}
4210		res
4211	}
4212
4213	/// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
4214	/// more information.
4215	pub fn list_channels(&self) -> Vec<ChannelDetails> {
4216		// Allocate our best estimate of the number of channels we have in the `res`
4217		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
4218		// a scid or a scid alias. Therefore reallocations may still occur, but they are
4219		// unlikely as the `short_to_chan_info` map often contains 2 entries for
4220		// the same channel.
4221		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
4222		{
4223			let best_block_height = self.best_block.read().unwrap().height;
4224			let per_peer_state = self.per_peer_state.read().unwrap();
4225			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
4226				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4227				let peer_state = &mut *peer_state_lock;
4228				for (_, channel) in peer_state.channel_by_id.iter() {
4229					let details = ChannelDetails::from_channel(
4230						channel,
4231						best_block_height,
4232						peer_state.latest_features.clone(),
4233						&self.fee_estimator,
4234					);
4235					res.push(details);
4236				}
4237			}
4238		}
4239		res
4240	}
4241
4242	/// Gets the list of usable channels, in random order. Useful as an argument to
4243	/// [`Router::find_route`] to ensure non-announced channels are used.
4244	///
4245	/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
4246	/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
4247	/// are.
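	///
	/// # Example
	///
	/// An illustrative sketch summing our outbound capacity across usable channels, using the
	/// `outbound_capacity_msat` field of [`ChannelDetails`]:
	///
	/// ```ignore
	/// // Rough upper bound on what we could currently send, ignoring per-channel limits.
	/// let sendable_msat: u64 = channel_manager
	/// 	.list_usable_channels()
	/// 	.iter()
	/// 	.map(|chan| chan.outbound_capacity_msat)
	/// 	.sum();
	/// ```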
4248	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
4249		// Note we use is_live here instead of usable which leads to somewhat confused
4250		// internal/external nomenclature, but that's OK because that's probably what the user
4251		// really wanted anyway.
4252		self.list_funded_channels_with_filter(|&(_, _, ref channel)| channel.context().is_live())
4253	}
4254
4255	/// Gets the list of channels we have with a given counterparty, in random order.
4256	pub fn list_channels_with_counterparty(
4257		&self, counterparty_node_id: &PublicKey,
4258	) -> Vec<ChannelDetails> {
4259		let best_block_height = self.best_block.read().unwrap().height;
4260		let per_peer_state = self.per_peer_state.read().unwrap();
4261
4262		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
4263			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4264			let peer_state = &mut *peer_state_lock;
4265			let features = &peer_state.latest_features;
4266			let channel_to_details = |channel| {
4267				ChannelDetails::from_channel(
4268					channel,
4269					best_block_height,
4270					features.clone(),
4271					&self.fee_estimator,
4272				)
4273			};
4274			let chan_by_id = peer_state.channel_by_id.iter();
4275			return chan_by_id.map(|(_, chan)| chan).map(channel_to_details).collect();
4276		}
4277		vec![]
4278	}
4279
4280	/// Returns recent payments, in an undefined order, that -- if not fulfilled -- have yet to find
4281	/// a successful path or still have unresolved HTLCs.
4282	///
4283	/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
4284	/// result of a crash. If such a payment exists, is not listed here, and an
4285	/// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
4286	///
4287	/// [`Event::PaymentSent`]: events::Event::PaymentSent
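	///
	/// # Example
	///
	/// An illustrative sketch logging abandoned payments; the variants matched on are those
	/// returned by this method:
	///
	/// ```ignore
	/// // `channel_manager` is assumed to exist; abandoned payments may warrant a retry elsewhere.
	/// for payment in channel_manager.list_recent_payments() {
	/// 	if let RecentPaymentDetails::Abandoned { payment_id, payment_hash } = payment {
	/// 		println!("Payment {:?} ({:?}) was abandoned", payment_id, payment_hash);
	/// 	}
	/// }
	/// ```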
4288	#[rustfmt::skip]
4289	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
4290		self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
4291			.filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
4292				PendingOutboundPayment::AwaitingInvoice { .. }
4293					| PendingOutboundPayment::AwaitingOffer { .. }
4294					// InvoiceReceived is an intermediate state and doesn't need to be exposed
4295					| PendingOutboundPayment::InvoiceReceived { .. } =>
4296				{
4297					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
4298				},
4299				PendingOutboundPayment::StaticInvoiceReceived { .. } => {
4300					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
4301				},
4302				PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
4303					Some(RecentPaymentDetails::Pending {
4304						payment_id: *payment_id,
4305						payment_hash: *payment_hash,
4306						total_msat: *total_msat,
4307					})
4308				},
4309				PendingOutboundPayment::Abandoned { payment_hash, .. } => {
4310					Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
4311				},
4312				PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
4313					Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
4314				},
4315				PendingOutboundPayment::Legacy { .. } => None
4316			})
4317			.collect()
4318	}
4319
4320	#[rustfmt::skip]
4321	fn close_channel_internal(&self, chan_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
4322		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4323
4324		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
4325		let mut shutdown_result = Ok(());
4326
4327		{
4328			let per_peer_state = self.per_peer_state.read().unwrap();
4329
4330			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4331				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
4332
4333			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4334			let peer_state = &mut *peer_state_lock;
4335
4336			match peer_state.channel_by_id.entry(*chan_id) {
4337				hash_map::Entry::Occupied(mut chan_entry) => {
4338					if !chan_entry.get().context().is_connected() {
4339						return Err(APIError::ChannelUnavailable {
4340							err: "Cannot begin shutdown while peer is disconnected, maybe force-close instead?".to_owned(),
4341						});
4342					}
4343
4344					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
4345						let funding_txo_opt = chan.funding.get_funding_txo();
4346						let their_features = &peer_state.latest_features;
4347						let (shutdown_msg, mut monitor_update_opt, htlcs) =
4348							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
4349						failed_htlcs = htlcs;
4350
4351						// We can send the `shutdown` message before updating the `ChannelMonitor`
4352						// here as we don't need the monitor update to complete until we send a
4353						// `closing_signed`, which we'll delay if we're pending a monitor update.
4354						peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
4355							node_id: *counterparty_node_id,
4356							msg: shutdown_msg,
4357						});
4358
4359						debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
4360							"We can't both complete shutdown and generate a monitor update");
4361
4362						// Update the monitor with the shutdown script if necessary.
4363						if let Some(monitor_update) = monitor_update_opt.take() {
4364							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
4365								peer_state_lock, peer_state, per_peer_state, chan);
4366						}
4367					} else {
4368						let reason = ClosureReason::LocallyCoopClosedUnfundedChannel;
4369						let err = ChannelError::Close((reason.to_string(), reason));
4370						let mut chan = chan_entry.remove();
4371						let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
4372						e.dont_send_error_message();
4373						shutdown_result = Err(e);
4374					}
4375				},
4376				hash_map::Entry::Vacant(_) => {
4377					return Err(APIError::ChannelUnavailable {
4378						err: format!(
4379							"Channel with id {} not found for the passed counterparty node_id {}",
4380							chan_id, counterparty_node_id,
4381						)
4382					});
4383				},
4384			}
4385		}
4386
4387		for htlc_source in failed_htlcs.drain(..) {
4388			let failure_reason = LocalHTLCFailureReason::ChannelClosed;
4389			let reason = HTLCFailReason::from_failure_code(failure_reason);
4390			let receiver = HTLCHandlingFailureType::Forward { node_id: Some(*counterparty_node_id), channel_id: *chan_id };
4391			let (source, hash) = htlc_source;
4392			self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None);
4393		}
4394
4395		let _ = handle_error!(self, shutdown_result, *counterparty_node_id);
4396
4397		Ok(())
4398	}
4399
4400	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
4401	/// will be accepted on the given channel, and after additional timeout/the closing of all
4402	/// pending HTLCs, the channel will be closed on chain.
4403	///
4404	///  * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and
4405	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
4406	///    fee estimate.
4407	///  * If our counterparty is the channel initiator, we will require a channel closing
4408	///    transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which
4409	///    would appear on a force-closure transaction, whichever is lower. We will allow our
4410	///    counterparty to pay as much fee as they'd like, however.
4411	///
4412	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
4413	///
4414	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
4415	/// generate a shutdown scriptpubkey or destination script set by
4416	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
4417	/// channel.
4418	///
4419	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
4420	/// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum
4421	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
4422	/// [`SendShutdown`]: MessageSendEvent::SendShutdown
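	///
	/// # Example
	///
	/// An illustrative sketch of a cooperative close; `channel_id` and `counterparty_node_id` are
	/// assumed to identify an existing, connected channel:
	///
	/// ```ignore
	/// // Begins the cooperative shutdown handshake with the counterparty.
	/// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
	/// ```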
4423	pub fn close_channel(
4424		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4425	) -> Result<(), APIError> {
4426		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
4427	}
4428
4429	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
4430	/// will be accepted on the given channel, and after additional timeout/the closing of all
4431	/// pending HTLCs, the channel will be closed on chain.
4432	///
4433	/// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated
4434	/// the channel being closed or not:
4435	///  * If we are the channel initiator, we will pay at least this feerate on the closing
4436	///    transaction. The upper-bound is set by
4437	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
4438	///    fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
4439	///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
4440	///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
4441	///    will appear on a force-closure transaction, whichever is lower).
4442	///
4443	/// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
4444	/// Will fail if a shutdown script has already been set for this channel by
4445	/// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
4446	/// also be compatible with our and the counterparty's features.
4447	///
4448	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
4449	///
4450	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
4451	/// generate a shutdown scriptpubkey or destination script set by
4452	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
4453	/// channel.
4454	///
4455	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
4456	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
4457	/// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]: crate::util::config::ChannelHandshakeConfig::commit_upfront_shutdown_pubkey
4458	/// [`SendShutdown`]: MessageSendEvent::SendShutdown
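	///
	/// # Example
	///
	/// An illustrative sketch requesting a closing feerate of at least 2,000 sat per 1000 weight
	/// while keeping the default shutdown script; the identifiers are assumed to exist:
	///
	/// ```ignore
	/// // `None` for the script falls back to the shutdown script derived via the `SignerProvider`.
	/// channel_manager.close_channel_with_feerate_and_script(
	/// 	&channel_id, &counterparty_node_id, Some(2000), None,
	/// )?;
	/// ```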
4459	pub fn close_channel_with_feerate_and_script(
4460		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4461		target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>,
4462	) -> Result<(), APIError> {
4463		self.close_channel_internal(
4464			channel_id,
4465			counterparty_node_id,
4466			target_feerate_sats_per_1000_weight,
4467			shutdown_script,
4468		)
4469	}
4470
4471	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
4472	#[rustfmt::skip]
4473	fn apply_post_close_monitor_update(
4474		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
4475		monitor_update: ChannelMonitorUpdate,
4476	) {
4477		// Note that there may be some post-close updates which need to be well-ordered with
4478		// respect to the `update_id`, so we hold the `peer_state` lock here.
4479		let per_peer_state = self.per_peer_state.read().unwrap();
4480		let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
4481			.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
4482			.lock().unwrap();
4483		let peer_state = &mut *peer_state_lock;
4484		match peer_state.channel_by_id.entry(channel_id) {
4485			hash_map::Entry::Occupied(mut chan_entry) => {
4486				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
4487					handle_new_monitor_update!(self, funding_txo,
4488						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
4489					return;
4490				} else {
4491					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
4492				}
4493			},
4494			hash_map::Entry::Vacant(_) => {},
4495		}
4496
4497		handle_new_monitor_update!(
4498			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
4499			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
4500		);
4501	}
4502
4503	/// When a channel is removed, two things need to happen:
4504	/// (a) [`convert_channel_err`] must be called in the same `per_peer_state` lock as the
4505	///     channel-closing action,
4506	/// (b) [`handle_error`] needs to be called without holding any locks (except
4507	///     [`ChannelManager::total_consistency_lock`]), which then calls this.
4508	#[rustfmt::skip]
4509	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
4510		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
4511		#[cfg(debug_assertions)]
4512		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
4513			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
4514		}
4515
4516		let logger = WithContext::from(
4517			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
4518		);
4519
4520		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
4521			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
4522		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
4523			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
4524			let failure_reason = LocalHTLCFailureReason::ChannelClosed;
4525			let reason = HTLCFailReason::from_failure_code(failure_reason);
4526			let receiver = HTLCHandlingFailureType::Forward { node_id: Some(counterparty_node_id), channel_id };
4527			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
4528		}
4529		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
4530			debug_assert!(false, "This should have been handled in `locked_close_channel`");
4531			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
4532		}
4533		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
4534			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
4535			// not in the startup sequence) check if we need to handle any
4536			// `MonitorUpdateCompletionAction`s.
4537			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
4538			// `locked_close_channel` we can skip the locks here.
4539			if shutdown_res.channel_funding_txo.is_some() {
4540				self.channel_monitor_updated(&shutdown_res.channel_id, None, &shutdown_res.counterparty_node_id);
4541			}
4542		}
4543		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
4544		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
4545			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
4546			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
4547			let per_peer_state = self.per_peer_state.read().unwrap();
4548			let mut has_uncompleted_channel = None;
4549			for (channel_id, counterparty_node_id, state) in affected_channels {
4550				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
4551					let mut peer_state = peer_state_mutex.lock().unwrap();
4552					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
4553						let reason = ClosureReason::FundingBatchClosure;
4554						let err = ChannelError::Close((reason.to_string(), reason));
4555						let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
4556						shutdown_results.push((Err(e), counterparty_node_id));
4557					}
4558				}
4559				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
4560			}
4561			debug_assert!(
4562				has_uncompleted_channel.unwrap_or(true),
4563				"Closing a batch where all channels have completed initial monitor update",
4564			);
4565		}
4566
4567		{
4568			let mut pending_events = self.pending_events.lock().unwrap();
4569			pending_events.push_back((events::Event::ChannelClosed {
4570				channel_id: shutdown_res.channel_id,
4571				user_channel_id: shutdown_res.user_channel_id,
4572				reason: shutdown_res.closure_reason,
4573				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
4574				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
4575				channel_funding_txo: shutdown_res.channel_funding_txo,
4576				last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
4577			}, None));
4578
4579			if let Some(splice_funding_failed) = shutdown_res.splice_funding_failed.take() {
4580				pending_events.push_back((events::Event::SpliceFailed {
4581					channel_id: shutdown_res.channel_id,
4582					counterparty_node_id: shutdown_res.counterparty_node_id,
4583					user_channel_id: shutdown_res.user_channel_id,
4584					abandoned_funding_txo: splice_funding_failed.funding_txo,
4585					channel_type: splice_funding_failed.channel_type,
4586					contributed_inputs: splice_funding_failed.contributed_inputs,
4587					contributed_outputs: splice_funding_failed.contributed_outputs,
4588				}, None));
4589			}
4590
4591			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
4592				let funding_info = if shutdown_res.is_manual_broadcast {
4593					FundingInfo::OutPoint {
4594						outpoint: shutdown_res.channel_funding_txo
4595							.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
4596					}
4597				} else {
4598					FundingInfo::Tx{ transaction }
4599				};
4600				pending_events.push_back((events::Event::DiscardFunding {
4601					channel_id: shutdown_res.channel_id, funding_info
4602				}, None));
4603			}
4604		}
4605		for (err, counterparty_node_id) in shutdown_results.drain(..) {
4606			let _ = handle_error!(self, err, counterparty_node_id);
4607		}
4608	}
4609
4610	/// The `reason` should indicate whether the closure was requested via a peer message (e.g. a
4611	/// counterparty force-close) or by the user; it will be re-exposed as the `ChannelClosed` reason.
4612	#[rustfmt::skip]
4613	fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, reason: ClosureReason)
4614	-> Result<(), APIError> {
4615		let per_peer_state = self.per_peer_state.read().unwrap();
4616		let peer_state_mutex = per_peer_state.get(peer_node_id)
4617			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
4618		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4619		let peer_state = &mut *peer_state_lock;
4620		let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
4621
4622		let is_from_counterparty = matches!(reason, ClosureReason::CounterpartyForceClosed { .. });
4623		let message = match &reason {
4624			ClosureReason::HolderForceClosed { message, .. } => message.clone(),
4625			_ => reason.to_string(),
4626		};
4627
4628		if let Some(mut chan) = peer_state.channel_by_id.remove(channel_id) {
4629			log_error!(logger, "Force-closing channel {}", channel_id);
4630			let err = ChannelError::Close((message, reason));
4631			let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
4632			mem::drop(peer_state_lock);
4633			mem::drop(per_peer_state);
4634			if is_from_counterparty {
4635				// If the peer is the one who asked us to force-close, don't reply with a fresh
4636				// error message.
4637				e.dont_send_error_message();
4638			}
4639			let _ = handle_error!(self, Err::<(), _>(e), *peer_node_id);
4640			Ok(())
4641		} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
4642			log_error!(logger, "Force-closing inbound channel request {}", &channel_id);
4643			if !is_from_counterparty && peer_state.is_connected {
4644				peer_state.pending_msg_events.push(
4645					MessageSendEvent::HandleError {
4646						node_id: *peer_node_id,
4647						action: msgs::ErrorAction::SendErrorMessage {
4648							msg: msgs::ErrorMessage { channel_id: *channel_id, data: message }
4649						},
4650					}
4651				);
4652			}
4653			// N.B. that we don't send any channel close event here: we
4654			// don't have a user_channel_id, and we never sent any opening
4655			// events anyway.
4656			Ok(())
4657		} else {
4658			Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) })
4659		}
4660	}
4661
4662	#[rustfmt::skip]
4663	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
4664	-> Result<(), APIError> {
4665		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4666		log_debug!(self.logger,
4667			"Force-closing channel. The error message sent to the peer: {}", error_message);
4668		// No matter what value for `broadcasted_latest_txn` we set here, `Channel` will override it
4669		// and set the appropriate value.
4670		let reason = ClosureReason::HolderForceClosed {
4671			broadcasted_latest_txn: Some(true),
4672			message: error_message,
4673		};
4674		self.force_close_channel_with_peer(channel_id, &counterparty_node_id, reason)
4675	}
4676
4677	/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
4678	/// rejecting new HTLCs.
4679	///
4680	/// The provided `error_message` is sent to connected peers for closing
4681	/// channels and should be a human-readable description of what went wrong.
4682	///
4683	/// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
4684	/// isn't the counterparty of the corresponding channel.
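	///
	/// # Example
	///
	/// An illustrative sketch; the error message is sent to the connected peer and should be a
	/// human-readable description of why the channel is being force-closed:
	///
	/// ```ignore
	/// // Broadcasts the latest local commitment transaction(s) for this channel.
	/// channel_manager.force_close_broadcasting_latest_txn(
	/// 	&channel_id, &counterparty_node_id, "Channel unresponsive for too long".to_string(),
	/// )?;
	/// ```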
4685	pub fn force_close_broadcasting_latest_txn(
4686		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String,
4687	) -> Result<(), APIError> {
4688		self.force_close_sending_error(channel_id, counterparty_node_id, error_message)
4689	}
4690
4691	/// Force close all channels, immediately broadcasting the latest local commitment transaction
4692	/// for each to the chain and rejecting new HTLCs on each.
4693	///
4694	/// The provided `error_message` is sent to connected peers for closing channels and should
4695	/// be a human-readable description of what went wrong.
4696	pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
4697		for chan in self.list_channels() {
4698			let _ = self.force_close_broadcasting_latest_txn(
4699				&chan.channel_id,
4700				&chan.counterparty.node_id,
4701				error_message.clone(),
4702			);
4703		}
4704	}
4705
4706	/// Initiate a splice in order to add value to (splice-in) or remove value from (splice-out)
4707	/// the channel. This will spend the channel's funding transaction output, effectively replacing
4708	/// it with a new one.
4709	///
4710	/// # Arguments
4711	///
4712	/// Provide a `contribution` to determine if value is spliced in or out. The splice initiator is
4713	/// responsible for paying fees for common fields, shared inputs, and shared outputs along with
4714	/// any contributed inputs and outputs. Fees are determined using `funding_feerate_per_kw` and
4715	/// must be covered by the supplied inputs for splice-in or the channel balance for splice-out.
4716	///
4717	/// An optional `locktime` for the funding transaction may be specified. If not given, the
4718	/// current best block height is used.
4719	///
4720	/// # Events
4721	///
4722	/// Once the funding transaction has been constructed, an [`Event::SplicePending`] will be
4723	/// emitted. At this point, any inputs contributed to the splice can only be re-spent if an
4724	/// [`Event::DiscardFunding`] is seen.
4725	///
4726	/// After initial signatures have been exchanged, [`Event::FundingTransactionReadyForSigning`]
4727	/// will be generated and [`ChannelManager::funding_transaction_signed`] should be called.
4728	///
4729	/// If any failures occur while negotiating the funding transaction, an [`Event::SpliceFailed`]
4730	/// will be emitted. Any contributed inputs no longer used will be included here and thus can
4731	/// be re-spent.
4732	///
4733	/// Once the splice has been locked by both counterparties, an [`Event::ChannelReady`] will be
4734	/// emitted with the new funding output. At this point, a new splice can be negotiated by
4735	/// calling `splice_channel` again on this channel.
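	///
	/// # Example
	///
	/// An illustrative sketch initiating a splice at 1,000 sat per kiloweight with the default
	/// locktime. Constructing the [`SpliceContribution`] is not shown; it is assumed to have been
	/// built elsewhere (e.g. describing a splice-in funded by inputs the user controls):
	///
	/// ```ignore
	/// // `contribution` is a `SpliceContribution` built elsewhere; `None` uses the current best
	/// // block height as the funding transaction locktime.
	/// channel_manager.splice_channel(
	/// 	&channel_id, &counterparty_node_id, contribution, 1000, None,
	/// )?;
	/// ```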
4736	#[rustfmt::skip]
4737	pub fn splice_channel(
4738		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4739		contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: Option<u32>,
4740	) -> Result<(), APIError> {
4741		let mut res = Ok(());
4742		PersistenceNotifierGuard::optionally_notify(self, || {
4743			let result = self.internal_splice_channel(
4744				channel_id, counterparty_node_id, contribution, funding_feerate_per_kw, locktime
4745			);
4746			res = result;
4747			match res {
4748				Ok(_) => NotifyOption::DoPersist,
4749				Err(_) => NotifyOption::SkipPersistNoEvents,
4750			}
4751		});
4752		res
4753	}
4754
4755	fn internal_splice_channel(
4756		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4757		contribution: SpliceContribution, funding_feerate_per_kw: u32, locktime: Option<u32>,
4758	) -> Result<(), APIError> {
4759		let per_peer_state = self.per_peer_state.read().unwrap();
4760
4761		let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| {
4762			APIError::ChannelUnavailable {
4763				err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
4764			}
4765		}) {
4766			Ok(p) => p,
4767			Err(e) => return Err(e),
4768		};
4769
4770		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4771		let peer_state = &mut *peer_state_lock;
4772
4773		// Look for the channel
4774		match peer_state.channel_by_id.entry(*channel_id) {
4775			hash_map::Entry::Occupied(mut chan_phase_entry) => {
4776				let locktime = locktime.unwrap_or_else(|| self.current_best_block().height);
4777				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
4778					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4779					let msg_opt = chan.splice_channel(
4780						contribution,
4781						funding_feerate_per_kw,
4782						locktime,
4783						&&logger,
4784					)?;
4785					if let Some(msg) = msg_opt {
4786						peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
4787							node_id: *counterparty_node_id,
4788							msg,
4789						});
4790					}
4791					Ok(())
4792				} else {
4793					Err(APIError::ChannelUnavailable {
4794						err: format!(
4795							"Channel with id {} is not funded, cannot splice it",
4796							channel_id
4797						),
4798					})
4799				}
4800			},
4801			hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable {
4802				err: format!(
4803					"Channel with id {} not found for the passed counterparty node_id {}",
4804					channel_id, counterparty_node_id,
4805				),
4806			}),
4807		}
4808	}
4809
4810	#[cfg(test)]
4811	pub(crate) fn abandon_splice(
4812		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4813	) -> Result<(), APIError> {
4814		let mut res = Ok(());
4815		PersistenceNotifierGuard::optionally_notify(self, || {
4816			let result = self.internal_abandon_splice(channel_id, counterparty_node_id);
4817			res = result;
4818			match res {
4819				Ok(_) => NotifyOption::SkipPersistHandleEvents,
4820				Err(_) => NotifyOption::SkipPersistNoEvents,
4821			}
4822		});
4823		res
4824	}
4825
4826	#[cfg(test)]
4827	fn internal_abandon_splice(
4828		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey,
4829	) -> Result<(), APIError> {
4830		let per_peer_state = self.per_peer_state.read().unwrap();
4831
4832		let peer_state_mutex = match per_peer_state.get(counterparty_node_id).ok_or_else(|| {
4833			APIError::ChannelUnavailable {
4834				err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
4835			}
4836		}) {
4837			Ok(p) => p,
4838			Err(e) => return Err(e),
4839		};
4840
4841		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4842		let peer_state = &mut *peer_state_lock;
4843
4844		// Look for the channel
4845		match peer_state.channel_by_id.entry(*channel_id) {
4846			hash_map::Entry::Occupied(mut chan_phase_entry) => {
4847				if !chan_phase_entry.get().context().is_connected() {
4848					// TODO: We should probably support this, but right now `splice_channel` refuses when
4849					// the peer is disconnected, so we just check it here.
4850					return Err(APIError::ChannelUnavailable {
4851						err: "Cannot abandon splice while peer is disconnected".to_owned(),
4852					});
4853				}
4854
4855				if let Some(chan) = chan_phase_entry.get_mut().as_funded_mut() {
4856					let (tx_abort, splice_funding_failed) = chan.abandon_splice()?;
4857
4858					peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort {
4859						node_id: *counterparty_node_id,
4860						msg: tx_abort,
4861					});
4862
4863					if let Some(splice_funding_failed) = splice_funding_failed {
4864						let pending_events = &mut self.pending_events.lock().unwrap();
4865						pending_events.push_back((
4866							events::Event::SpliceFailed {
4867								channel_id: *channel_id,
4868								counterparty_node_id: *counterparty_node_id,
4869								user_channel_id: chan.context.get_user_id(),
4870								abandoned_funding_txo: splice_funding_failed.funding_txo,
4871								channel_type: splice_funding_failed.channel_type,
4872								contributed_inputs: splice_funding_failed.contributed_inputs,
4873								contributed_outputs: splice_funding_failed.contributed_outputs,
4874							},
4875							None,
4876						));
4877					}
4878
4879					Ok(())
4880				} else {
4881					Err(APIError::ChannelUnavailable {
4882						err: format!(
4883							"Channel with id {} is not funded, cannot abandon splice",
4884							channel_id
4885						),
4886					})
4887				}
4888			},
4889			hash_map::Entry::Vacant(_) => Err(APIError::ChannelUnavailable {
4890				err: format!(
4891					"Channel with id {} not found for the passed counterparty node_id {}",
4892					channel_id, counterparty_node_id,
4893				),
4894			}),
4895		}
4896	}
4897
4898	#[rustfmt::skip]
4899	fn can_forward_htlc_to_outgoing_channel(
4900		&self, chan: &mut FundedChannel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
4901	) -> Result<(), LocalHTLCFailureReason> {
4902		if !chan.context.should_announce()
4903			&& !self.config.read().unwrap().accept_forwards_to_priv_channels
4904		{
4905			// Note that the behavior here should be identical to the above block - we
4906			// should NOT reveal the existence or non-existence of a private channel if
4907			// we don't allow forwards outbound over them.
4908			return Err(LocalHTLCFailureReason::PrivateChannelForward);
4909		}
4910		if let HopConnector::ShortChannelId(outgoing_scid) = next_packet.outgoing_connector {
4911			if chan.funding.get_channel_type().supports_scid_privacy() && outgoing_scid != chan.context.outbound_scid_alias() {
4912				// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
4913				// "refuse to forward unless the SCID alias was used", so we pretend
4914				// we don't have the channel here.
4915				return Err(LocalHTLCFailureReason::RealSCIDForward);
4916			}
4917		} else {
4918			return Err(LocalHTLCFailureReason::InvalidTrampolineForward);
4919		}
4920
4921		// Note that we could technically not return an error yet here and just hope
4922		// that the connection is reestablished or monitor updated by the time we get
4923		// around to doing the actual forward, but better to fail early if we can and
4924		// hopefully an attacker trying to path-trace payments cannot make this occur
4925		// on a small/per-node/per-channel scale.
4926		if !chan.context.is_live() {
4927			if !chan.context.is_enabled() {
4928				return Err(LocalHTLCFailureReason::ChannelDisabled);
4929			} else {
4930				return Err(LocalHTLCFailureReason::ChannelNotReady);
4931			}
4932		}
4933		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() {
4934			return Err(LocalHTLCFailureReason::AmountBelowMinimum);
4935		}
4936		chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value)?;
4937
4938		Ok(())
4939	}
4940
4941	/// Executes a callback `C` that returns some value `X` on the channel found with the given
4942	/// `scid`. `None` is returned when the channel is not found.
4943	fn do_funded_channel_callback<X, C: Fn(&mut FundedChannel<SP>) -> X>(
4944		&self, scid: u64, callback: C,
4945	) -> Option<X> {
4946		let (counterparty_node_id, channel_id) =
4947			match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
4948				None => return None,
4949				Some((cp_id, id)) => (cp_id, id),
4950			};
4951		let per_peer_state = self.per_peer_state.read().unwrap();
4952		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4953		if peer_state_mutex_opt.is_none() {
4954			return None;
4955		}
4956		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4957		let peer_state = &mut *peer_state_lock;
4958		match peer_state.channel_by_id.get_mut(&channel_id).and_then(Channel::as_funded_mut) {
4959			None => None,
4960			Some(chan) => Some(callback(chan)),
4961		}
4962	}
4963
4964	#[rustfmt::skip]
4965	fn can_forward_htlc(
4966		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
4967	) -> Result<(), LocalHTLCFailureReason> {
4968		let outgoing_scid = match next_packet_details.outgoing_connector {
4969			HopConnector::ShortChannelId(scid) => scid,
4970			HopConnector::Trampoline(_) => {
4971				return Err(LocalHTLCFailureReason::InvalidTrampolineForward);
4972			}
4973		};
4974		match self.do_funded_channel_callback(outgoing_scid, |chan: &mut FundedChannel<SP>| {
4975			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
4976		}) {
4977			Some(Ok(())) => {},
4978			Some(Err(e)) => return Err(e),
4979			None => {
4980				// If we couldn't find the channel info for the scid, it may be a phantom or
4981				// intercept forward.
4982				if (self.config.read().unwrap().accept_intercept_htlcs &&
4983					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)) ||
4984					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, outgoing_scid, &self.chain_hash)
4985				{} else {
4986					return Err(LocalHTLCFailureReason::UnknownNextPeer);
4987				}
4988			}
4989		}
4990
4991		let cur_height = self.best_block.read().unwrap().height + 1;
4992		check_incoming_htlc_cltv(cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry)?;
4993
4994		Ok(())
4995	}
4996
4997	#[rustfmt::skip]
4998	fn htlc_failure_from_update_add_err(
4999		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
5000		reason: LocalHTLCFailureReason, is_intro_node_blinded_forward: bool,
5001		shared_secret: &[u8; 32]
5002	) -> HTLCFailureMsg {
5003		// at capacity, we write fields `htlc_msat` and `len`
5004		let mut res = VecWriter(Vec::with_capacity(8 + 2));
5005		if reason.is_temporary() {
5006			if reason == LocalHTLCFailureReason::AmountBelowMinimum ||
5007				reason == LocalHTLCFailureReason::FeeInsufficient {
5008				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
5009			}
5010			else if reason == LocalHTLCFailureReason::IncorrectCLTVExpiry {
5011				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
5012			}
5013			else if reason == LocalHTLCFailureReason::ChannelDisabled {
5014				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
5015				0u16.write(&mut res).expect("Writes cannot fail");
5016			}
5017			// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
5018			(0u16).write(&mut res).expect("Writes cannot fail");
5019		}
5020
5021		log_info!(
5022			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
5023			"Failed to accept/forward incoming HTLC: {:?}", reason,
5024		);
5025		// If `msg.blinding_point` is set, we must always fail with malformed.
5026		if msg.blinding_point.is_some() {
5027			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
5028				channel_id: msg.channel_id,
5029				htlc_id: msg.htlc_id,
5030				sha256_of_onion: [0; 32],
5031				failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
5032			});
5033		}
5034
5035		let (reason, err_data) = if is_intro_node_blinded_forward {
5036			(LocalHTLCFailureReason::InvalidOnionBlinding, &[0; 32][..])
5037		} else {
5038			(reason, &res.0[..])
5039		};
5040		let failure = HTLCFailReason::reason(reason, err_data.to_vec())
5041		.get_encrypted_failure_packet(shared_secret, &None);
5042		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
5043			channel_id: msg.channel_id,
5044			htlc_id: msg.htlc_id,
5045			reason: failure.data,
5046			attribution_data: failure.attribution_data,
5047		})
5048	}
5049
5050	#[rustfmt::skip]
5051	fn construct_pending_htlc_fail_msg<'a>(
5052		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
5053		shared_secret: [u8; 32], inbound_err: InboundHTLCErr
5054	) -> HTLCFailureMsg {
5055		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
5056		log_info!(logger, "Failed to accept/forward incoming HTLC: {}", inbound_err.msg);
5057
5058		if msg.blinding_point.is_some() {
5059			return HTLCFailureMsg::Malformed(
5060				msgs::UpdateFailMalformedHTLC {
5061					channel_id: msg.channel_id,
5062					htlc_id: msg.htlc_id,
5063					sha256_of_onion: [0; 32],
5064					failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
5065				}
5066			)
5067		}
5068
5069		let failure = HTLCFailReason::reason(inbound_err.reason, inbound_err.err_data.to_vec())
5070					.get_encrypted_failure_packet(&shared_secret, &None);
5071		return HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
5072			channel_id: msg.channel_id,
5073			htlc_id: msg.htlc_id,
5074			reason: failure.data,
5075			attribution_data: failure.attribution_data,
5076		});
5077	}
5078
5079	#[rustfmt::skip]
5080	fn get_pending_htlc_info<'a>(
5081		&self, msg: &msgs::UpdateAddHTLC, shared_secret: [u8; 32],
5082		decoded_hop: onion_utils::Hop, allow_underpay: bool,
5083		next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
5084	) -> Result<PendingHTLCInfo, InboundHTLCErr> {
5085		match decoded_hop {
5086			onion_utils::Hop::Receive { .. } | onion_utils::Hop::BlindedReceive { .. } |
5087			onion_utils::Hop::TrampolineReceive { .. } | onion_utils::Hop::TrampolineBlindedReceive { .. } => {
5088				// OUR PAYMENT!
5089				// Note that we could obviously respond immediately with an update_fulfill_htlc
5090				// message, however that would leak that we are the recipient of this payment, so
5091				// instead we stay symmetric with the forwarding case, only responding (after a
5092				// delay) once they've sent us a commitment_signed!
5093				let current_height: u32 = self.best_block.read().unwrap().height;
5094				create_recv_pending_htlc_info(decoded_hop, shared_secret, msg.payment_hash,
5095					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
5096					current_height)
5097			},
5098			onion_utils::Hop::Forward { .. } | onion_utils::Hop::BlindedForward { .. } => {
5099				create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt)
5100			},
5101			onion_utils::Hop::TrampolineForward { .. } | onion_utils::Hop::TrampolineBlindedForward { .. } => {
5102				create_fwd_pending_htlc_info(msg, decoded_hop, shared_secret, next_packet_pubkey_opt)
5103			},
5104		}
5105	}
5106
5107	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
5108	/// public, and thus should be called whenever the result is going to be passed out in a
5109	/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
5110	///
5111	/// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
5112	/// corresponding to the channel's counterparty locked, as the channel has been removed from
5113	/// storage and the `peer_state` lock has been dropped.
5114	///
5115	/// [`channel_update`]: msgs::ChannelUpdate
5116	/// [`internal_closing_signed`]: Self::internal_closing_signed
5117	fn get_channel_update_for_broadcast(
5118		&self, chan: &FundedChannel<SP>,
5119	) -> Result<msgs::ChannelUpdate, LightningError> {
5120		if !chan.context.should_announce() {
5121			return Err(LightningError {
5122				err: "Cannot broadcast a channel_update for a private channel".to_owned(),
5123				action: msgs::ErrorAction::IgnoreError,
5124			});
5125		}
5126		if chan.funding.get_short_channel_id().is_none() {
5127			return Err(LightningError {
5128				err: "Channel not yet established".to_owned(),
5129				action: msgs::ErrorAction::IgnoreError,
5130			});
5131		}
5132		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5133		log_trace!(
5134			logger,
5135			"Attempting to generate broadcast channel update for channel {}",
5136			&chan.context.channel_id()
5137		);
5138		self.get_channel_update_for_unicast(chan)
5139	}
5140
5141	/// Gets the current [`channel_update`] for the given channel. This does not check if the channel
5142	/// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
5143	/// and thus MUST NOT be called unless the recipient of the resulting message has already
5144	/// provided evidence that they know about the existence of the channel.
5145	///
5146	/// Note that through [`internal_closing_signed`], this function is called without the
5147	/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
5148	/// removed from storage and the `peer_state` lock has been dropped.
5149	///
5150	/// [`channel_update`]: msgs::ChannelUpdate
5151	/// [`internal_closing_signed`]: Self::internal_closing_signed
5152	#[rustfmt::skip]
5153	fn get_channel_update_for_unicast(&self, chan: &FundedChannel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
5154		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5155		log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
5156		let short_channel_id = match chan.funding.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
5157			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
5158			Some(id) => id,
5159		};
5160
5161		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5162		log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
5163		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
5164		let enabled = chan.context.is_enabled();
5165
5166		let unsigned = msgs::UnsignedChannelUpdate {
5167			chain_hash: self.chain_hash,
5168			short_channel_id,
5169			timestamp: chan.context.get_update_time_counter(),
5170			message_flags: 1, // Only must_be_one
5171			channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
5172			cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
5173			htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
5174			htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
5175			fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
5176			fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
5177			excess_data: Vec::new(),
5178		};
5179		// Panic on failure, to signal that LDK should be restarted to retry signing the `ChannelUpdate`.
5180		// If we returned an error and the `node_signer` cannot provide a signature for whatever
5181		// reason, we wouldn't be able to receive inbound payments through the corresponding
5182		// channel.
5183		let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
5184
5185		Ok(msgs::ChannelUpdate {
5186			signature: sig,
5187			contents: unsigned
5188		})
5189	}
5190
5191	#[cfg(any(test, feature = "_externalize_tests"))]
5192	pub(crate) fn test_send_payment_along_path(
5193		&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields,
5194		total_value: u64, cur_height: u32, payment_id: PaymentId,
5195		keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32],
5196	) -> Result<(), APIError> {
5197		let _lck = self.total_consistency_lock.read().unwrap();
5198		self.send_payment_along_path(SendAlongPathArgs {
5199			path,
5200			payment_hash,
5201			recipient_onion: &recipient_onion,
5202			total_value,
5203			cur_height,
5204			payment_id,
5205			keysend_preimage,
5206			invoice_request: None,
5207			bolt12_invoice: None,
5208			session_priv_bytes,
5209			hold_htlc_at_next_hop: false,
5210		})
5211	}
5212
5213	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
5214		let SendAlongPathArgs {
5215			path,
5216			payment_hash,
5217			recipient_onion,
5218			total_value,
5219			cur_height,
5220			payment_id,
5221			keysend_preimage,
5222			invoice_request,
5223			bolt12_invoice,
5224			session_priv_bytes,
5225			hold_htlc_at_next_hop,
5226		} = args;
5227		// The top-level caller should hold the total_consistency_lock read lock.
5228		debug_assert!(self.total_consistency_lock.try_write().is_err());
5229		let prng_seed = self.entropy_source.get_secure_random_bytes();
5230		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
5231
5232		let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
5233			&self.secp_ctx,
5234			&path,
5235			&session_priv,
5236			total_value,
5237			recipient_onion,
5238			cur_height,
5239			payment_hash,
5240			keysend_preimage,
5241			invoice_request,
5242			prng_seed,
5243		)
5244		.map_err(|e| {
5245			let first_hop_key = Some(path.hops.first().unwrap().pubkey);
5246			let logger = WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash));
5247			log_error!(logger, "Failed to build an onion for path for payment hash {payment_hash}");
5248			e
5249		})?;
5250
5251		let err: Result<(), _> = loop {
5252			let first_chan_scid = &path.hops.first().unwrap().short_channel_id;
5253			let first_chan = self.short_to_chan_info.read().unwrap().get(first_chan_scid).cloned();
5254
5255			let (counterparty_node_id, id) = match first_chan {
5256				None => {
5257					let first_hop_key = Some(path.hops.first().unwrap().pubkey);
5258					let logger =
5259						WithContext::from(&self.logger, first_hop_key, None, Some(*payment_hash));
5260					log_error!(logger, "Failed to find first-hop for payment hash {payment_hash}");
5261					return Err(APIError::ChannelUnavailable {
5262						err: "No channel available with first hop!".to_owned(),
5263					});
5264				},
5265				Some((cp_id, chan_id)) => (cp_id, chan_id),
5266			};
5267
5268			let logger = WithContext::from(
5269				&self.logger,
5270				Some(counterparty_node_id),
5271				Some(id),
5272				Some(*payment_hash),
5273			);
5274			log_trace!(
5275				logger,
5276				"Attempting to send payment with payment hash {payment_hash} along path with next hop {first_chan_scid}"
5277			);
5278
5279			let per_peer_state = self.per_peer_state.read().unwrap();
5280			let peer_state_mutex = per_peer_state.get(&counterparty_node_id).ok_or_else(|| {
5281				APIError::ChannelUnavailable {
5282					err: "No peer matching the path's first hop found!".to_owned(),
5283				}
5284			})?;
5285			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5286			let peer_state = &mut *peer_state_lock;
5287			if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(id) {
5288				match chan_entry.get_mut().as_funded_mut() {
5289					Some(chan) => {
5290						if !chan.context.is_live() {
5291							return Err(APIError::ChannelUnavailable {
5292								err: "Peer for first hop currently disconnected".to_owned(),
5293							});
5294						}
5295						let funding_txo = chan.funding.get_funding_txo().unwrap();
5296						let logger = WithChannelContext::from(
5297							&self.logger,
5298							&chan.context,
5299							Some(*payment_hash),
5300						);
5301						let htlc_source = HTLCSource::OutboundRoute {
5302							path: path.clone(),
5303							session_priv: session_priv.clone(),
5304							first_hop_htlc_msat: htlc_msat,
5305							payment_id,
5306							bolt12_invoice: bolt12_invoice.cloned(),
5307						};
5308						let send_res = chan.send_htlc_and_commit(
5309							htlc_msat,
5310							*payment_hash,
5311							htlc_cltv,
5312							htlc_source,
5313							onion_packet,
5314							None,
5315							hold_htlc_at_next_hop,
5316							&self.fee_estimator,
5317							&&logger,
5318						);
5319						match break_channel_entry!(self, peer_state, send_res, chan_entry) {
5320							Some(monitor_update) => {
5321								let ok = handle_new_monitor_update!(
5322									self,
5323									funding_txo,
5324									monitor_update,
5325									peer_state_lock,
5326									peer_state,
5327									per_peer_state,
5328									chan
5329								);
5330								if !ok {
5331									// Note that MonitorUpdateInProgress here indicates (per function
5332									// docs) that we will resend the commitment update once monitor
5333									// updating completes. Therefore, we must return an error
5334									// indicating that it is unsafe to retry the payment wholesale,
5335									// which we do in the send_payment check for
5336									// MonitorUpdateInProgress, below.
5337									return Err(APIError::MonitorUpdateInProgress);
5338								}
5339							},
5340							None => {},
5341						}
5342					},
5343					None => {
5344						return Err(APIError::ChannelUnavailable {
5345							err: "Channel to first hop is unfunded".to_owned(),
5346						})
5347					},
5348				};
5349			} else {
5350				// The channel was likely removed after we fetched the id from the
5351				// `short_to_chan_info` map, but before we successfully locked the
5352				// `channel_by_id` map.
5353				// This can occur as no consistency guarantees exists between the two maps.
5354				return Err(APIError::ChannelUnavailable {
5355					err: "No channel available with first hop!".to_owned(),
5356				});
5357			}
5358			return Ok(());
5359		};
5360		match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
5361			Ok(_) => unreachable!(),
5362			Err(e) => Err(APIError::ChannelUnavailable { err: e.err }),
5363		}
5364	}
5365
5366	/// Sends a payment along a given route. See [`Self::send_payment`] for more info.
5367	///
5368	/// LDK will not automatically retry this payment, though it may be manually re-sent after an
5369	/// [`Event::PaymentFailed`] is generated.
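	///
	/// # Example
	///
	/// A minimal usage sketch (editor's illustration, not from the upstream docs), assuming a
	/// `channel_manager`, a `route` obtained from your own router, and the payment's
	/// `payment_hash` and `payment_secret` are already in scope:
	///
	/// ```ignore
	/// // LDK will not retry this route automatically; watch for Event::PaymentFailed and re-send
	/// // manually if desired.
	/// let recipient_onion = RecipientOnionFields::secret_only(payment_secret);
	/// let payment_id = PaymentId(payment_hash.0);
	/// channel_manager.send_payment_with_route(route, payment_hash, recipient_onion, payment_id)?;
	/// ```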
5370	#[rustfmt::skip]
5371	pub fn send_payment_with_route(
5372		&self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
5373		payment_id: PaymentId
5374	) -> Result<(), RetryableSendFailure> {
5375		let best_block_height = self.best_block.read().unwrap().height;
5376		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5377		let route_params = route.route_params.clone().unwrap_or_else(|| {
5378			// Create a dummy route params since they're a required parameter but unused in this case
5379			let (payee_node_id, cltv_delta) = route.paths.first()
5380				.and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
5381				.unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
5382			let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
5383			RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
5384		});
5385		if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
5386		let router = FixedRouter::new(route);
5387		self.pending_outbound_payments
5388			.send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
5389				route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
5390				&self.entropy_source, &self.node_signer, best_block_height,
5391				&self.pending_events, |args| self.send_payment_along_path(args))
5392	}
5393
5394	/// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed
5395	/// payment paths based on the provided `Retry`.
5396	///
5397	/// You should likely prefer [`Self::pay_for_bolt11_invoice`] or [`Self::pay_for_offer`] in
5398	/// general; however, this method may allow for slightly more customization.
5399	///
5400	/// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
5401	/// [`PeerManager::process_events`]).
5402	///
5403	/// # Avoiding Duplicate Payments
5404	///
5405	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
5406	/// method will error with [`RetryableSendFailure::DuplicatePayment`]. Note, however, that once a
5407	/// payment is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of
5408	/// an [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
5409	/// second payment with the same [`PaymentId`].
5410	///
5411	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
5412	/// tracking of payments, including state to indicate once a payment has completed. Because you
5413	/// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
5414	/// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
5415	/// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
5416	///
5417	/// Additionally, in the scenario where we begin the process of sending a payment, but crash
5418	/// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're
5419	/// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See
5420	/// [`ChannelManager::list_recent_payments`] for more information.
5421	///
5422	/// Routes are automatically found using the [`Router`] provided on startup. To fix a route for a
5423	/// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to
5424	/// [`Router::find_route_with_id`].
5425	///
5426	/// [`Event::PaymentSent`]: events::Event::PaymentSent
5427	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
5428	/// [`UpdateHTLCs`]: MessageSendEvent::UpdateHTLCs
5429	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
5430	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
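	///
	/// # Example
	///
	/// A minimal sketch (editor's illustration), assuming `channel_manager`, the recipient's
	/// `payee_pubkey`, and the payment's `payment_hash`/`payment_secret` are in scope; the amount
	/// and CLTV delta below are placeholders:
	///
	/// ```ignore
	/// let recipient_onion = RecipientOnionFields::secret_only(payment_secret);
	/// // Re-using the payment hash bytes as the PaymentId simplifies duplicate-payment tracking.
	/// let payment_id = PaymentId(payment_hash.0);
	/// let payment_params = PaymentParameters::from_node_id(payee_pubkey, 144);
	/// let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000);
	/// channel_manager.send_payment(
	/// 	payment_hash, recipient_onion, payment_id, route_params, Retry::Attempts(3),
	/// )?;
	/// ```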
5431	pub fn send_payment(
5432		&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
5433		payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry,
5434	) -> Result<(), RetryableSendFailure> {
5435		let best_block_height = self.best_block.read().unwrap().height;
5436		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5437		self.pending_outbound_payments.send_payment(
5438			payment_hash,
5439			recipient_onion,
5440			payment_id,
5441			retry_strategy,
5442			route_params,
5443			&self.router,
5444			self.list_usable_channels(),
5445			|| self.compute_inflight_htlcs(),
5446			&self.entropy_source,
5447			&self.node_signer,
5448			best_block_height,
5449			&self.pending_events,
5450			|args| self.send_payment_along_path(args),
5451		)
5452	}
5453
5454	#[cfg(any(test, feature = "_externalize_tests"))]
5455	pub(super) fn test_send_payment_internal(
5456		&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
5457		keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId,
5458		recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>,
5459	) -> Result<(), PaymentSendFailure> {
5460		let best_block_height = self.best_block.read().unwrap().height;
5461		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5462		self.pending_outbound_payments.test_send_payment_internal(
5463			route,
5464			payment_hash,
5465			recipient_onion,
5466			keysend_preimage,
5467			payment_id,
5468			recv_value_msat,
5469			onion_session_privs,
5470			&self.node_signer,
5471			best_block_height,
5472			|args| self.send_payment_along_path(args),
5473		)
5474	}
5475
5476	#[cfg(any(test, feature = "_externalize_tests"))]
5477	pub(crate) fn test_add_new_pending_payment(
5478		&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
5479		payment_id: PaymentId, route: &Route,
5480	) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
5481		let best_block_height = self.best_block.read().unwrap().height;
5482		self.pending_outbound_payments.test_add_new_pending_payment(
5483			payment_hash,
5484			recipient_onion,
5485			payment_id,
5486			route,
5487			None,
5488			&self.entropy_source,
5489			best_block_height,
5490		)
5491	}
5492
5493	#[cfg(test)]
5494	pub(crate) fn test_modify_pending_payment<Fn>(&self, payment_id: &PaymentId, mut callback: Fn)
5495	where
5496		Fn: FnMut(&mut PendingOutboundPayment),
5497	{
5498		let mut outbounds =
5499			self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
5500		match outbounds.get_mut(payment_id) {
5501			Some(outb) => callback(outb),
5502			_ => panic!(),
5503		}
5504	}
5505
5506	#[cfg(test)]
5507	pub(crate) fn test_set_payment_metadata(
5508		&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>,
5509	) {
5510		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
5511	}
5512
5513	/// Pays a [`Bolt11Invoice`] associated with the `payment_id`. See [`Self::send_payment`] for more info.
5514	///
5515	/// # Payment Id
5516	/// The invoice's `payment_hash().0` serves as a reliable choice for the `payment_id`.
5517	///
5518	/// # Handling Invoice Amounts
5519	/// Some invoices include a specific amount, while others require you to specify one.
5520	/// - If the invoice **includes** an amount, you may provide an amount greater than or equal to it
5521	/// to allow for overpayments.
5522	/// - If the invoice **doesn't include** an amount, you'll need to specify `amount_msats`.
5523	///
5524	/// If these conditions aren’t met, the function will return [`Bolt11PaymentError::InvalidAmount`].
5525	///
5526	/// # Custom Routing Parameters
5527	/// Users can customize routing parameters via [`RouteParametersConfig`].
5528	/// To use default settings, call the function with [`RouteParametersConfig::default`].
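	///
	/// # Example
	///
	/// A minimal sketch (editor's illustration), assuming `channel_manager` and a parsed
	/// `invoice: Bolt11Invoice` are in scope:
	///
	/// ```ignore
	/// // Use the invoice's payment hash bytes as the PaymentId, per the note above.
	/// let payment_id = PaymentId(invoice.payment_hash().0);
	/// // Passing `None` as the amount is only valid if the invoice itself specifies one.
	/// channel_manager.pay_for_bolt11_invoice(
	/// 	&invoice, payment_id, None, RouteParametersConfig::default(), Retry::Attempts(3),
	/// )?;
	/// ```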
5529	pub fn pay_for_bolt11_invoice(
5530		&self, invoice: &Bolt11Invoice, payment_id: PaymentId, amount_msats: Option<u64>,
5531		route_params_config: RouteParametersConfig, retry_strategy: Retry,
5532	) -> Result<(), Bolt11PaymentError> {
5533		let best_block_height = self.best_block.read().unwrap().height;
5534		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5535		self.pending_outbound_payments.pay_for_bolt11_invoice(
5536			invoice,
5537			payment_id,
5538			amount_msats,
5539			route_params_config,
5540			retry_strategy,
5541			&self.router,
5542			self.list_usable_channels(),
5543			|| self.compute_inflight_htlcs(),
5544			&self.entropy_source,
5545			&self.node_signer,
5546			best_block_height,
5547			&self.pending_events,
5548			|args| self.send_payment_along_path(args),
5549		)
5550	}
5551
5552	/// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
5553	///
5554	/// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
5555	/// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
5556	/// fails or if the encoded `payment_id` is not recognized. The latter may happen once the
5557	/// payment is no longer tracked because the payment was attempted after:
5558	/// - an invoice for the `payment_id` was already paid,
5559	/// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
5560	///   offer, or
5561	/// - the refund corresponding to the invoice has already expired.
5562	///
5563	/// To retry the payment, request another invoice using a new `payment_id`.
5564	///
5565	/// Attempting to pay the same invoice twice while the first payment is still pending will
5566	/// result in a [`Bolt12PaymentError::DuplicateInvoice`].
5567	///
5568	/// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] are used to indicate
5569	/// whether or not the payment was successful.
5570	///
5571	/// [timer tick]: Self::timer_tick_occurred
5572	pub fn send_payment_for_bolt12_invoice(
5573		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
5574	) -> Result<(), Bolt12PaymentError> {
5575		match self.verify_bolt12_invoice(invoice, context) {
5576			Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
5577			Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
5578		}
5579	}
5580
5581	fn verify_bolt12_invoice(
5582		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
5583	) -> Result<PaymentId, ()> {
5584		let secp_ctx = &self.secp_ctx;
5585		let expanded_key = &self.inbound_payment_key;
5586
5587		match context {
5588			None if invoice.is_for_refund_without_paths() => {
5589				invoice.verify_using_metadata(expanded_key, secp_ctx)
5590			},
5591			Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
5592				invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
5593			},
5594			_ => Err(()),
5595		}
5596	}
5597
5598	fn send_payment_for_verified_bolt12_invoice(
5599		&self, invoice: &Bolt12Invoice, payment_id: PaymentId,
5600	) -> Result<(), Bolt12PaymentError> {
5601		let best_block_height = self.best_block.read().unwrap().height;
5602		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5603		let features = self.bolt12_invoice_features();
5604		self.pending_outbound_payments.send_payment_for_bolt12_invoice(
5605			invoice,
5606			payment_id,
5607			&self.router,
5608			self.list_usable_channels(),
5609			features,
5610			|| self.compute_inflight_htlcs(),
5611			&self.entropy_source,
5612			&self.node_signer,
5613			&self,
5614			&self.secp_ctx,
5615			best_block_height,
5616			&self.pending_events,
5617			|args| self.send_payment_along_path(args),
5618		)
5619	}
5620
5621	fn check_refresh_async_receive_offer_cache(&self, timer_tick_occurred: bool) {
5622		let peers = self.get_peers_for_blinded_path();
5623		let channels = self.list_usable_channels();
5624		let entropy = &*self.entropy_source;
5625		let router = &*self.router;
5626		let refresh_res = self.flow.check_refresh_async_receive_offer_cache(
5627			peers,
5628			channels,
5629			entropy,
5630			router,
5631			timer_tick_occurred,
5632		);
5633		match refresh_res {
5634			Err(()) => {
5635				log_error!(
5636					self.logger,
5637					"Failed to create blinded paths when requesting async receive offer paths"
5638				);
5639			},
5640			Ok(()) => {},
5641		}
5642	}
5643
5644	#[cfg(test)]
5645	pub(crate) fn test_check_refresh_async_receive_offers(&self) {
5646		self.check_refresh_async_receive_offer_cache(false);
5647	}
5648
5649	/// Should be called after handling an [`Event::PersistStaticInvoice`], where the `Responder`
5650	/// comes from [`Event::PersistStaticInvoice::invoice_persisted_path`].
5651	pub fn static_invoice_persisted(&self, invoice_persisted_path: Responder) {
5652		self.flow.static_invoice_persisted(invoice_persisted_path);
5653	}
5654
5655	/// Forwards a [`StaticInvoice`] to a payer in response to an
5656	/// [`Event::StaticInvoiceRequested`]. Also forwards the payer's [`InvoiceRequest`] to the
5657	/// async recipient, in case the recipient is online to provide the payer with a fresh
5658	/// [`Bolt12Invoice`].
5659	pub fn respond_to_static_invoice_request(
5660		&self, invoice: StaticInvoice, responder: Responder, invoice_request: InvoiceRequest,
5661		invoice_request_path: BlindedMessagePath,
5662	) -> Result<(), Bolt12SemanticError> {
5663		self.flow.enqueue_invoice_request_to_forward(
5664			invoice_request,
5665			invoice_request_path,
5666			responder.clone(),
5667		);
5668		self.flow.enqueue_static_invoice(invoice, responder)
5669	}
5670
5671	fn initiate_async_payment(
5672		&self, invoice: &StaticInvoice, payment_id: PaymentId,
5673	) -> Result<(), Bolt12PaymentError> {
5674		let mut res = Ok(());
5675		PersistenceNotifierGuard::optionally_notify(self, || {
5676			let best_block_height = self.best_block.read().unwrap().height;
5677			let features = self.bolt12_invoice_features();
5678			let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
5679				invoice,
5680				payment_id,
5681				features,
5682				best_block_height,
5683				self.duration_since_epoch(),
5684				&*self.entropy_source,
5685				&self.pending_events,
5686			);
5687			match outbound_pmts_res {
5688				Ok(()) => {},
5689				Err(Bolt12PaymentError::UnexpectedInvoice)
5690				| Err(Bolt12PaymentError::DuplicateInvoice) => {
5691					res = outbound_pmts_res.map(|_| ());
5692					return NotifyOption::SkipPersistNoEvents;
5693				},
5694				Err(e) => {
5695					res = Err(e);
5696					return NotifyOption::DoPersist;
5697				},
5698			};
5699
5700			// If the call to `Self::hold_htlc_channels` succeeded, then we are a private node and can
5701			// hold the HTLCs for this payment at our next-hop channel counterparty until the recipient
5702			// comes online. This allows us to go offline after locking in the HTLCs.
5703			if let Ok(channels) = self.hold_htlc_channels() {
5704				if let Err(e) =
5705					self.send_payment_for_static_invoice_no_persist(payment_id, channels, true)
5706				{
5707					log_trace!(
5708						self.logger,
5709						"Failed to send held HTLC with payment id {}: {:?}",
5710						payment_id,
5711						e
5712					);
5713				}
5714			} else {
5715				let reply_path = HeldHtlcReplyPath::ToUs {
5716					payment_id,
5717					peers: self.get_peers_for_blinded_path(),
5718				};
5719				let enqueue_held_htlc_available_res =
5720					self.flow.enqueue_held_htlc_available(invoice, reply_path);
5721				if enqueue_held_htlc_available_res.is_err() {
5722					self.abandon_payment_with_reason(
5723						payment_id,
5724						PaymentFailureReason::BlindedPathCreationFailed,
5725					);
5726					res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
5727					return NotifyOption::DoPersist;
5728				};
5729			}
5730
5731			NotifyOption::DoPersist
5732		});
5733
5734		res
5735	}
5736
5737	/// Returns a list of channels whose counterparty signals support for
5738	/// [`InitFeatures::supports_htlc_hold`], or an error if there are none or if we are configured
5739	/// not to hold HTLCs at our next-hop channel counterparty. Useful for sending async payments to
5740	/// [`StaticInvoice`]s.
5741	fn hold_htlc_channels(&self) -> Result<Vec<ChannelDetails>, ()> {
5742		let should_send_async = self.config.read().unwrap().hold_outbound_htlcs_at_next_hop;
5743		if !should_send_async {
5744			return Err(());
5745		}
5746
5747		let hold_htlc_channels =
5748			self.list_funded_channels_with_filter(|&(init_features, _, ref channel)| {
5749				init_features.supports_htlc_hold() && channel.context().is_live()
5750			});
5751
5752		if hold_htlc_channels.is_empty() {
5753			Err(())
5754		} else {
5755			Ok(hold_htlc_channels)
5756		}
5757	}
5758
5759	fn send_payment_for_static_invoice(
5760		&self, payment_id: PaymentId,
5761	) -> Result<(), Bolt12PaymentError> {
5762		let mut res = Ok(());
5763		let first_hops = self.list_usable_channels();
5764		PersistenceNotifierGuard::optionally_notify(self, || {
5765			let outbound_pmts_res =
5766				self.send_payment_for_static_invoice_no_persist(payment_id, first_hops, false);
5767			match outbound_pmts_res {
5768				Err(Bolt12PaymentError::UnexpectedInvoice)
5769				| Err(Bolt12PaymentError::DuplicateInvoice) => {
5770					res = outbound_pmts_res.map(|_| ());
5771					NotifyOption::SkipPersistNoEvents
5772				},
5773				other_res => {
5774					res = other_res;
5775					NotifyOption::DoPersist
5776				},
5777			}
5778		});
5779		res
5780	}
5781
5782	/// Useful if the caller is already triggering a persist of the `ChannelManager`.
5783	fn send_payment_for_static_invoice_no_persist(
5784		&self, payment_id: PaymentId, first_hops: Vec<ChannelDetails>, hold_htlcs_at_next_hop: bool,
5785	) -> Result<(), Bolt12PaymentError> {
5786		let best_block_height = self.best_block.read().unwrap().height;
5787		self.pending_outbound_payments.send_payment_for_static_invoice(
5788			payment_id,
5789			hold_htlcs_at_next_hop,
5790			&self.router,
5791			first_hops,
5792			|| self.compute_inflight_htlcs(),
5793			&self.entropy_source,
5794			&self.node_signer,
5795			&self,
5796			&self.secp_ctx,
5797			best_block_height,
5798			&self.pending_events,
5799			|args| self.send_payment_along_path(args),
5800		)
5801	}
5802
5803	/// If we are holding an HTLC on behalf of an often-offline sender, this method allows us to
5804	/// create a path for the sender to use as the reply path when they send the recipient a
5805	/// [`HeldHtlcAvailable`] onion message, so the recipient's [`ReleaseHeldHtlc`] response will be
5806	/// received by our node.
5807	fn path_for_release_held_htlc(
5808		&self, htlc_id: u64, prev_outbound_scid_alias: u64, channel_id: &ChannelId,
5809		counterparty_node_id: &PublicKey,
5810	) -> BlindedMessagePath {
5811		let intercept_id =
5812			InterceptId::from_htlc_id_and_chan_id(htlc_id, channel_id, counterparty_node_id);
5813		self.flow.path_for_release_held_htlc(
5814			intercept_id,
5815			prev_outbound_scid_alias,
5816			htlc_id,
5817			&*self.entropy_source,
5818		)
5819	}
5820
5821	/// Signals that no further attempts for the given payment should occur. Useful if you have a
5822	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
5823	/// retries are exhausted.
5824	///
5825	/// # Event Generation
5826	///
5827	/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
5828	/// as there are no remaining pending HTLCs for this payment.
5829	///
5830	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
5831	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
5832	/// determine the ultimate status of a payment.
5833	///
5834	/// # Requested Invoices
5835	///
5836	/// In the case of paying a [`Bolt12Invoice`] via [`ChannelManager::pay_for_offer`], abandoning
5837	/// the payment prior to receiving the invoice will result in an [`Event::PaymentFailed`] and
5838	/// prevent any attempts at paying it once received.
5839	///
5840	/// # Restart Behavior
5841	///
5842	/// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
5843	/// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated.
5844	///
5845	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
5846	pub fn abandon_payment(&self, payment_id: PaymentId) {
5847		self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
5848	}
5849
5850	fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
5851		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5852		self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
5853	}
5854
5855	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
5856	/// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
5857	/// the preimage, it must be a cryptographically secure random value that no intermediate node
5858	/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
5859	/// never reach the recipient.
5860	///
5861	/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
5862	/// [`send_payment`] for more information about the risks of duplicate preimage usage.
5863	///
5864	/// See [`send_payment`] documentation for more details on the idempotency guarantees provided by
5865	/// the [`PaymentId`] key.
5866	///
5867	/// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
5868	/// payments.
5869	///
5870	/// [`send_payment`]: Self::send_payment
5871	/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
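	///
	/// # Example
	///
	/// A minimal keysend sketch (editor's illustration), assuming `channel_manager` and the
	/// recipient's `payee_pubkey` are in scope; the CLTV delta, amount, `allow_mpp` flag, and
	/// `PaymentId` bytes below are placeholders:
	///
	/// ```ignore
	/// let payment_params = PaymentParameters::for_keysend(payee_pubkey, 40, false);
	/// let route_params = RouteParameters::from_payment_params_and_value(payment_params, 10_000_000);
	/// // Passing `None` lets LDK generate a cryptographically secure preimage for us.
	/// let payment_hash = channel_manager.send_spontaneous_payment(
	/// 	None, RecipientOnionFields::spontaneous_empty(), PaymentId([42; 32]), route_params,
	/// 	Retry::Attempts(3),
	/// )?;
	/// ```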
5872	pub fn send_spontaneous_payment(
5873		&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
5874		payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry,
5875	) -> Result<PaymentHash, RetryableSendFailure> {
5876		let best_block_height = self.best_block.read().unwrap().height;
5877		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5878		self.pending_outbound_payments.send_spontaneous_payment(
5879			payment_preimage,
5880			recipient_onion,
5881			payment_id,
5882			retry_strategy,
5883			route_params,
5884			&self.router,
5885			self.list_usable_channels(),
5886			|| self.compute_inflight_htlcs(),
5887			&self.entropy_source,
5888			&self.node_signer,
5889			best_block_height,
5890			&self.pending_events,
5891			|args| self.send_payment_along_path(args),
5892		)
5893	}
5894
5895	/// Send a payment that is probing the given route for liquidity. We calculate the
5896	/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
5897	/// us to easily discern them from real payments.
5898	pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
5899		let best_block_height = self.best_block.read().unwrap().height;
5900		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5901		self.pending_outbound_payments.send_probe(
5902			path,
5903			self.probing_cookie_secret,
5904			&self.entropy_source,
5905			&self.node_signer,
5906			best_block_height,
5907			|args| self.send_payment_along_path(args),
5908		)
5909	}
5910
5911	/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
5912	/// payment probe.
5913	#[cfg(test)]
5914	pub(crate) fn payment_is_probe(
5915		&self, payment_hash: &PaymentHash, payment_id: &PaymentId,
5916	) -> bool {
5917		outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
5918	}
5919
5920	/// Sends payment probes over all paths of a route that would be used to pay the given
5921	/// amount to the given `node_id`.
5922	///
5923	/// See [`ChannelManager::send_preflight_probes`] for more information.
5924	pub fn send_spontaneous_preflight_probes(
5925		&self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
5926		liquidity_limit_multiplier: Option<u64>,
5927	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
5928		let payment_params = PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
5929
5930		let route_params =
5931			RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
5932
5933		self.send_preflight_probes(route_params, liquidity_limit_multiplier)
5934	}
5935
5936	/// Sends payment probes over all paths of a route that would be used to pay a route found
5937	/// according to the given [`RouteParameters`].
5938	///
5939	/// This may be used to send "pre-flight" probes, i.e., to train our scorer before conducting
5940	/// the actual payment. Note this is only useful if there likely is sufficient time for the
5941	/// probe to settle before sending out the actual payment, e.g., when waiting for user
5942	/// confirmation in a wallet UI.
5943	///
5944	/// Otherwise, there is a chance the probe could take up some liquidity needed to complete the
5945	/// actual payment. Users should therefore be cautious and might avoid sending probes if
5946	/// liquidity is scarce and/or they don't expect the probe to return before they send the
5947	/// payment. To mitigate this issue, channels with available liquidity less than the required
5948	/// amount times the given `liquidity_limit_multiplier` won't be used to send pre-flight
5949	/// probes. If `None` is given as `liquidity_limit_multiplier`, it defaults to `3`.
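	///
	/// # Example
	///
	/// A minimal sketch (editor's illustration), assuming `channel_manager` and the prospective
	/// recipient's `payee_pubkey` are in scope; the amount and CLTV delta are placeholders:
	///
	/// ```ignore
	/// let payment_params = PaymentParameters::from_node_id(payee_pubkey, 144);
	/// let route_params = RouteParameters::from_payment_params_and_value(payment_params, 50_000_000);
	/// // `None` uses the default liquidity limit multiplier of 3.
	/// let probes = channel_manager.send_preflight_probes(route_params, None)?;
	/// // Track the returned (PaymentHash, PaymentId) pairs to recognize the probe results later.
	/// for (probe_hash, probe_id) in probes { /* record the in-flight probe */ }
	/// ```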
5950	pub fn send_preflight_probes(
5951		&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
5952	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
5953		let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);
5954
5955		let payer = self.get_our_node_id();
5956		let usable_channels = self.list_usable_channels();
5957		let first_hops = usable_channels.iter().collect::<Vec<_>>();
5958		let inflight_htlcs = self.compute_inflight_htlcs();
5959
5960		let route = self
5961			.router
5962			.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
5963			.map_err(|e| {
5964				log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
5965				ProbeSendFailure::RouteNotFound
5966			})?;
5967
5968		let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
5969
5970		let mut res = Vec::new();
5971
5972		for mut path in route.paths {
5973			// If the last hop is probably an unannounced channel we refrain from probing all the
5974			// way through to the end and instead probe up to the second-to-last channel.
5975			while let Some(last_path_hop) = path.hops.last() {
5976				if last_path_hop.maybe_announced_channel {
5977					// We found a potentially announced last hop.
5978					break;
5979				} else {
5980					// Drop the last hop, as it's likely unannounced.
5981					log_debug!(
5982						self.logger,
5983						"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
5984						last_path_hop.short_channel_id
5985					);
5986					let final_value_msat = path.final_value_msat();
5987					path.hops.pop();
5988					if let Some(new_last) = path.hops.last_mut() {
5989						new_last.fee_msat += final_value_msat;
5990					}
5991				}
5992			}
5993
5994			if path.hops.len() < 2 {
5995				log_debug!(
5996					self.logger,
5997					"Skipped sending payment probe over path with less than two hops."
5998				);
5999				continue;
6000			}
6001
6002			if let Some(first_path_hop) = path.hops.first() {
6003				if let Some(first_hop) = first_hops.iter().find(|h| {
6004					h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
6005				}) {
6006					let path_value = path.final_value_msat() + path.fee_msat();
6007					let used_liquidity =
6008						used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);
6009
6010					if first_hop.next_outbound_htlc_limit_msat
6011						< (*used_liquidity + path_value) * liquidity_limit_multiplier
6012					{
6013						log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
6014						continue;
6015					} else {
6016						*used_liquidity += path_value;
6017					}
6018				}
6019			}
6020
6021			res.push(self.send_probe(path).map_err(|e| {
6022				log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
6023				e
6024			})?);
6025		}
6026
6027		Ok(res)
6028	}
6029
6030	/// Handles the generation of a funding transaction, optionally (for tests) with a function
6031	/// which checks the correctness of the funding transaction given the associated channel.
6032	#[rustfmt::skip]
6033	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
6034		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
6035		mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
6036	) -> Result<(), APIError> {
6037		let per_peer_state = self.per_peer_state.read().unwrap();
6038		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
6039			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
6040
6041		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6042		let peer_state = &mut *peer_state_lock;
6043
6044		macro_rules! abandon_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
6045			let counterparty;
6046			let err = if let ChannelError::Close((msg, reason)) = $err {
6047				let channel_id = $chan.context.channel_id();
6048				counterparty = $chan.context.get_counterparty_node_id();
6049				let shutdown_res = $chan.abandon_unfunded_chan(reason);
6050				MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
6051			} else { unreachable!(); };
6052
6053			mem::drop(peer_state_lock);
6054			mem::drop(per_peer_state);
6055			let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
6056			Err($api_err)
6057		} } }
6058
6059		let mut chan = match peer_state.channel_by_id.entry(temporary_channel_id) {
6060			hash_map::Entry::Occupied(chan) => {
6061				if !chan.get().ready_to_fund() {
6062					return Err(APIError::APIMisuseError {
6063						err: format!("Channel {temporary_channel_id} with counterparty {counterparty_node_id} is not an unfunded, outbound channel ready to fund"),
6064					});
6065				}
6066				match chan.remove().into_unfunded_outbound_v1() {
6067					Ok(chan) => chan,
6068					Err(chan) => {
6069						debug_assert!(false, "ready_to_fund guarantees into_unfunded_outbound_v1 will succeed");
6070						peer_state.channel_by_id.insert(temporary_channel_id, chan);
6071						return Err(APIError::APIMisuseError {
6072							err: "Invalid state, please report this bug".to_owned(),
6073						});
6074					},
6075				}
6076			},
6077			hash_map::Entry::Vacant(_) => {
6078				return Err(APIError::ChannelUnavailable {
6079					err: format!("Channel {temporary_channel_id} with counterparty {counterparty_node_id} not found"),
6080				});
6081			},
6082		};
6083
6084		let funding_txo = match find_funding_output(&chan) {
6085			Ok(found_funding_txo) => found_funding_txo,
6086			Err(err) => {
6087				let chan_err = ChannelError::close(err.to_owned());
6088				let api_err = APIError::APIMisuseError { err: err.to_owned() };
6089				return abandon_chan!(chan_err, api_err, chan);
6090			},
6091		};
6092
6093		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6094		let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
6095		let (mut chan, msg_opt) = match funding_res {
6096			Ok(funding_msg) => (chan, funding_msg),
6097			Err((mut chan, chan_err)) => {
6098				let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
6099				return abandon_chan!(chan_err, api_err, chan);
6100			}
6101		};
6102
6103		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
6104			hash_map::Entry::Occupied(_) => {
6105				// We need to `unset_funding_info` to make sure we don't close the already open
6106				// channel and instead close the one pending.
6107				let err = format!(
6108					"An existing channel using ID {} is open with peer {}",
6109					chan.context.channel_id(), chan.context.get_counterparty_node_id(),
6110				);
6111				let chan_err = ChannelError::close(err.to_owned());
6112				let api_err = APIError::APIMisuseError { err: err.to_owned() };
6113				chan.unset_funding_info();
6114				return abandon_chan!(chan_err, api_err, chan);
6115			},
6116			hash_map::Entry::Vacant(e) => {
6117				if let Some(msg) = msg_opt {
6118					peer_state.pending_msg_events.push(MessageSendEvent::SendFundingCreated {
6119						node_id: chan.context.get_counterparty_node_id(),
6120						msg,
6121					});
6122				}
6123				if is_manual_broadcast {
6124					chan.context.set_manual_broadcast();
6125				}
6126
6127				e.insert(Channel::from(chan));
6128				Ok(())
6129			}
6130		}
6131	}
6132
6133	#[cfg(any(test, feature = "_externalize_tests"))]
6134	pub(crate) fn funding_transaction_generated_unchecked(
6135		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
6136		funding_transaction: Transaction, output_index: u16,
6137	) -> Result<(), APIError> {
6138		let txid = funding_transaction.compute_txid();
6139		self.funding_transaction_generated_intern(
6140			temporary_channel_id,
6141			counterparty_node_id,
6142			funding_transaction,
6143			false,
6144			|_| Ok(OutPoint { txid, index: output_index }),
6145			false,
6146		)
6147	}
6148
6149	/// Call this upon creation of a funding transaction for the given channel.
6150	///
6151	/// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
6152	/// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
6153	///
6154	/// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
6155	/// across the p2p network.
6156	///
6157	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
6158	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
6159	///
6160	/// May panic if the output found in the funding transaction is duplicative with some other
6161	/// channel (note that this should be trivially prevented by using unique funding transaction
6162	/// keys per-channel).
6163	///
6164	/// Do NOT broadcast the funding transaction yourself. When we have safely received our
6165	/// counterparty's signature the funding transaction will automatically be broadcast via the
6166	/// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
6167	///
6168	/// Note that this includes RBF or similar transaction replacement strategies - lightning does
6169	/// not currently support replacing a funding transaction on an existing channel. Instead,
6170	/// create a new channel with a conflicting funding transaction.
6171	///
6172	/// Note that, to keep miner incentives aligned with moving the blockchain forward, we recommend
6173	/// that the wallet software generating the funding transaction apply anti-fee sniping as
6174	/// implemented by the Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
6175	/// for more details.
6176	///
6177	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
6178	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
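	///
	/// A minimal usage sketch (not from this crate's examples), assuming an event-handling loop and
	/// an illustrative wallet helper `build_funding_tx` that constructs and signs, but does not
	/// broadcast, a transaction paying `channel_value_satoshis` to `output_script`:
	///
	/// ```ignore
	/// if let Event::FundingGenerationReady {
	/// 	temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, ..
	/// } = event {
	/// 	// Have the wallet create the funding transaction; LDK broadcasts it later.
	/// 	let funding_tx = build_funding_tx(channel_value_satoshis, output_script)?;
	/// 	channel_manager.funding_transaction_generated(
	/// 		temporary_channel_id, counterparty_node_id, funding_tx,
	/// 	)?;
	/// }
	/// ```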
6179	pub fn funding_transaction_generated(
6180		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
6181		funding_transaction: Transaction,
6182	) -> Result<(), APIError> {
6183		let temporary_chan = &[(&temporary_channel_id, &counterparty_node_id)];
6184		self.batch_funding_transaction_generated(temporary_chan, funding_transaction)
6185	}
6186
6187	/// **Unsafe**: This method does not validate the spent outputs. It is the caller's
6188	/// responsibility to ensure the spent outputs are SegWit and that the funding transaction has a
6189	/// final absolute locktime, i.e., a locktime strictly lower than the next block height.
6190	///
6191	/// For a safer method, please refer to [`ChannelManager::funding_transaction_generated`].
6192	///
6193	/// Call this in response to an [`Event::FundingGenerationReady`] event.
6194	///
6195	/// Note that if this method is called successfully, the funding transaction won't be
6196	/// broadcasted and you are expected to broadcast it manually when receiving the
6197	/// [`Event::FundingTxBroadcastSafe`] event.
6198	///
6199	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
6200	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
6201	///
6202	/// May panic if the funding output is duplicative with some other channel (note that this
6203	/// should be trivially prevented by using unique funding transaction keys per-channel).
6204	///
6205	/// Note to keep the miner incentives aligned in moving the blockchain forward, we recommend
6206	/// the wallet software generating the funding transaction to apply anti-fee sniping as
6207	/// implemented by Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/> for
6208	/// more details.
6209	///
6210	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
6211	/// [`Event::FundingTxBroadcastSafe`]: crate::events::Event::FundingTxBroadcastSafe
6212	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
6213	/// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
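	///
	/// A minimal usage sketch, assuming the caller has already validated the transaction and knows
	/// the funding output (`funding_txid` and `funding_index` are illustrative names):
	///
	/// ```ignore
	/// let funding = OutPoint { txid: funding_txid, index: funding_index };
	/// channel_manager.unsafe_manual_funding_transaction_generated(
	/// 	temporary_channel_id, counterparty_node_id, funding,
	/// )?;
	/// // Broadcast the funding transaction yourself once Event::FundingTxBroadcastSafe fires.
	/// ```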
6214	pub fn unsafe_manual_funding_transaction_generated(
6215		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint,
6216	) -> Result<(), APIError> {
6217		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6218
6219		let temporary_chans = &[(&temporary_channel_id, &counterparty_node_id)];
6220		let funding_type = FundingType::Unchecked(funding);
6221		self.batch_funding_transaction_generated_intern(temporary_chans, funding_type)
6222	}
6223
6224	/// Call this upon creation of a funding transaction for the given channel.
6225	///
6226	/// This method executes the same checks as [`ChannelManager::funding_transaction_generated`],
6227	/// but it does not automatically broadcast the funding transaction.
6228	///
6229	/// Call this in response to an [`Event::FundingGenerationReady`] event when you want to manually
6230	/// control the broadcast of the funding transaction.
6231	///
6232	/// The associated [`ChannelMonitor`] likewise avoids broadcasting holder commitment or CPFP
6233	/// transactions until the funding has been observed on chain. This
6234	/// prevents attempting to broadcast unconfirmable commitment transactions before the channel's
6235	/// funding exists in a block.
6236	///
6237	/// If HTLCs would otherwise approach timeout while the funding transaction has not yet appeared
6238	/// on chain, the monitor avoids broadcasting force-close transactions in manual-broadcast
6239	/// mode until the funding is seen. It may still close the channel off-chain (emitting a
6240	/// `ChannelClosed` event) to avoid accepting further updates. Ensure your application either
6241	/// broadcasts the funding transaction in a timely manner or avoids forwarding HTLCs that could
6242	/// approach timeout during this interim state.
6243	///
6244	/// See also [`ChannelMonitor::broadcast_latest_holder_commitment_txn`]. For channels using
6245	/// manual-broadcast, calling that method has no effect until the funding has been observed
6246	/// on-chain.
6247	///
6248	/// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
6249	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
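	///
	/// A minimal usage sketch, assuming `funding_tx` was built by the wallet in response to
	/// `Event::FundingGenerationReady` and that the application keeps it around for later broadcast:
	///
	/// ```ignore
	/// channel_manager.funding_transaction_generated_manual_broadcast(
	/// 	temporary_channel_id, counterparty_node_id, funding_tx.clone(),
	/// )?;
	/// // Later, once Event::FundingTxBroadcastSafe is received, broadcast `funding_tx` yourself.
	/// ```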
6250	pub fn funding_transaction_generated_manual_broadcast(
6251		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey,
6252		funding_transaction: Transaction,
6253	) -> Result<(), APIError> {
6254		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6255		self.batch_funding_transaction_generated_intern(
6256			&[(&temporary_channel_id, &counterparty_node_id)],
6257			FundingType::CheckedManualBroadcast(funding_transaction),
6258		)
6259	}
6260
6261	/// Call this upon creation of a batch funding transaction for the given channels.
6262	///
6263	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
6264	/// each individual channel and transaction output.
6265	///
6266	/// Do NOT broadcast the funding transaction yourself. This batch funding transaction
6267	/// will only be broadcast when we have safely received and persisted the counterparty's
6268	/// signature for each channel.
6269	///
6270	/// If there is an error, all channels in the batch are to be considered closed.
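	///
	/// A minimal usage sketch, assuming a single transaction `batch_tx` contains one funding output
	/// per channel and the `(ChannelId, PublicKey)` pairs come from earlier
	/// `Event::FundingGenerationReady` events:
	///
	/// ```ignore
	/// let temporary_channels = [(&temp_chan_id_a, &node_id_a), (&temp_chan_id_b, &node_id_b)];
	/// channel_manager.batch_funding_transaction_generated(&temporary_channels, batch_tx)?;
	/// ```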
6271	pub fn batch_funding_transaction_generated(
6272		&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction,
6273	) -> Result<(), APIError> {
6274		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6275		let funding_type = FundingType::Checked(funding_transaction);
6276		self.batch_funding_transaction_generated_intern(temporary_channels, funding_type)
6277	}
6278
6279	#[rustfmt::skip]
6280	fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
6281		let mut result = Ok(());
6282		if let FundingType::Checked(funding_transaction) |
6283			FundingType::CheckedManualBroadcast(funding_transaction) = &funding
6284		{
6285			if !funding_transaction.is_coinbase() {
6286				for inp in funding_transaction.input.iter() {
6287					if inp.witness.is_empty() {
6288						result = result.and(Err(APIError::APIMisuseError {
6289							err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
6290						}));
6291					}
6292				}
6293			}
6294
6295			if funding_transaction.output.len() > u16::max_value() as usize {
6296				result = result.and(Err(APIError::APIMisuseError {
6297					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
6298				}));
6299			}
6300			let height = self.best_block.read().unwrap().height;
6301			// Transactions are evaluated as final by network mempools if their locktime is strictly
6302			// lower than the next block height. However, the modules constituting our Lightning
6303			// node might not have perfect sync about their blockchain views. Thus, if the wallet
6304			// module is ahead of LDK, only allow one more block of headroom.
6305			if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
6306				funding_transaction.lock_time.is_block_height() &&
6307					funding_transaction.lock_time.to_consensus_u32() > height + 1
6308			{
6309				result = result.and(Err(APIError::APIMisuseError {
6310					err: "Funding transaction absolute timelock is non-final".to_owned()
6311				}));
6312			}
6313		}
6314
6315		let txid = funding.txid();
6316		let is_batch_funding = temporary_channels.len() > 1;
6317		let mut funding_batch_states = if is_batch_funding {
6318			Some(self.funding_batch_states.lock().unwrap())
6319		} else {
6320			None
6321		};
6322		let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
6323			match states.entry(txid) {
6324				btree_map::Entry::Occupied(_) => {
6325					result = result.clone().and(Err(APIError::APIMisuseError {
6326						err: "Batch funding transaction with the same txid already exists".to_owned()
6327					}));
6328					None
6329				},
6330				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
6331			}
6332		});
6333		let is_manual_broadcast = funding.is_manual_broadcast();
6334		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
6335			result = result.and_then(|_| self.funding_transaction_generated_intern(
6336				*temporary_channel_id,
6337				*counterparty_node_id,
6338				funding.transaction_or_dummy(),
6339				is_batch_funding,
6340				|chan| {
6341					let mut output_index = None;
6342					let expected_spk = chan.funding.get_funding_redeemscript().to_p2wsh();
6343					let outpoint = match &funding {
6344						FundingType::Checked(tx) | FundingType::CheckedManualBroadcast(tx) => {
6345							for (idx, outp) in tx.output.iter().enumerate() {
6346								if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.funding.get_value_satoshis() {
6347									if output_index.is_some() {
6348										return Err("Multiple outputs matched the expected script and value");
6349									}
6350									output_index = Some(idx as u16);
6351								}
6352							}
6353							if output_index.is_none() {
6354								return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
6355							}
6356							OutPoint { txid, index: output_index.unwrap() }
6357						},
6358						FundingType::Unchecked(outpoint) => outpoint.clone(),
6359					};
6360					if let Some(funding_batch_state) = funding_batch_state.as_mut() {
6361						// TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
6362						// need to fix this somehow to not rely on using the outpoint for the channel ID if we
6363						// want to support V2 batching here as well.
6364						funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
6365					}
6366					Ok(outpoint)
6367				},
6368				is_manual_broadcast)
6369			);
6370		}
6371		if let Err(ref e) = result {
6372			// Remaining channels need to be removed on any error.
6373			let e = format!("Error in transaction funding: {:?}", e);
6374			let mut channels_to_remove = Vec::new();
6375			channels_to_remove.extend(funding_batch_states.as_mut()
6376				.and_then(|states| states.remove(&txid))
6377				.into_iter().flatten()
6378				.map(|(chan_id, node_id, _state)| (chan_id, node_id))
6379			);
6380			channels_to_remove.extend(temporary_channels.iter()
6381				.map(|(&chan_id, &node_id)| (chan_id, node_id))
6382			);
6383			let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
6384			{
6385				let per_peer_state = self.per_peer_state.read().unwrap();
6386				for (channel_id, counterparty_node_id) in channels_to_remove {
6387					per_peer_state.get(&counterparty_node_id)
6388						.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
6389						.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
6390						.map(|(mut chan, mut peer_state)| {
6391							let reason = ClosureReason::ProcessingError { err: e.clone() };
6392							let err = ChannelError::Close((e.clone(), reason));
6393							let (_, e) =
6394								convert_channel_err!(self, peer_state, err, &mut chan);
6395							shutdown_results.push((Err(e), counterparty_node_id));
6396						});
6397				}
6398			}
6399			mem::drop(funding_batch_states);
6400			for (err, counterparty_node_id) in shutdown_results {
6401				let _ = handle_error!(self, err, counterparty_node_id);
6402			}
6403		}
6404		result
6405	}
6406
6407	/// Handles a signed funding transaction generated by interactive transaction construction and
6408	/// provided by the client. Should only be called in response to a [`FundingTransactionReadyForSigning`]
6409	/// event.
6410	///
6411	/// Do NOT broadcast the funding transaction yourself. When we have safely received our
6412	/// counterparty's signature(s) the funding transaction will automatically be broadcast via the
6413	/// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
6414	///
6415	/// `SIGHASH_ALL` MUST be used for all provided signatures, otherwise your
6416	/// funds can be held hostage!
6417	///
6418	/// LDK checks the following:
6419	///  * Each input spends an output that is one of P2WPKH, P2WSH, or P2TR.
6420	///    These were already checked by LDK when the inputs to be contributed were provided.
6421	///  * All signatures use the `SIGHASH_ALL` sighash type.
6422	///  * P2WPKH and P2TR key path spends are valid (verifies signatures)
6423	///
6424	/// NOTE:
6425	///  * When checking P2WSH spends, LDK tries to decode 70-72 byte witness elements as ECDSA
6426	///    signatures with a sighash flag. If the internal DER-decoding fails, then LDK just
6427	///    assumes it wasn't a signature and carries on with its checks. If the element can be decoded
6428	///    as an ECDSA signature, then the sighash flag must be `SIGHASH_ALL`.
6429	///  * When checking P2TR script-path spends, LDK assumes all elements of exactly 65 bytes
6430	///    with the last byte matching any valid sighash flag byte are schnorr signatures and checks
6431	///    that the sighash type is `SIGHASH_ALL`. If the last byte is not any valid sighash flag, the
6432	///    element is assumed not to be a signature and is ignored. Elements of 64 bytes are not
6433	///    checked because if they were schnorr signatures then they would implicitly be `SIGHASH_DEFAULT`
6434	///    which is an alias of `SIGHASH_ALL`.
6435	///
6436	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
6437	/// `counterparty_node_id` is provided.
6438	///
6439	/// Returns [`APIMisuseError`] when a channel is not in a state where it is expecting funding
6440	/// signatures or if any of the checks described above fail.
6441	///
6442	/// [`FundingTransactionReadyForSigning`]: events::Event::FundingTransactionReadyForSigning
6443	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
6444	/// [`APIMisuseError`]: APIError::APIMisuseError
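	///
	/// A minimal usage sketch, assuming the `FundingTransactionReadyForSigning` event exposes the
	/// unsigned transaction and that `sign_our_inputs` is an illustrative wallet helper which signs
	/// only the local inputs using `SIGHASH_ALL`:
	///
	/// ```ignore
	/// if let Event::FundingTransactionReadyForSigning {
	/// 	channel_id, counterparty_node_id, unsigned_transaction, ..
	/// } = event {
	/// 	// Sign only our own inputs; the counterparty's witnesses stay empty.
	/// 	let signed_tx = sign_our_inputs(unsigned_transaction)?;
	/// 	channel_manager.funding_transaction_signed(&channel_id, &counterparty_node_id, signed_tx)?;
	/// }
	/// ```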
6445	pub fn funding_transaction_signed(
6446		&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, transaction: Transaction,
6447	) -> Result<(), APIError> {
6448		let mut result = Ok(());
6449		PersistenceNotifierGuard::optionally_notify(self, || {
6450			let per_peer_state = self.per_peer_state.read().unwrap();
6451			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
6452			if peer_state_mutex_opt.is_none() {
6453				result = Err(APIError::ChannelUnavailable {
6454					err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
6455				});
6456				return NotifyOption::SkipPersistNoEvents;
6457			}
6458
6459			let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
6460
6461			match peer_state.channel_by_id.get_mut(channel_id) {
6462				Some(channel) => match channel.as_funded_mut() {
6463					Some(chan) => {
6464						let txid = transaction.compute_txid();
6465						let witnesses: Vec<_> = transaction
6466							.input
6467							.into_iter()
6468							.map(|input| input.witness)
6469							.filter(|witness| !witness.is_empty())
6470							.collect();
6471						let best_block_height = self.best_block.read().unwrap().height;
6472						match chan.funding_transaction_signed(
6473							txid,
6474							witnesses,
6475							best_block_height,
6476							&self.logger,
6477						) {
6478							Ok(FundingTxSigned {
6479								tx_signatures: Some(tx_signatures),
6480								funding_tx,
6481								splice_negotiated,
6482								splice_locked,
6483							}) => {
6484								if let Some(funding_tx) = funding_tx {
6485									self.broadcast_interactive_funding(
6486										chan,
6487										&funding_tx,
6488										&self.logger,
6489									);
6490								}
6491								if let Some(splice_negotiated) = splice_negotiated {
6492									self.pending_events.lock().unwrap().push_back((
6493										events::Event::SplicePending {
6494											channel_id: *channel_id,
6495											counterparty_node_id: *counterparty_node_id,
6496											user_channel_id: chan.context.get_user_id(),
6497											new_funding_txo: splice_negotiated.funding_txo,
6498											channel_type: splice_negotiated.channel_type,
6499											new_funding_redeem_script: splice_negotiated
6500												.funding_redeem_script,
6501										},
6502										None,
6503									));
6504								}
6505								peer_state.pending_msg_events.push(
6506									MessageSendEvent::SendTxSignatures {
6507										node_id: *counterparty_node_id,
6508										msg: tx_signatures,
6509									},
6510								);
6511								if let Some(splice_locked) = splice_locked {
6512									peer_state.pending_msg_events.push(
6513										MessageSendEvent::SendSpliceLocked {
6514											node_id: *counterparty_node_id,
6515											msg: splice_locked,
6516										},
6517									);
6518								}
6519								return NotifyOption::DoPersist;
6520							},
6521							Err(err) => {
6522								result = Err(err);
6523								return NotifyOption::SkipPersistNoEvents;
6524							},
6525							Ok(FundingTxSigned {
6526								tx_signatures: None,
6527								funding_tx,
6528								splice_negotiated,
6529								splice_locked,
6530							}) => {
6531								debug_assert!(funding_tx.is_none());
6532								debug_assert!(splice_negotiated.is_none());
6533								debug_assert!(splice_locked.is_none());
6534								return NotifyOption::SkipPersistNoEvents;
6535							},
6536						}
6537					},
6538					None => {
6539						result = Err(APIError::APIMisuseError {
6540							err: format!(
6541								"Channel with id {} not expecting funding signatures",
6542								channel_id
6543							),
6544						});
6545						return NotifyOption::SkipPersistNoEvents;
6546					},
6547				},
6548				None => {
6549					result = Err(APIError::ChannelUnavailable {
6550						err: format!(
6551							"Channel with id {} not found for the passed counterparty node_id {}",
6552							channel_id, counterparty_node_id
6553						),
6554					});
6555					return NotifyOption::SkipPersistNoEvents;
6556				},
6557			}
6558		});
6559
6560		result
6561	}
6562
6563	fn broadcast_interactive_funding(
6564		&self, channel: &mut FundedChannel<SP>, funding_tx: &Transaction, logger: &L,
6565	) {
6566		let logger = WithChannelContext::from(logger, channel.context(), None);
6567		log_info!(
6568			logger,
6569			"Broadcasting signed interactive funding transaction {}",
6570			funding_tx.compute_txid()
6571		);
6572		self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
6573		{
6574			let mut pending_events = self.pending_events.lock().unwrap();
6575			emit_channel_pending_event!(pending_events, channel);
6576		}
6577	}
6578
6579	/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
6580	///
6581	/// Once the updates are applied, each eligible channel (advertised with a known short channel
6582	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
6583	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
6584	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
6585	///
6586	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
6587	/// `counterparty_node_id` is provided.
6588	///
6589	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
6590	/// below [`MIN_CLTV_EXPIRY_DELTA`].
6591	///
6592	/// If an error is returned, none of the updates should be considered applied.
6593	///
6594	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
6595	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
6596	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
6597	/// [`BroadcastChannelUpdate`]: MessageSendEvent::BroadcastChannelUpdate
6598	/// [`ChannelUpdate`]: msgs::ChannelUpdate
6599	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
6600	/// [`APIMisuseError`]: APIError::APIMisuseError
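	///
	/// A minimal usage sketch, assuming `ChannelConfigUpdate` provides a `Default` implementation
	/// and these optional fields (only the fields set here are changed):
	///
	/// ```ignore
	/// let update = ChannelConfigUpdate {
	/// 	forwarding_fee_base_msat: Some(1_000),
	/// 	cltv_expiry_delta: Some(72),
	/// 	..Default::default()
	/// };
	/// channel_manager.update_partial_channel_config(&counterparty_node_id, &[channel_id], &update)?;
	/// ```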
6601	#[rustfmt::skip]
6602	pub fn update_partial_channel_config(
6603		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
6604	) -> Result<(), APIError> {
6605		if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
6606			return Err(APIError::APIMisuseError {
6607				err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
6608			});
6609		}
6610
6611		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6612		let per_peer_state = self.per_peer_state.read().unwrap();
6613		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
6614			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}") })?;
6615		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6616		let peer_state = &mut *peer_state_lock;
6617
6618		for channel_id in channel_ids {
6619			if !peer_state.has_channel(channel_id) {
6620				return Err(APIError::ChannelUnavailable {
6621					err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
6622				});
6623			};
6624		}
6625		for channel_id in channel_ids {
6626			if let Some(channel) = peer_state.channel_by_id.get_mut(channel_id) {
6627				let mut config = channel.context().config();
6628				config.apply(config_update);
6629				if !channel.context_mut().update_config(&config) {
6630					continue;
6631				}
6632				if let Some(channel) = channel.as_funded() {
6633					if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
6634						let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6635						pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate { msg });
6636					} else if peer_state.is_connected {
6637						if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
6638							peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
6639								node_id: channel.context.get_counterparty_node_id(),
6640								msg,
6641							});
6642						}
6643					}
6644				}
6645				continue;
6646			} else {
6647				// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
6648				debug_assert!(false);
6649				return Err(APIError::ChannelUnavailable {
6650					err: format!(
6651						"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
6652						channel_id, counterparty_node_id),
6653				});
6654			};
6655		}
6656		Ok(())
6657	}
6658
6659	/// Atomically updates the [`ChannelConfig`] for the given channels.
6660	///
6661	/// Once the updates are applied, each eligible channel (advertised with a known short channel
6662	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
6663	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
6664	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
6665	///
6666	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
6667	/// `counterparty_node_id` is provided.
6668	///
6669	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
6670	/// below [`MIN_CLTV_EXPIRY_DELTA`].
6671	///
6672	/// If an error is returned, none of the updates should be considered applied.
6673	///
6674	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
6675	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
6676	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
6677	/// [`BroadcastChannelUpdate`]: MessageSendEvent::BroadcastChannelUpdate
6678	/// [`ChannelUpdate`]: msgs::ChannelUpdate
6679	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
6680	/// [`APIMisuseError`]: APIError::APIMisuseError
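	///
	/// A minimal usage sketch, assuming `ChannelConfig` provides a `Default` implementation and a
	/// public `forwarding_fee_proportional_millionths` field:
	///
	/// ```ignore
	/// let mut config = ChannelConfig::default();
	/// config.forwarding_fee_proportional_millionths = 500;
	/// channel_manager.update_channel_config(&counterparty_node_id, &channel_ids, &config)?;
	/// ```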
6681	pub fn update_channel_config(
6682		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
6683	) -> Result<(), APIError> {
6684		self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into())
6685	}
6686
6687	/// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
6688	/// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
6689	///
6690	/// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time
6691	/// channel to a receiving node if the node lacks sufficient inbound liquidity.
6692	///
6693	/// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use
6694	/// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the
6695	/// receiver's invoice route hints. These route hints will signal to LDK to generate an
6696	/// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or
6697	/// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
6698	///
6699	/// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
6700	/// you from forwarding more than you received. See
6701	/// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount
6702	/// than expected.
6703	///
6704	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
6705	/// backwards.
6706	///
6707	/// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
6708	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
6709	/// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
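	///
	/// A minimal usage sketch of an LSP-style flow, assuming a just-in-time channel to the recipient
	/// has already been opened and is identified by the illustrative `jit_channel_id`:
	///
	/// ```ignore
	/// if let Event::HTLCIntercepted {
	/// 	intercept_id, expected_outbound_amount_msat, ..
	/// } = event {
	/// 	channel_manager.forward_intercepted_htlc(
	/// 		intercept_id, &jit_channel_id, recipient_node_id, expected_outbound_amount_msat,
	/// 	)?;
	/// }
	/// ```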
6710	// TODO: when we move to deciding the best outbound channel at forward time, only take
6711	// `next_node_id` and not `next_hop_channel_id`
6712	pub fn forward_intercepted_htlc(
6713		&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey,
6714		amt_to_forward_msat: u64,
6715	) -> Result<(), APIError> {
6716		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6717
6718		let outbound_scid_alias = {
6719			let peer_state_lock = self.per_peer_state.read().unwrap();
6720			let peer_state_mutex =
6721				peer_state_lock.get(&next_node_id).ok_or_else(|| APIError::ChannelUnavailable {
6722					err: format!(
6723						"Can't find a peer matching the passed counterparty node_id {next_node_id}"
6724					),
6725				})?;
6726			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6727			let peer_state = &mut *peer_state_lock;
6728			match peer_state.channel_by_id.get(next_hop_channel_id) {
6729				Some(chan) => {
6730					if let Some(funded_chan) = chan.as_funded() {
6731						if !funded_chan.context.is_usable() {
6732							return Err(APIError::ChannelUnavailable {
6733								err: format!(
6734									"Channel with id {next_hop_channel_id} not fully established"
6735								),
6736							});
6737						}
6738						funded_chan.context.outbound_scid_alias()
6739					} else {
6740						return Err(APIError::ChannelUnavailable {
6741						err: format!(
6742							"Channel with id {next_hop_channel_id} for the passed counterparty node_id {next_node_id} is still opening."
6743						)
6744					});
6745					}
6746				},
6747				None => {
6748					let error = format!(
6749						"Channel with id {next_hop_channel_id} not found for the passed counterparty node_id {next_node_id}"
6750					);
6751					let logger = WithContext::from(
6752						&self.logger,
6753						Some(next_node_id),
6754						Some(*next_hop_channel_id),
6755						None,
6756					);
6757					log_error!(logger, "{error} when attempting to forward intercepted HTLC");
6758					return Err(APIError::ChannelUnavailable { err: error });
6759				},
6760			}
6761		};
6762
6763		let payment = self
6764			.pending_intercepted_htlcs
6765			.lock()
6766			.unwrap()
6767			.remove(&intercept_id)
6768			.ok_or_else(|| APIError::APIMisuseError {
6769				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0)),
6770			})?;
6771
6772		let routing = match payment.forward_info.routing {
6773			PendingHTLCRouting::Forward {
6774				onion_packet,
6775				blinded,
6776				incoming_cltv_expiry,
6777				hold_htlc,
6778				..
6779			} => {
6780				debug_assert!(hold_htlc.is_none(), "Held intercept HTLCs should not be surfaced in an event until the recipient comes online");
6781				PendingHTLCRouting::Forward {
6782					onion_packet,
6783					blinded,
6784					incoming_cltv_expiry,
6785					hold_htlc,
6786					short_channel_id: outbound_scid_alias,
6787				}
6788			},
6789			_ => unreachable!(), // Only `PendingHTLCRouting::Forward`s are intercepted
6790		};
6791		let skimmed_fee_msat =
6792			payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
6793		let pending_htlc_info = PendingHTLCInfo {
6794			skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
6795			outgoing_amt_msat: amt_to_forward_msat,
6796			routing,
6797			..payment.forward_info
6798		};
6799
6800		let mut per_source_pending_forward = [(
6801			payment.prev_outbound_scid_alias,
6802			payment.prev_counterparty_node_id,
6803			payment.prev_funding_outpoint,
6804			payment.prev_channel_id,
6805			payment.prev_user_channel_id,
6806			vec![(pending_htlc_info, payment.prev_htlc_id)],
6807		)];
6808		self.forward_htlcs(&mut per_source_pending_forward);
6809		Ok(())
6810	}
6811
6812	/// Fails the intercepted HTLC indicated by intercept_id. Should only be called in response to
6813	/// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`].
6814	///
6815	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
6816	/// backwards.
6817	///
6818	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
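	///
	/// A minimal usage sketch, failing the HTLC back when the just-in-time channel cannot be opened
	/// (`intercept_id` comes from the corresponding `HTLCIntercepted` event):
	///
	/// ```ignore
	/// if jit_channel_open_failed {
	/// 	channel_manager.fail_intercepted_htlc(intercept_id)?;
	/// }
	/// ```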
6819	#[rustfmt::skip]
6820	pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
6821		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6822
6823		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
6824			.ok_or_else(|| APIError::APIMisuseError {
6825				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
6826			})?;
6827
6828		if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
6829			let htlc_source = HTLCSource::PreviousHopData(payment.htlc_previous_hop_data());
6830			let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::UnknownNextPeer);
6831			let destination = HTLCHandlingFailureType::InvalidForward { requested_forward_scid: short_channel_id };
6832			let hash = payment.forward_info.payment_hash;
6833			self.fail_htlc_backwards_internal(&htlc_source, &hash, &reason, destination, None);
6834		} else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted
6835
6836		Ok(())
6837	}
6838
6839	#[cfg(any(test, feature = "_test_utils"))]
6840	/// Process any pending inbound [`msgs::UpdateAddHTLC`] messages, decoding the onion and placing
6841	/// the pending HTLC in `ChannelManager::forward_htlcs` or
6842	/// `ChannelManager::pending_intercepted_htlcs` as well as generating relevant [`Event`]s.
6843	pub fn test_process_pending_update_add_htlcs(&self) -> bool {
6844		self.process_pending_update_add_htlcs()
6845	}
6846
6847	fn process_pending_update_add_htlcs(&self) -> bool {
6848		let mut should_persist = false;
6849		let mut decode_update_add_htlcs = new_hash_map();
6850		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
6851
6852		let get_htlc_failure_type = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
6853			if let Some(outgoing_scid) = outgoing_scid_opt {
6854				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
6855					Some((outgoing_counterparty_node_id, outgoing_channel_id)) => {
6856						HTLCHandlingFailureType::Forward {
6857							node_id: Some(*outgoing_counterparty_node_id),
6858							channel_id: *outgoing_channel_id,
6859						}
6860					},
6861					None => HTLCHandlingFailureType::InvalidForward {
6862						requested_forward_scid: outgoing_scid,
6863					},
6864				}
6865			} else {
6866				HTLCHandlingFailureType::Receive { payment_hash }
6867			}
6868		};
6869
6870		'outer_loop: for (incoming_scid_alias, update_add_htlcs) in decode_update_add_htlcs {
6871			// If any decoded update_add_htlcs were processed, we need to persist.
6872			should_persist = true;
6873			let incoming_channel_details_opt = self.do_funded_channel_callback(
6874				incoming_scid_alias,
6875				|chan: &mut FundedChannel<SP>| {
6876					let counterparty_node_id = chan.context.get_counterparty_node_id();
6877					let channel_id = chan.context.channel_id();
6878					let funding_txo = chan.funding.get_funding_txo().unwrap();
6879					let user_channel_id = chan.context.get_user_id();
6880					let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
6881					(
6882						counterparty_node_id,
6883						channel_id,
6884						funding_txo,
6885						user_channel_id,
6886						accept_underpaying_htlcs,
6887					)
6888				},
6889			);
6890			let (
6891				incoming_counterparty_node_id,
6892				incoming_channel_id,
6893				incoming_funding_txo,
6894				incoming_user_channel_id,
6895				incoming_accept_underpaying_htlcs,
6896			) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
6897				incoming_channel_details
6898			} else {
6899				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
6900				continue;
6901			};
6902
6903			let mut htlc_forwards = Vec::new();
6904			let mut htlc_fails = Vec::new();
6905			for update_add_htlc in &update_add_htlcs {
6906				let (next_hop, next_packet_details_opt) =
6907					match decode_incoming_update_add_htlc_onion(
6908						&update_add_htlc,
6909						&*self.node_signer,
6910						&*self.logger,
6911						&self.secp_ctx,
6912					) {
6913						Ok(decoded_onion) => decoded_onion,
6914
6915						Err((htlc_fail, reason)) => {
6916							let failure_type = HTLCHandlingFailureType::InvalidOnion;
6917							htlc_fails.push((htlc_fail, failure_type, reason.into()));
6918							continue;
6919						},
6920					};
6921
6922				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
6923				let outgoing_scid_opt =
6924					next_packet_details_opt.as_ref().and_then(|d| match d.outgoing_connector {
6925						HopConnector::ShortChannelId(scid) => Some(scid),
6926						HopConnector::Trampoline(_) => None,
6927					});
6928				let shared_secret = next_hop.shared_secret().secret_bytes();
6929
6930				// Nodes shouldn't expect us to hold HTLCs for them if we don't advertise htlc_hold feature
6931				// support.
6932				//
6933				// If we wanted to pretend to be a node that didn't understand the feature at all here, the
6934				// correct behavior would've been to disconnect the sender when we first received the
6935				// update_add message. However, this would make the `UserConfig::enable_htlc_hold` option
6936				// unsafe -- if our node switched the config option from on to off just after the sender
6937				// enqueued their update_add + CS, the sender would continue retransmitting those messages
6938				// and we would keep disconnecting them until the HTLC timed out.
6939				if update_add_htlc.hold_htlc.is_some()
6940					&& !BaseMessageHandler::provided_node_features(self).supports_htlc_hold()
6941				{
6942					let reason = LocalHTLCFailureReason::TemporaryNodeFailure;
6943					let htlc_fail = self.htlc_failure_from_update_add_err(
6944						&update_add_htlc,
6945						&incoming_counterparty_node_id,
6946						reason,
6947						is_intro_node_blinded_forward,
6948						&shared_secret,
6949					);
6950					let failure_type =
6951						get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
6952					htlc_fails.push((htlc_fail, failure_type, reason.into()));
6953					continue;
6954				}
6955
6956				// Process the HTLC on the incoming channel.
6957				match self.do_funded_channel_callback(
6958					incoming_scid_alias,
6959					|chan: &mut FundedChannel<SP>| {
6960						let logger = WithChannelContext::from(
6961							&self.logger,
6962							&chan.context,
6963							Some(update_add_htlc.payment_hash),
6964						);
6965						chan.can_accept_incoming_htlc(&self.fee_estimator, &logger)
6966					},
6967				) {
6968					Some(Ok(_)) => {},
6969					Some(Err(reason)) => {
6970						let htlc_fail = self.htlc_failure_from_update_add_err(
6971							&update_add_htlc,
6972							&incoming_counterparty_node_id,
6973							reason,
6974							is_intro_node_blinded_forward,
6975							&shared_secret,
6976						);
6977						let failure_type =
6978							get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
6979						htlc_fails.push((htlc_fail, failure_type, reason.into()));
6980						continue;
6981					},
6982					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
6983					None => continue 'outer_loop,
6984				}
6985
6986				// Now process the HTLC on the outgoing channel if it's a forward.
6987				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
6988					if let Err(reason) =
6989						self.can_forward_htlc(&update_add_htlc, next_packet_details)
6990					{
6991						let htlc_fail = self.htlc_failure_from_update_add_err(
6992							&update_add_htlc,
6993							&incoming_counterparty_node_id,
6994							reason,
6995							is_intro_node_blinded_forward,
6996							&shared_secret,
6997						);
6998						let failure_type =
6999							get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
7000						htlc_fails.push((htlc_fail, failure_type, reason.into()));
7001						continue;
7002					}
7003				}
7004
7005				match self.get_pending_htlc_info(
7006					&update_add_htlc,
7007					shared_secret,
7008					next_hop,
7009					incoming_accept_underpaying_htlcs,
7010					next_packet_details_opt.map(|d| d.next_packet_pubkey),
7011				) {
7012					Ok(info) => htlc_forwards.push((info, update_add_htlc.htlc_id)),
7013					Err(inbound_err) => {
7014						let failure_type =
7015							get_htlc_failure_type(outgoing_scid_opt, update_add_htlc.payment_hash);
7016						let htlc_failure = inbound_err.reason.into();
7017						let htlc_fail = self.construct_pending_htlc_fail_msg(
7018							&update_add_htlc,
7019							&incoming_counterparty_node_id,
7020							shared_secret,
7021							inbound_err,
7022						);
7023						htlc_fails.push((htlc_fail, failure_type, htlc_failure));
7024					},
7025				}
7026			}
7027
7028			// Process all of the forwards and failures for the channel in which the HTLCs were
7029			// proposed to as a batch.
7030			let pending_forwards = (
7031				incoming_scid_alias,
7032				incoming_counterparty_node_id,
7033				incoming_funding_txo,
7034				incoming_channel_id,
7035				incoming_user_channel_id,
7036				htlc_forwards.drain(..).collect(),
7037			);
7038			self.forward_htlcs(&mut [pending_forwards]);
7039			for (htlc_fail, failure_type, failure_reason) in htlc_fails.drain(..) {
7040				let failure = match htlc_fail {
7041					HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
7042						htlc_id: fail_htlc.htlc_id,
7043						err_packet: fail_htlc.into(),
7044					},
7045					HTLCFailureMsg::Malformed(fail_malformed_htlc) => {
7046						HTLCForwardInfo::FailMalformedHTLC {
7047							htlc_id: fail_malformed_htlc.htlc_id,
7048							sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
7049							failure_code: fail_malformed_htlc.failure_code.into(),
7050						}
7051					},
7052				};
7053				self.forward_htlcs
7054					.lock()
7055					.unwrap()
7056					.entry(incoming_scid_alias)
7057					.or_default()
7058					.push(failure);
7059				self.pending_events.lock().unwrap().push_back((
7060					events::Event::HTLCHandlingFailed {
7061						prev_channel_id: incoming_channel_id,
7062						failure_type,
7063						failure_reason: Some(failure_reason),
7064					},
7065					None,
7066				));
7067			}
7068		}
7069		should_persist
7070	}
7071
7072	/// Returns whether we have pending HTLC forwards that need to be processed via
7073	/// [`Self::process_pending_htlc_forwards`].
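	///
	/// A minimal usage sketch for a custom background processor that skips unnecessary wake-ups:
	///
	/// ```ignore
	/// if channel_manager.needs_pending_htlc_processing() {
	/// 	channel_manager.process_pending_htlc_forwards();
	/// }
	/// ```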
7074	pub fn needs_pending_htlc_processing(&self) -> bool {
7075		if !self.forward_htlcs.lock().unwrap().is_empty() {
7076			return true;
7077		}
7078		if !self.decode_update_add_htlcs.lock().unwrap().is_empty() {
7079			return true;
7080		}
7081		if self.pending_outbound_payments.needs_abandon_or_retry() {
7082			return true;
7083		}
7084		false
7085	}
7086
7087	/// Processes HTLCs which are pending, waiting on a random forward delay.
7088	///
7089	/// Will be regularly called by LDK's background processor.
7090	///
7091	/// Users implementing their own background processing logic should call this at irregular,
7092	/// randomly-distributed intervals.
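	///
	/// A minimal usage sketch for users not relying on LDK's background processor, assuming an
	/// illustrative `sleep_random_forward_delay` helper:
	///
	/// ```ignore
	/// loop {
	/// 	// Randomized sleeps batch forwards together and avoid leaking precise timing.
	/// 	sleep_random_forward_delay();
	/// 	channel_manager.process_pending_htlc_forwards();
	/// }
	/// ```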
7093	pub fn process_pending_htlc_forwards(&self) {
7094		if self
7095			.pending_htlc_forwards_processor
7096			.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
7097			.is_err()
7098		{
7099			return;
7100		}
7101
7102		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
7103			self.internal_process_pending_htlc_forwards()
7104		});
7105
7106		self.pending_htlc_forwards_processor.store(false, Ordering::Release);
7107	}
7108
7109	// Returns whether or not we need to re-persist.
7110	fn internal_process_pending_htlc_forwards(&self) -> NotifyOption {
7111		let mut should_persist = NotifyOption::SkipPersistNoEvents;
7112
7113		if self.process_pending_update_add_htlcs() {
7114			should_persist = NotifyOption::DoPersist;
7115		}
7116
7117		let mut new_events = VecDeque::new();
7118		let mut failed_forwards = Vec::new();
7119		let mut phantom_receives: Vec<PerSourcePendingForward> = Vec::new();
7120		let mut forward_htlcs = new_hash_map();
7121		mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
7122
7123		for (short_chan_id, mut pending_forwards) in forward_htlcs {
7124			should_persist = NotifyOption::DoPersist;
7125			if short_chan_id != 0 {
7126				self.process_forward_htlcs(
7127					short_chan_id,
7128					&mut pending_forwards,
7129					&mut failed_forwards,
7130					&mut phantom_receives,
7131				);
7132			} else {
7133				self.process_receive_htlcs(
7134					&mut pending_forwards,
7135					&mut new_events,
7136					&mut failed_forwards,
7137				);
7138			}
7139		}
7140
7141		let best_block_height = self.best_block.read().unwrap().height;
7142		let needs_persist = self.pending_outbound_payments.check_retry_payments(
7143			&self.router,
7144			|| self.list_usable_channels(),
7145			|| self.compute_inflight_htlcs(),
7146			&self.entropy_source,
7147			&self.node_signer,
7148			best_block_height,
7149			&self.pending_events,
7150			|args| self.send_payment_along_path(args),
7151		);
7152		if needs_persist {
7153			should_persist = NotifyOption::DoPersist;
7154		}
7155
7156		for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
7157			self.fail_htlc_backwards_internal(
7158				&htlc_source,
7159				&payment_hash,
7160				&failure_reason,
7161				destination,
7162				None,
7163			);
7164		}
7165		self.forward_htlcs(&mut phantom_receives);
7166
7167		// Freeing the holding cell here is relatively redundant - in practice we'll do it when we
7168		// next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
7169		// nice to do the work now if we can rather than while we're trying to get messages in the
7170		// network stack.
7171		if self.check_free_holding_cells() {
7172			should_persist = NotifyOption::DoPersist;
7173		}
7174
7175		if new_events.is_empty() {
7176			return should_persist;
7177		}
7178		let mut events = self.pending_events.lock().unwrap();
7179		events.append(&mut new_events);
7180		should_persist = NotifyOption::DoPersist;
7181
7182		should_persist
7183	}
7184
7185	/// Fail the list of provided HTLC forwards because the channel they were to be forwarded over no longer exists.
7186	fn forwarding_channel_not_found(
7187		&self, forward_infos: impl Iterator<Item = HTLCForwardInfo>, short_chan_id: u64,
7188		forwarding_counterparty: Option<PublicKey>, failed_forwards: &mut Vec<FailedHTLCForward>,
7189		phantom_receives: &mut Vec<PerSourcePendingForward>,
7190	) {
7191		for forward_info in forward_infos {
7192			match forward_info {
7193				HTLCForwardInfo::AddHTLC(payment) => {
7194					let PendingAddHTLCInfo {
7195						prev_outbound_scid_alias,
7196						prev_htlc_id,
7197						prev_channel_id,
7198						prev_funding_outpoint,
7199						prev_user_channel_id,
7200						prev_counterparty_node_id,
7201						forward_info:
7202							PendingHTLCInfo {
7203								ref routing,
7204								incoming_shared_secret,
7205								payment_hash,
7206								outgoing_amt_msat,
7207								outgoing_cltv_value,
7208								..
7209							},
7210					} = payment;
7211					let logger = WithContext::from(
7212						&self.logger,
7213						forwarding_counterparty,
7214						Some(prev_channel_id),
7215						Some(payment_hash),
7216					);
7217					let mut failure_handler =
7218						|msg, reason, err_data, phantom_ss, next_hop_unknown| {
7219							log_info!(logger, "Failed to accept/forward incoming HTLC: {}", msg);
7220
7221							let mut prev_hop = payment.htlc_previous_hop_data();
7222							// Override the phantom shared secret because it wasn't set in the originating
7223							// `PendingAddHTLCInfo` above; it is only calculated below, after detecting this as a
7224							// phantom payment.
7225							prev_hop.phantom_shared_secret = phantom_ss;
7226							let failure_type = if next_hop_unknown {
7227								HTLCHandlingFailureType::InvalidForward {
7228									requested_forward_scid: short_chan_id,
7229								}
7230							} else {
7231								HTLCHandlingFailureType::Receive { payment_hash }
7232							};
7233
7234							failed_forwards.push((
7235								HTLCSource::PreviousHopData(prev_hop),
7236								payment_hash,
7237								HTLCFailReason::reason(reason, err_data),
7238								failure_type,
7239							));
7240						};
7241
7242					if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
7243						let phantom_pubkey_res =
7244							self.node_signer.get_node_id(Recipient::PhantomNode);
7245						if phantom_pubkey_res.is_ok()
7246							&& fake_scid::is_valid_phantom(
7247								&self.fake_scid_rand_bytes,
7248								short_chan_id,
7249								&self.chain_hash,
7250							) {
7251							let decode_res = onion_utils::decode_next_payment_hop(
7252								Recipient::PhantomNode,
7253								&onion_packet.public_key.unwrap(),
7254								&onion_packet.hop_data,
7255								onion_packet.hmac,
7256								payment_hash,
7257								None,
7258								&*self.node_signer,
7259							);
7260							let next_hop = match decode_res {
7261								Ok(res) => res,
7262								Err(onion_utils::OnionDecodeErr::Malformed { err_msg, reason }) => {
7263									let sha256_of_onion =
7264										Sha256::hash(&onion_packet.hop_data).to_byte_array();
7265									// In this scenario, the phantom would have sent us an
7266									// `update_fail_malformed_htlc`, meaning here we encrypt the error as
7267									// if it came from us (the second-to-last hop) but containing the sha256
7268									// of the onion.
7269									failure_handler(
7270										err_msg,
7271										reason,
7272										sha256_of_onion.to_vec(),
7273										None,
7274										false,
7275									);
7276									continue;
7277								},
7278								Err(onion_utils::OnionDecodeErr::Relay {
7279									err_msg,
7280									reason,
7281									shared_secret,
7282									..
7283								}) => {
7284									let phantom_shared_secret = shared_secret.secret_bytes();
7285									failure_handler(
7286										err_msg,
7287										reason,
7288										Vec::new(),
7289										Some(phantom_shared_secret),
7290										false,
7291									);
7292									continue;
7293								},
7294							};
7295							let phantom_shared_secret = next_hop.shared_secret().secret_bytes();
7296							let current_height: u32 = self.best_block.read().unwrap().height;
7297							let create_res = create_recv_pending_htlc_info(
7298								next_hop,
7299								incoming_shared_secret,
7300								payment_hash,
7301								outgoing_amt_msat,
7302								outgoing_cltv_value,
7303								Some(phantom_shared_secret),
7304								false,
7305								None,
7306								current_height,
7307							);
7308							match create_res {
7309								Ok(info) => phantom_receives.push((
7310									prev_outbound_scid_alias,
7311									prev_counterparty_node_id,
7312									prev_funding_outpoint,
7313									prev_channel_id,
7314									prev_user_channel_id,
7315									vec![(info, prev_htlc_id)],
7316								)),
7317								Err(InboundHTLCErr { reason, err_data, msg }) => {
7318									failure_handler(
7319										msg,
7320										reason,
7321										err_data,
7322										Some(phantom_shared_secret),
7323										false,
7324									);
7325									continue;
7326								},
7327							}
7328						} else {
7329							let msg = format!(
7330								"Unknown short channel id {} for forward HTLC",
7331								short_chan_id
7332							);
7333							failure_handler(
7334								&msg,
7335								LocalHTLCFailureReason::UnknownNextPeer,
7336								Vec::new(),
7337								None,
7338								true,
7339							);
7340							continue;
7341						}
7342					} else {
7343						let msg =
7344							format!("Unknown short channel id {} for forward HTLC", short_chan_id);
7345						failure_handler(
7346							&msg,
7347							LocalHTLCFailureReason::UnknownNextPeer,
7348							Vec::new(),
7349							None,
7350							true,
7351						);
7352						continue;
7353					}
7354				},
7355				HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
7356					// Channel went away before we could fail it. This implies
7357					// the channel is now on chain and our counterparty is
7358					// trying to broadcast the HTLC-Timeout, but that's their
7359					// problem, not ours.
7360				},
7361			}
7362		}
7363	}
7364
7365	fn process_forward_htlcs(
7366		&self, short_chan_id: u64, pending_forwards: &mut Vec<HTLCForwardInfo>,
7367		failed_forwards: &mut Vec<FailedHTLCForward>,
7368		phantom_receives: &mut Vec<PerSourcePendingForward>,
7369	) {
7370		let mut forwarding_counterparty = None;
7371
7372		let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
7373		let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
7374			Some((cp_id, chan_id)) => (cp_id, chan_id),
7375			None => {
7376				self.forwarding_channel_not_found(
7377					pending_forwards.drain(..),
7378					short_chan_id,
7379					forwarding_counterparty,
7380					failed_forwards,
7381					phantom_receives,
7382				);
7383				return;
7384			},
7385		};
7386		forwarding_counterparty = Some(counterparty_node_id);
7387		let per_peer_state = self.per_peer_state.read().unwrap();
7388		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7389		if peer_state_mutex_opt.is_none() {
7390			self.forwarding_channel_not_found(
7391				pending_forwards.drain(..),
7392				short_chan_id,
7393				forwarding_counterparty,
7394				failed_forwards,
7395				phantom_receives,
7396			);
7397			return;
7398		}
7399		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
7400		let peer_state = &mut *peer_state_lock;
7401		let mut draining_pending_forwards = pending_forwards.drain(..);
7402		while let Some(forward_info) = draining_pending_forwards.next() {
7403			let queue_fail_htlc_res = match forward_info {
7404				HTLCForwardInfo::AddHTLC(ref payment) => {
7405					let htlc_source = HTLCSource::PreviousHopData(payment.htlc_previous_hop_data());
7406					let PendingAddHTLCInfo {
7407						prev_outbound_scid_alias,
7408						forward_info:
7409							PendingHTLCInfo {
7410								payment_hash,
7411								outgoing_amt_msat,
7412								outgoing_cltv_value,
7413								routing,
7414								skimmed_fee_msat,
7415								..
7416							},
7417						..
7418					} = payment;
7419					let (onion_packet, blinded) = match routing {
7420						PendingHTLCRouting::Forward { ref onion_packet, blinded, .. } => {
7421							(onion_packet, blinded)
7422						},
7423						_ => {
7424							panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
7425						},
7426					};
7427					let next_blinding_point = blinded.and_then(|b| {
7428						b.next_blinding_override.or_else(|| {
7429							let encrypted_tlvs_ss = self
7430								.node_signer
7431								.ecdh(Recipient::Node, &b.inbound_blinding_point, None)
7432								.unwrap()
7433								.secret_bytes();
7434							onion_utils::next_hop_pubkey(
7435								&self.secp_ctx,
7436								b.inbound_blinding_point,
7437								&encrypted_tlvs_ss,
7438							)
7439							.ok()
7440						})
7441					});
7442
7443					// Forward the HTLC over the most appropriate channel with the corresponding peer,
7444					// applying non-strict forwarding.
7445					// The channel with the least amount of outbound liquidity will be used to maximize the
7446					// probability of being able to successfully forward a subsequent HTLC.
7447					let maybe_optimal_channel = peer_state
7448						.channel_by_id
7449						.values_mut()
7450						.filter_map(Channel::as_funded_mut)
7451						.filter_map(|chan| {
7452							let balances = chan.get_available_balances(&self.fee_estimator);
7453							let is_in_range = (balances.next_outbound_htlc_minimum_msat
7454								..=balances.next_outbound_htlc_limit_msat)
7455								.contains(&outgoing_amt_msat);
7456							if is_in_range && chan.context.is_usable() {
7457								Some((chan, balances))
7458							} else {
7459								None
7460							}
7461						})
7462						.min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat)
7463						.map(|(c, _)| c);
7464					let optimal_channel = match maybe_optimal_channel {
7465						Some(chan) => chan,
7466						None => {
7467							// Fall back to the specified channel to return an appropriate error.
7468							if let Some(chan) = peer_state
7469								.channel_by_id
7470								.get_mut(&forward_chan_id)
7471								.and_then(Channel::as_funded_mut)
7472							{
7473								chan
7474							} else {
7475								let fwd_iter =
7476									core::iter::once(forward_info).chain(draining_pending_forwards);
7477								self.forwarding_channel_not_found(
7478									fwd_iter,
7479									short_chan_id,
7480									forwarding_counterparty,
7481									failed_forwards,
7482									phantom_receives,
7483								);
7484								break;
7485							}
7486						},
7487					};
7488
7489					let logger = WithChannelContext::from(
7490						&self.logger,
7491						&optimal_channel.context,
7492						Some(*payment_hash),
7493					);
7494					let channel_description =
7495						if optimal_channel.funding.get_short_channel_id() == Some(short_chan_id) {
7496							"specified"
7497						} else {
7498							"alternate"
7499						};
7500					log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
7501						prev_outbound_scid_alias, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
7502					if let Err((reason, msg)) = optimal_channel.queue_add_htlc(
7503						*outgoing_amt_msat,
7504						*payment_hash,
7505						*outgoing_cltv_value,
7506						htlc_source.clone(),
7507						onion_packet.clone(),
7508						*skimmed_fee_msat,
7509						next_blinding_point,
7510						&self.fee_estimator,
7511						&&logger,
7512					) {
7513						log_trace!(
7514							logger,
7515							"Failed to forward HTLC with payment_hash {} to peer {}: {}",
7516							&payment_hash,
7517							&counterparty_node_id,
7518							msg
7519						);
7520
7521						if let Some(chan) = peer_state
7522							.channel_by_id
7523							.get_mut(&forward_chan_id)
7524							.and_then(Channel::as_funded_mut)
7525						{
7526							let data = self.get_htlc_inbound_temp_fail_data(reason);
7527							let failure_type = HTLCHandlingFailureType::Forward {
7528								node_id: Some(chan.context.get_counterparty_node_id()),
7529								channel_id: forward_chan_id,
7530							};
7531							failed_forwards.push((
7532								htlc_source,
7533								*payment_hash,
7534								HTLCFailReason::reason(reason, data),
7535								failure_type,
7536							));
7537						} else {
7538							self.forwarding_channel_not_found(
7539								core::iter::once(forward_info).chain(draining_pending_forwards),
7540								short_chan_id,
7541								forwarding_counterparty,
7542								failed_forwards,
7543								phantom_receives,
7544							);
7545							break;
7546						}
7547					}
7548					None
7549				},
7550				HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
7551					if let Some(chan) = peer_state
7552						.channel_by_id
7553						.get_mut(&forward_chan_id)
7554						.and_then(Channel::as_funded_mut)
7555					{
7556						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
7557						log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
7558						Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
7559					} else {
7560						self.forwarding_channel_not_found(
7561							core::iter::once(forward_info).chain(draining_pending_forwards),
7562							short_chan_id,
7563							forwarding_counterparty,
7564							failed_forwards,
7565							phantom_receives,
7566						);
7567						break;
7568					}
7569				},
7570				HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
7571					if let Some(chan) = peer_state
7572						.channel_by_id
7573						.get_mut(&forward_chan_id)
7574						.and_then(Channel::as_funded_mut)
7575					{
7576						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
7577						log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
7578						let res = chan.queue_fail_malformed_htlc(
7579							htlc_id,
7580							failure_code,
7581							sha256_of_onion,
7582							&&logger,
7583						);
7584						Some((res, htlc_id))
7585					} else {
7586						self.forwarding_channel_not_found(
7587							core::iter::once(forward_info).chain(draining_pending_forwards),
7588							short_chan_id,
7589							forwarding_counterparty,
7590							failed_forwards,
7591							phantom_receives,
7592						);
7593						break;
7594					}
7595				},
7596			};
7597			if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
7598				if let Err(e) = queue_fail_htlc_res {
7599					if let ChannelError::Ignore(msg) = e {
7600						if let Some(chan) = peer_state
7601							.channel_by_id
7602							.get_mut(&forward_chan_id)
7603							.and_then(Channel::as_funded_mut)
7604						{
7605							let logger =
7606								WithChannelContext::from(&self.logger, &chan.context, None);
7607							log_trace!(
7608								logger,
7609								"Failed to fail HTLC with ID {} backwards to short_id {}: {}",
7610								htlc_id,
7611								short_chan_id,
7612								msg
7613							);
7614						}
7615					} else {
7616						panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
7617					}
7618					// Fail-backs are best-effort; we probably already have one pending, and if we
7619					// can't queue another that's OK: if the channel has gone on chain, sending the
7620					// HTLC-Timeout is the counterparty's problem.
7621				}
7622			}
7623		}
7624	}
7625
7626	fn process_receive_htlcs(
7627		&self, pending_forwards: &mut Vec<HTLCForwardInfo>,
7628		new_events: &mut VecDeque<(Event, Option<EventCompletionAction>)>,
7629		failed_forwards: &mut Vec<FailedHTLCForward>,
7630	) {
7631		'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
7632			match forward_info {
7633				HTLCForwardInfo::AddHTLC(payment) => {
7634					let prev_hop = payment.htlc_previous_hop_data();
7635					let PendingAddHTLCInfo {
7636						prev_channel_id,
7637						prev_funding_outpoint,
7638						forward_info:
7639							PendingHTLCInfo {
7640								routing,
7641								payment_hash,
7642								incoming_amt_msat,
7643								outgoing_amt_msat,
7644								skimmed_fee_msat,
7645								..
7646							},
7647						..
7648					} = payment;
7649					let blinded_failure = routing.blinded_failure();
7650					let (
7651						cltv_expiry,
7652						onion_payload,
7653						payment_data,
7654						payment_context,
7655						phantom_shared_secret,
7656						mut onion_fields,
7657						has_recipient_created_payment_secret,
7658						invoice_request_opt,
7659					) = match routing {
7660						PendingHTLCRouting::Receive {
7661							payment_data,
7662							payment_metadata,
7663							payment_context,
7664							incoming_cltv_expiry,
7665							phantom_shared_secret,
7666							custom_tlvs,
7667							requires_blinded_error: _,
7668						} => {
7669							let _legacy_hop_data = Some(payment_data.clone());
7670							let onion_fields = RecipientOnionFields {
7671								payment_secret: Some(payment_data.payment_secret),
7672								payment_metadata,
7673								custom_tlvs,
7674							};
7675							(
7676								incoming_cltv_expiry,
7677								OnionPayload::Invoice { _legacy_hop_data },
7678								Some(payment_data),
7679								payment_context,
7680								phantom_shared_secret,
7681								onion_fields,
7682								true,
7683								None,
7684							)
7685						},
7686						PendingHTLCRouting::ReceiveKeysend {
7687							payment_data,
7688							payment_preimage,
7689							payment_metadata,
7690							incoming_cltv_expiry,
7691							custom_tlvs,
7692							requires_blinded_error: _,
7693							has_recipient_created_payment_secret,
7694							payment_context,
7695							invoice_request,
7696						} => {
7697							let onion_fields = RecipientOnionFields {
7698								payment_secret: payment_data
7699									.as_ref()
7700									.map(|data| data.payment_secret),
7701								payment_metadata,
7702								custom_tlvs,
7703							};
7704							(
7705								incoming_cltv_expiry,
7706								OnionPayload::Spontaneous(payment_preimage),
7707								payment_data,
7708								payment_context,
7709								None,
7710								onion_fields,
7711								has_recipient_created_payment_secret,
7712								invoice_request,
7713							)
7714						},
7715						_ => {
7716							panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
7717						},
7718					};
7719					let claimable_htlc = ClaimableHTLC {
7720						prev_hop,
7721						// We differentiate the received value from the sender intended value
7722						// if possible so that we don't prematurely mark MPP payments complete
7723						// if routing nodes overpay
7724						value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
7725						sender_intended_value: outgoing_amt_msat,
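						// For example (illustrative values): if the sender intended a 50_000 msat part
						// but a routing hop overpaid and we received 50_100 msat, `value` records the
						// 50_100 msat we can actually claim while `sender_intended_value` stays 50_000
						// for the MPP-completion math below.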
7726						timer_ticks: 0,
7727						total_value_received: None,
7728						total_msat: if let Some(data) = &payment_data {
7729							data.total_msat
7730						} else {
7731							outgoing_amt_msat
7732						},
7733						cltv_expiry,
7734						onion_payload,
7735						counterparty_skimmed_fee_msat: skimmed_fee_msat,
7736					};
7737
7738					let mut committed_to_claimable = false;
7739
7740					macro_rules! fail_htlc {
7741						($htlc: expr, $payment_hash: expr) => {
7742							debug_assert!(!committed_to_claimable);
7743							let err_data = invalid_payment_err_data(
7744								$htlc.value,
7745								self.best_block.read().unwrap().height,
7746							);
7747							let counterparty_node_id = $htlc.prev_hop.counterparty_node_id;
7748							let incoming_packet_shared_secret =
7749								$htlc.prev_hop.incoming_packet_shared_secret;
7750							let prev_outbound_scid_alias = $htlc.prev_hop.prev_outbound_scid_alias;
7751							failed_forwards.push((
7752								HTLCSource::PreviousHopData(HTLCPreviousHopData {
7753									prev_outbound_scid_alias,
7754									user_channel_id: $htlc.prev_hop.user_channel_id,
7755									counterparty_node_id,
7756									channel_id: prev_channel_id,
7757									outpoint: prev_funding_outpoint,
7758									htlc_id: $htlc.prev_hop.htlc_id,
7759									incoming_packet_shared_secret,
7760									phantom_shared_secret,
7761									blinded_failure,
7762									cltv_expiry: Some(cltv_expiry),
7763								}),
7764								payment_hash,
7765								HTLCFailReason::reason(
7766									LocalHTLCFailureReason::IncorrectPaymentDetails,
7767									err_data,
7768								),
7769								HTLCHandlingFailureType::Receive { payment_hash: $payment_hash },
7770							));
7771							continue 'next_forwardable_htlc;
7772						};
7773					}
7774					let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
7775					let mut receiver_node_id = self.our_network_pubkey;
7776					if phantom_shared_secret.is_some() {
7777						receiver_node_id = self
7778							.node_signer
7779							.get_node_id(Recipient::PhantomNode)
7780							.expect("Failed to get node_id for phantom node recipient");
7781					}
7782
7783					macro_rules! check_total_value {
7784						($purpose: expr) => {{
7785							let mut payment_claimable_generated = false;
7786							let is_keysend = $purpose.is_keysend();
7787							let mut claimable_payments = self.claimable_payments.lock().unwrap();
7788							if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
7789								fail_htlc!(claimable_htlc, payment_hash);
7790							}
7791							let ref mut claimable_payment = claimable_payments.claimable_payments
7792								.entry(payment_hash)
7793								// Note that if we insert here we MUST NOT fail_htlc!()
7794								.or_insert_with(|| {
7795									committed_to_claimable = true;
7796									ClaimablePayment {
7797										purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
7798									}
7799								});
7800							if $purpose != claimable_payment.purpose {
7801								let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
7802								log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
7803								fail_htlc!(claimable_htlc, payment_hash);
7804							}
7805							if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
7806								if earlier_fields.check_merge(&mut onion_fields).is_err() {
7807									fail_htlc!(claimable_htlc, payment_hash);
7808								}
7809							} else {
7810								claimable_payment.onion_fields = Some(onion_fields);
7811							}
7812							let mut total_value = claimable_htlc.sender_intended_value;
7813							let mut earliest_expiry = claimable_htlc.cltv_expiry;
7814							for htlc in claimable_payment.htlcs.iter() {
7815								total_value += htlc.sender_intended_value;
7816								earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
7817								if htlc.total_msat != claimable_htlc.total_msat {
7818									log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
7819										&payment_hash, claimable_htlc.total_msat, htlc.total_msat);
7820									total_value = msgs::MAX_VALUE_MSAT;
7821								}
7822								if total_value >= msgs::MAX_VALUE_MSAT { break; }
7823							}
7824							// The condition determining whether an MPP is complete must
7825							// match exactly the condition used in `timer_tick_occurred`
7826							if total_value >= msgs::MAX_VALUE_MSAT {
7827								fail_htlc!(claimable_htlc, payment_hash);
7828							} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
7829								log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
7830									&payment_hash);
7831								fail_htlc!(claimable_htlc, payment_hash);
7832							} else if total_value >= claimable_htlc.total_msat {
7833								#[allow(unused_assignments)] {
7834									committed_to_claimable = true;
7835								}
7836								claimable_payment.htlcs.push(claimable_htlc);
7837								let amount_msat =
7838									claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
7839								claimable_payment.htlcs.iter_mut()
7840									.for_each(|htlc| htlc.total_value_received = Some(amount_msat));
7841								let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
7842									.map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
7843								debug_assert!(total_value.saturating_sub(amount_msat) <=
7844									counterparty_skimmed_fee_msat);
7845								claimable_payment.htlcs.sort();
7846								let payment_id =
7847									claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
7848								new_events.push_back((events::Event::PaymentClaimable {
7849									receiver_node_id: Some(receiver_node_id),
7850									payment_hash,
7851									purpose: $purpose,
7852									amount_msat,
7853									counterparty_skimmed_fee_msat,
7854									receiving_channel_ids: claimable_payment.receiving_channel_ids(),
7855									claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
7856									onion_fields: claimable_payment.onion_fields.clone(),
7857									payment_id: Some(payment_id),
7858								}, None));
7859								payment_claimable_generated = true;
7860							} else {
7861								// Nothing to do - we haven't reached the total
7862								// payment value yet, wait until we receive more
7863								// MPP parts.
7864								claimable_payment.htlcs.push(claimable_htlc);
7865								#[allow(unused_assignments)] {
7866									committed_to_claimable = true;
7867								}
7868							}
7869							payment_claimable_generated
7870						}}
7871					}
7872
7873					// Check that the payment hash and secret are known. Note that we
7874					// MUST take care to handle the "unknown payment hash" and
7875					// "incorrect payment secret" cases here identically or we'd expose
7876					// that we are the ultimate recipient of the given payment hash.
7877					// Further, we must not expose whether we have any other HTLCs
7878					// associated with the same payment_hash pending or not.
7879					let payment_preimage = if has_recipient_created_payment_secret {
7880						if let Some(ref payment_data) = payment_data {
7881							let verify_res = inbound_payment::verify(
7882								payment_hash,
7883								&payment_data,
7884								self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
7885								&self.inbound_payment_key,
7886								&self.logger,
7887							);
7888							let (payment_preimage, min_final_cltv_expiry_delta) = match verify_res {
7889								Ok(result) => result,
7890								Err(()) => {
7891									log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
7892									fail_htlc!(claimable_htlc, payment_hash);
7893								},
7894							};
7895							if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
7896								let expected_min_expiry_height = (self.current_best_block().height
7897									+ min_final_cltv_expiry_delta as u32)
7898									as u64;
7899								if (cltv_expiry as u64) < expected_min_expiry_height {
7900									log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
7901									&payment_hash, cltv_expiry, expected_min_expiry_height);
7902									fail_htlc!(claimable_htlc, payment_hash);
7903								}
7904							}
7905							payment_preimage
7906						} else {
7907							fail_htlc!(claimable_htlc, payment_hash);
7908						}
7909					} else {
7910						None
7911					};
7912					match claimable_htlc.onion_payload {
7913						OnionPayload::Invoice { .. } => {
7914							let payment_data = payment_data.unwrap();
7915							let from_parts_res = events::PaymentPurpose::from_parts(
7916								payment_preimage,
7917								payment_data.payment_secret,
7918								payment_context,
7919							);
7920							let purpose = match from_parts_res {
7921								Ok(purpose) => purpose,
7922								Err(()) => {
7923									fail_htlc!(claimable_htlc, payment_hash);
7924								},
7925							};
7926							check_total_value!(purpose);
7927						},
7928						OnionPayload::Spontaneous(keysend_preimage) => {
7929							let purpose = if let Some(PaymentContext::AsyncBolt12Offer(
7930								AsyncBolt12OfferContext { offer_nonce },
7931							)) = payment_context
7932							{
7933								let payment_data = match payment_data {
7934									Some(data) => data,
7935									None => {
7936										debug_assert!(
7937											false,
7938											"We checked that payment_data is Some above"
7939										);
7940										fail_htlc!(claimable_htlc, payment_hash);
7941									},
7942								};
7943
7944								let verify_opt = invoice_request_opt.and_then(|invreq| {
7945									invreq
7946										.verify_using_recipient_data(
7947											offer_nonce,
7948											&self.inbound_payment_key,
7949											&self.secp_ctx,
7950										)
7951										.ok()
7952								});
7953								let verified_invreq = match verify_opt {
7954									Some(verified_invreq) => {
7955										if let Some(invreq_amt_msat) =
7956											verified_invreq.amount_msats()
7957										{
7958											if payment_data.total_msat < invreq_amt_msat {
7959												fail_htlc!(claimable_htlc, payment_hash);
7960											}
7961										}
7962										verified_invreq
7963									},
7964									None => {
7965										fail_htlc!(claimable_htlc, payment_hash);
7966									},
7967								};
7968								let payment_purpose_context =
7969									PaymentContext::Bolt12Offer(Bolt12OfferContext {
7970										offer_id: verified_invreq.offer_id,
7971										invoice_request: verified_invreq.fields(),
7972									});
7973								let from_parts_res = events::PaymentPurpose::from_parts(
7974									Some(keysend_preimage),
7975									payment_data.payment_secret,
7976									Some(payment_purpose_context),
7977								);
7978								match from_parts_res {
7979									Ok(purpose) => purpose,
7980									Err(()) => {
7981										fail_htlc!(claimable_htlc, payment_hash);
7982									},
7983								}
7984							} else if payment_context.is_some() {
7985								log_trace!(self.logger, "Failing new HTLC with payment_hash {}: received a keysend payment to a non-async payments context {:#?}", payment_hash, payment_context);
7986								fail_htlc!(claimable_htlc, payment_hash);
7987							} else {
7988								events::PaymentPurpose::SpontaneousPayment(keysend_preimage)
7989							};
7990							check_total_value!(purpose);
7991						},
7992					}
7993				},
7994				HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
7995					panic!("Got pending fail of our own HTLC");
7996				},
7997			}
7998		}
7999	}
8000
8001	/// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
8002	///
8003	/// Expects the caller to have a total_consistency_lock read lock.
8004	#[rustfmt::skip]
8005	fn process_background_events(&self) -> NotifyOption {
8006		debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
8007
8008		self.background_events_processed_since_startup.store(true, Ordering::Release);
8009
8010		let mut background_events = Vec::new();
8011		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
8012		if background_events.is_empty() {
8013			return NotifyOption::SkipPersistNoEvents;
8014		}
8015
8016		for event in background_events.drain(..) {
8017			match event {
8018				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
8019					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
8020				},
8021				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
8022					self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
8023				},
8024			}
8025		}
8026		NotifyOption::DoPersist
8027	}
8028
8029	#[cfg(any(test, feature = "_test_utils"))]
8030	/// Process background events, for functional testing
8031	pub fn test_process_background_events(&self) {
8032		let _lck = self.total_consistency_lock.read().unwrap();
8033		let _ = self.process_background_events();
8034	}
8035
8036	#[rustfmt::skip]
8037	fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut FundedChannel<SP>, new_feerate: u32) -> NotifyOption {
8038		if !chan.funding.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
8039
8040		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8041
8042		let current_feerate = chan.context.get_feerate_sat_per_1000_weight();
8043		let update_fee_required = match new_feerate.cmp(&current_feerate) {
8044			cmp::Ordering::Greater => true,
8045			cmp::Ordering::Equal => false,
8046			// Only bother with a fee update if feerate has decreased at least half.
8047			// Only bother with a fee update if the feerate has decreased by at least half.
8048		};
8049		if !update_fee_required {
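		// For example (illustrative numbers): with a current feerate of 2_000 sat per 1000 weight,
		// a new estimate of 900 triggers an update (900 * 2 <= 2_000) while 1_100 does not.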
8050			return NotifyOption::SkipPersistNoEvents
8051		}
8052
8053		if !chan.context.is_live() {
8054			log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
8055				chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
8056			return NotifyOption::SkipPersistNoEvents;
8057		}
8058		log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
8059			&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
8060
8061		chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
8062		NotifyOption::DoPersist
8063	}
8064
8065	#[cfg(any(test, fuzzing, feature = "_externalize_tests"))]
8066	/// In chanmon_consistency we want to sometimes do the channel fee updates done in
8067	/// timer_tick_occurred, but we can't generate the disabled channel updates as it considers
8068	/// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
8069	/// it wants to detect). Thus, we have a variant exposed here for its benefit.
8070	#[rustfmt::skip]
8071	pub fn maybe_update_chan_fees(&self) {
8072		PersistenceNotifierGuard::optionally_notify(self, || {
8073			let mut should_persist = NotifyOption::SkipPersistNoEvents;
8074			let mut feerate_cache = new_hash_map();
8075
8076			let per_peer_state = self.per_peer_state.read().unwrap();
8077			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
8078				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8079				let peer_state = &mut *peer_state_lock;
8080				for (chan_id, chan) in peer_state.channel_by_id.iter_mut()
8081					.filter_map(|(chan_id, chan)| chan.as_funded_mut().map(|chan| (chan_id, chan)))
8082				{
8083					let channel_type = chan.funding.get_channel_type();
8084					let new_feerate = feerate_cache.get(channel_type).copied().or_else(|| {
8085						let feerate = selected_commitment_sat_per_1000_weight(&self.fee_estimator, &channel_type);
8086						feerate_cache.insert(channel_type.clone(), feerate);
8087						Some(feerate)
8088					}).unwrap();
8089					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
8090					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
8091				}
8092			}
8093
8094			should_persist
8095		});
8096	}
8097
8098	/// Performs actions which should happen on startup and roughly once per minute thereafter.
8099	///
8100	/// This currently includes:
8101	///  * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
8102	///  * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
8103	///    than a minute, informing the network that they should no longer attempt to route over
8104	///    the channel.
8105	///  * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
8106	///    with the current [`ChannelConfig`].
8107	///  * Removing peers which have disconnected and no longer have any channels.
8108	///  * Force-closing and removing channels which have not completed establishment in a timely manner.
8109	///  * Forgetting about stale outbound payments, either those that have already been fulfilled
8110	///    or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
8111	///    The latter is determined using the system clock in `std` and the highest seen block time
8112	///    minus two hours in non-`std`.
8113	///
8114	/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
8115	/// estimate fetches.
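	///
	/// A minimal sketch of driving this from your own timer (in practice the
	/// `lightning-background-processor` crate calls it for you); `channel_manager` is an assumed
	/// handle to your [`ChannelManager`]:
	///
	/// ```ignore
	/// loop {
	/// 	std::thread::sleep(std::time::Duration::from_secs(60));
	/// 	channel_manager.timer_tick_occurred();
	/// }
	/// ```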
8116	///
8117	/// [`ChannelUpdate`]: msgs::ChannelUpdate
8118	/// [`ChannelConfig`]: crate::util::config::ChannelConfig
8119	pub fn timer_tick_occurred(&self) {
8120		PersistenceNotifierGuard::optionally_notify(self, || {
8121			let mut should_persist = NotifyOption::SkipPersistNoEvents;
8122
8123			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
8124			let mut timed_out_mpp_htlcs = Vec::new();
8125			let mut pending_peers_awaiting_removal = Vec::new();
8126			let mut feerate_cache = new_hash_map();
8127
8128			{
8129				let per_peer_state = self.per_peer_state.read().unwrap();
8130				for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
8131					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8132					let peer_state = &mut *peer_state_lock;
8133					let pending_msg_events = &mut peer_state.pending_msg_events;
8134					let counterparty_node_id = *counterparty_node_id;
8135					peer_state.channel_by_id.retain(|chan_id, chan| {
8136						match chan.as_funded_mut() {
8137							Some(funded_chan) => {
8138								let channel_type = funded_chan.funding.get_channel_type();
8139								let new_feerate = feerate_cache.get(channel_type).copied().or_else(|| {
8140									let feerate = selected_commitment_sat_per_1000_weight(&self.fee_estimator, &channel_type);
8141									feerate_cache.insert(channel_type.clone(), feerate);
8142									Some(feerate)
8143								}).unwrap();
8144								let chan_needs_persist = self.update_channel_fee(chan_id, funded_chan, new_feerate);
8145								if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
8146
8147								if let Err(e) = funded_chan.timer_check_closing_negotiation_progress() {
8148									let (needs_close, err) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL);
8149									handle_errors.push((Err(err), counterparty_node_id));
8150									if needs_close { return false; }
8151								}
8152
8153								match funded_chan.channel_update_status() {
8154									ChannelUpdateStatus::Enabled if !funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
8155									ChannelUpdateStatus::Disabled if funded_chan.context.is_live() => funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
8156									ChannelUpdateStatus::DisabledStaged(_) if funded_chan.context.is_live()
8157										=> funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
8158									ChannelUpdateStatus::EnabledStaged(_) if !funded_chan.context.is_live()
8159										=> funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
8160									ChannelUpdateStatus::DisabledStaged(mut n) if !funded_chan.context.is_live() => {
8161										n += 1;
8162										if n >= DISABLE_GOSSIP_TICKS {
8163											funded_chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
8164											if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) {
8165												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8166												pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
8167													msg: update
8168												});
8169											}
8170											should_persist = NotifyOption::DoPersist;
8171										} else {
8172											funded_chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
8173										}
8174									},
8175									ChannelUpdateStatus::EnabledStaged(mut n) if funded_chan.context.is_live() => {
8176										n += 1;
8177										if n >= ENABLE_GOSSIP_TICKS {
8178											funded_chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
8179											if let Ok(update) = self.get_channel_update_for_broadcast(&funded_chan) {
8180												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8181												pending_broadcast_messages.push(MessageSendEvent::BroadcastChannelUpdate {
8182													msg: update
8183												});
8184											}
8185											should_persist = NotifyOption::DoPersist;
8186										} else {
8187											funded_chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
8188										}
8189									},
8190									_ => {},
8191								}
8192
8193								funded_chan.context.maybe_expire_prev_config();
8194
8195								if peer_state.is_connected {
8196									if funded_chan.should_disconnect_peer_awaiting_response() {
8197										let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None);
8198										log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
8199												counterparty_node_id, chan_id);
8200										pending_msg_events.push(MessageSendEvent::HandleError {
8201											node_id: counterparty_node_id,
8202											action: msgs::ErrorAction::DisconnectPeerWithWarning {
8203												msg: msgs::WarningMessage {
8204													channel_id: *chan_id,
8205													data: "Disconnecting due to timeout awaiting response".to_owned(),
8206												},
8207											},
8208										});
8209									}
8210								}
8211
8212								true
8213							},
8214							None => {
8215								chan.context_mut().maybe_expire_prev_config();
8216								let unfunded_context = chan.unfunded_context_mut().expect("channel should be unfunded");
8217								if unfunded_context.should_expire_unfunded_channel() {
8218									let context = chan.context();
8219									let logger = WithChannelContext::from(&self.logger, context, None);
8220									log_error!(logger,
8221										"Force-closing pending channel with ID {} for not establishing in a timely manner",
8222										context.channel_id());
8223									let reason = ClosureReason::FundingTimedOut;
8224									let msg = "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned();
8225									let err = ChannelError::Close((msg, reason));
8226									let (_, e) = convert_channel_err!(self, peer_state, err, chan);
8227									handle_errors.push((Err(e), counterparty_node_id));
8228									false
8229								} else {
8230									true
8231								}
8232							},
8233						}
8234					});
8235
8236					for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
8237						if {
8238							req.ticks_remaining -= 1;
8239							req.ticks_remaining
8240						} <= 0
8241						{
8242							let logger = WithContext::from(
8243								&self.logger,
8244								Some(counterparty_node_id),
8245								Some(*chan_id),
8246								None,
8247							);
8248							log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
8249							if peer_state.is_connected {
8250								peer_state.pending_msg_events.push(MessageSendEvent::HandleError {
8251									node_id: counterparty_node_id,
8252									action: msgs::ErrorAction::SendErrorMessage {
8253										msg: msgs::ErrorMessage {
8254											channel_id: chan_id.clone(),
8255											data: "Channel force-closed".to_owned(),
8256										},
8257									},
8258								});
8259							}
8260						}
8261					}
8262					peer_state
8263						.inbound_channel_request_by_id
8264						.retain(|_, req| req.ticks_remaining > 0);
8265
8266					if peer_state.ok_to_remove(true) {
8267						pending_peers_awaiting_removal.push(counterparty_node_id);
8268					}
8269				}
8270			}
8271
8272			// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
8273			// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
8274			// to that peer are later closed while it is still disconnected (i.e. force closed),
8275			// we therefore need to remove the peer from `peer_state` separately.
8276			// To avoid having to take the `per_peer_state` `write` lock once the channels are
8277			// closed, we instead remove such peers awaiting removal here on a timer, to limit the
8278			// negative effects on parallelism as much as possible.
8279			if pending_peers_awaiting_removal.len() > 0 {
8280				let mut per_peer_state = self.per_peer_state.write().unwrap();
8281				for counterparty_node_id in pending_peers_awaiting_removal {
8282					match per_peer_state.entry(counterparty_node_id) {
8283						hash_map::Entry::Occupied(entry) => {
8284							// Remove the entry if the peer is still disconnected and we still
8285							// have no channels to the peer.
8286							let remove_entry = {
8287								let peer_state = entry.get().lock().unwrap();
8288								peer_state.ok_to_remove(true)
8289							};
8290							if remove_entry {
8291								entry.remove_entry();
8292							}
8293						},
8294						hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */
8295						},
8296					}
8297				}
8298			}
8299
8300			self.claimable_payments.lock().unwrap().claimable_payments.retain(
8301				|payment_hash, payment| {
8302					if payment.htlcs.is_empty() {
8303						// This should be unreachable
8304						debug_assert!(false);
8305						return false;
8306					}
8307					if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
8308					// Check if we've received all the parts we need for an MPP (the values of the parts add up to total_msat).
8309						// In this case we're not going to handle any timeouts of the parts here.
8310						// This condition determining whether the MPP is complete here must match
8311						// exactly the condition used in `process_pending_htlc_forwards`.
8312						let htlc_total_msat =
8313							payment.htlcs.iter().map(|h| h.sender_intended_value).sum();
8314						if payment.htlcs[0].total_msat <= htlc_total_msat {
8315							return true;
8316						} else if payment.htlcs.iter_mut().any(|htlc| {
8317							htlc.timer_ticks += 1;
8318							return htlc.timer_ticks >= MPP_TIMEOUT_TICKS;
8319						}) {
8320							let htlcs = payment
8321								.htlcs
8322								.drain(..)
8323								.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash));
8324							timed_out_mpp_htlcs.extend(htlcs);
8325							return false;
8326						}
8327					}
8328					true
8329				},
8330			);
8331
8332			for htlc_source in timed_out_mpp_htlcs.drain(..) {
8333				let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
8334				let failure_reason = LocalHTLCFailureReason::MPPTimeout;
8335				let reason = HTLCFailReason::from_failure_code(failure_reason);
8336				let receiver = HTLCHandlingFailureType::Receive { payment_hash: htlc_source.1 };
8337				self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver, None);
8338			}
8339
8340			for (err, counterparty_node_id) in handle_errors {
8341				let _ = handle_error!(self, err, counterparty_node_id);
8342			}
8343
8344			#[cfg(feature = "std")]
8345			let duration_since_epoch = std::time::SystemTime::now()
8346				.duration_since(std::time::SystemTime::UNIX_EPOCH)
8347				.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
8348			#[cfg(not(feature = "std"))]
8349			let duration_since_epoch = Duration::from_secs(
8350				self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64,
8351			);
8352
8353			self.pending_outbound_payments
8354				.remove_stale_payments(duration_since_epoch, &self.pending_events);
8355
8356			self.check_refresh_async_receive_offer_cache(true);
8357
8358			// Technically we don't need to do this here, but if we have holding cell entries in a
8359			// channel that need freeing, it's better to do that here and block a background task
8360			// than block the message queueing pipeline.
8361			if self.check_free_holding_cells() {
8362				should_persist = NotifyOption::DoPersist;
8363			}
8364
8365			should_persist
8366		});
8367	}
8368
8369	/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
8370	/// after a PaymentClaimable event, failing the HTLC back to its origin and freeing resources
8371	/// along the path (including in our own channel on which we received it).
8372	///
8373	/// Note that in some cases around unclean shutdown, it is possible the payment may have
8374	/// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
8375	/// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment
8376	/// may have already been failed automatically by LDK if it was nearing its expiration time.
8377	///
8378	/// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
8379	/// [`ChannelManager::claim_funds`]), you should still monitor for
8380	/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
8381	/// startup during which time claims that were in-progress at shutdown may be replayed.
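	///
	/// A minimal sketch from an event handler, assuming `channel_manager` is your
	/// [`ChannelManager`] and `expected_amount_msat` is a value you track for this payment (both
	/// names are hypothetical):
	///
	/// ```ignore
	/// if let Event::PaymentClaimable { payment_hash, amount_msat, .. } = event {
	/// 	if amount_msat < expected_amount_msat {
	/// 		channel_manager.fail_htlc_backwards(&payment_hash);
	/// 	}
	/// }
	/// ```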
8382	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
8383		let failure_code = FailureCode::IncorrectOrUnknownPaymentDetails;
8384		self.fail_htlc_backwards_with_reason(payment_hash, failure_code);
8385	}
8386
8387	/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
8388	/// reason for the failure.
8389	///
8390	/// See [`FailureCode`] for valid failure codes.
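	///
	/// For example (a sketch, with `channel_manager` and `payment_hash` assumed to be in scope):
	///
	/// ```ignore
	/// channel_manager.fail_htlc_backwards_with_reason(
	/// 	&payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails,
	/// );
	/// ```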
8391	pub fn fail_htlc_backwards_with_reason(
8392		&self, payment_hash: &PaymentHash, failure_code: FailureCode,
8393	) {
8394		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
8395
8396		let removed_source =
8397			self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
8398		if let Some(payment) = removed_source {
8399			for htlc in payment.htlcs {
8400				let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
8401				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
8402				let receiver = HTLCHandlingFailureType::Receive { payment_hash: *payment_hash };
8403				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
8404			}
8405		}
8406	}
8407
8408	/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
8409	fn get_htlc_fail_reason_from_failure_code(
8410		&self, failure_code: FailureCode, htlc: &ClaimableHTLC,
8411	) -> HTLCFailReason {
8412		match failure_code {
8413			FailureCode::TemporaryNodeFailure => {
8414				HTLCFailReason::from_failure_code(failure_code.into())
8415			},
8416			FailureCode::RequiredNodeFeatureMissing => {
8417				HTLCFailReason::from_failure_code(failure_code.into())
8418			},
8419			FailureCode::IncorrectOrUnknownPaymentDetails => {
8420				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
8421				htlc_msat_height_data
8422					.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
8423				HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
8424			},
8425			FailureCode::InvalidOnionPayload(data) => {
8426				let fail_data = match data {
8427					Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
8428					None => Vec::new(),
8429				};
8430				HTLCFailReason::reason(failure_code.into(), fail_data)
8431			},
8432		}
8433	}
8434
8435	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
8436	/// that we want to return and a channel.
8437	///
8438	/// This is for failures on the channel on which the HTLC was *received*, not failures
8439	/// forwarding it onward.
8440	fn get_htlc_inbound_temp_fail_data(&self, reason: LocalHTLCFailureReason) -> Vec<u8> {
8441		debug_assert!(reason.is_temporary());
8442		debug_assert!(reason != LocalHTLCFailureReason::AmountBelowMinimum);
8443		debug_assert!(reason != LocalHTLCFailureReason::FeeInsufficient);
8444		debug_assert!(reason != LocalHTLCFailureReason::IncorrectCLTVExpiry);
8445		// At most we write the two u16 fields `disabled_flags` and `len`, hence the 4-byte capacity.
8446		let mut enc = VecWriter(Vec::with_capacity(4));
8447		if reason == LocalHTLCFailureReason::ChannelDisabled {
8448			// No flags for `disabled_flags` are currently defined so they're always two zero bytes.
8449			// See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
8450			0u16.write(&mut enc).expect("Writes cannot fail");
8451		}
8452		// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
8453		(0u16).write(&mut enc).expect("Writes cannot fail");
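		// For example: a `ChannelDisabled` failure thus encodes as four zero bytes (a zero
		// `disabled_flags` u16 followed by a zero-length `channel_update`), while for the other
		// temporary failures only the two zero bytes of the length field are written.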
8454		enc.0
8455	}
8456
8457	// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
8458	// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
8459	// be surfaced to the user.
8460	fn fail_holding_cell_htlcs(
8461		&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
8462		counterparty_node_id: &PublicKey,
8463	) {
8464		let (failure_reason, onion_failure_data) = {
8465			let per_peer_state = self.per_peer_state.read().unwrap();
8466			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
8467				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8468				let peer_state = &mut *peer_state_lock;
8469				match peer_state.channel_by_id.entry(channel_id) {
8470					hash_map::Entry::Occupied(chan_entry) => {
8471						if let Some(_chan) = chan_entry.get().as_funded() {
8472							let reason = LocalHTLCFailureReason::TemporaryChannelFailure;
8473							let data = self.get_htlc_inbound_temp_fail_data(reason);
8474							(reason, data)
8475						} else {
8476							// We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
8477							debug_assert!(false);
8478							(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
8479						}
8480					},
8481					hash_map::Entry::Vacant(_) => {
8482						(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
8483					},
8484				}
8485			} else {
8486				(LocalHTLCFailureReason::UnknownNextPeer, Vec::new())
8487			}
8488		};
8489
8490		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
8491			let reason = HTLCFailReason::reason(failure_reason, onion_failure_data.clone());
8492			let receiver = HTLCHandlingFailureType::Forward {
8493				node_id: Some(counterparty_node_id.clone()),
8494				channel_id,
8495			};
8496			self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver, None);
8497		}
8498	}
8499
8500	/// Fails an HTLC backwards to the node that sent it to us.
8501	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
8502	fn fail_htlc_backwards_internal(
8503		&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason,
8504		failure_type: HTLCHandlingFailureType,
8505		mut from_monitor_update_completion: Option<PaymentCompleteUpdate>,
8506	) {
8507		// Ensure that no peer state channel storage lock is held when calling this function.
8508		// This ensures that future code doesn't introduce a lock-order requirement for
8509		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
8510		// this function with any `per_peer_state` peer lock acquired would.
8511		#[cfg(debug_assertions)]
8512		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
8513			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
8514		}
8515
8516		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
8517		//identify whether we sent it or not based on the (I presume) very different runtime
8518		//between the branches here. We should make this async and move it into the forward HTLCs
8519		//timer handling.
8520
8521		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
8522		// from block_connected which may run during initialization prior to the chain_monitor
8523		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
8524		match source {
8525			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
8526				self.pending_outbound_payments.fail_htlc(
8527					source,
8528					payment_hash,
8529					onion_error,
8530					path,
8531					session_priv,
8532					payment_id,
8533					self.probing_cookie_secret,
8534					&self.secp_ctx,
8535					&self.pending_events,
8536					&mut from_monitor_update_completion,
8537				);
8538				if let Some(update) = from_monitor_update_completion {
8539					// If `fail_htlc` didn't `take` the post-event action, we should go ahead and
8540					// complete it here as the failure was duplicative - we've already handled it.
8541					// This can happen in rare cases where a MonitorUpdate is replayed after
8542					// restart because a ChannelMonitor wasn't persisted after it was applied (even
8543					// though the ChannelManager was).
8544					// For such cases, we also check that there's no existing pending event to
8545					// complete this action already, which we let finish instead.
8546					let action =
8547						EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update);
8548					let have_action = {
8549						let pending_events = self.pending_events.lock().unwrap();
8550						pending_events.iter().any(|(_, act)| act.as_ref() == Some(&action))
8551					};
8552					if !have_action {
8553						self.handle_post_event_actions([action]);
8554					}
8555				}
8556			},
8557			HTLCSource::PreviousHopData(HTLCPreviousHopData {
8558				ref prev_outbound_scid_alias,
8559				ref htlc_id,
8560				ref incoming_packet_shared_secret,
8561				ref phantom_shared_secret,
8562				outpoint: _,
8563				ref blinded_failure,
8564				ref channel_id,
8565				..
8566			}) => {
8567				log_trace!(
8568					WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
8569					"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
8570					if blinded_failure.is_some() { "blinded " } else { "" },
8571					&payment_hash,
8572					onion_error
8573				);
8574				let failure = match blinded_failure {
8575					Some(BlindedFailure::FromIntroductionNode) => {
8576						let blinded_onion_error = HTLCFailReason::reason(
8577							LocalHTLCFailureReason::InvalidOnionBlinding,
8578							vec![0; 32],
8579						);
8580						let err_packet = blinded_onion_error.get_encrypted_failure_packet(
8581							incoming_packet_shared_secret,
8582							phantom_shared_secret,
8583						);
8584						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
8585					},
8586					Some(BlindedFailure::FromBlindedNode) => HTLCForwardInfo::FailMalformedHTLC {
8587						htlc_id: *htlc_id,
8588						failure_code: LocalHTLCFailureReason::InvalidOnionBlinding.failure_code(),
8589						sha256_of_onion: [0; 32],
8590					},
8591					None => {
8592						let err_packet = onion_error.get_encrypted_failure_packet(
8593							incoming_packet_shared_secret,
8594							phantom_shared_secret,
8595						);
8596						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
8597					},
8598				};
8599
8600				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
8601				match forward_htlcs.entry(*prev_outbound_scid_alias) {
8602					hash_map::Entry::Occupied(mut entry) => {
8603						entry.get_mut().push(failure);
8604					},
8605					hash_map::Entry::Vacant(entry) => {
8606						entry.insert(vec![failure]);
8607					},
8608				}
8609				mem::drop(forward_htlcs);
8610				let mut pending_events = self.pending_events.lock().unwrap();
8611				pending_events.push_back((
8612					events::Event::HTLCHandlingFailed {
8613						prev_channel_id: *channel_id,
8614						failure_type,
8615						failure_reason: Some(onion_error.into()),
8616					},
8617					None,
8618				));
8619			},
8620		}
8621	}
8622
8623	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
8624	/// [`MessageSendEvent`]s needed to claim the payment.
8625	///
8626	/// This method is guaranteed to ensure the payment has been claimed but only if the current
8627	/// height is strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
8628	/// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
8629	/// successful. It will generally be available in the next [`process_pending_events`] call.
8630	///
8631	/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
8632	/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
8633	/// event matches your expectation. If you fail to do so and call this method, you may provide
8634	/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
8635	///
8636	/// This function will fail the payment if it has custom TLVs with even type numbers, as we
8637	/// will assume they are unknown. If you intend to accept even custom TLVs, you should use
8638	/// [`claim_funds_with_known_custom_tlvs`].
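	///
	/// A minimal sketch from an event handler, assuming `channel_manager` is your
	/// [`ChannelManager`] and you have already checked that the claimable amount matches your
	/// expectation:
	///
	/// ```ignore
	/// if let Event::PaymentClaimable { purpose, .. } = event {
	/// 	if let Some(payment_preimage) = purpose.preimage() {
	/// 		channel_manager.claim_funds(payment_preimage);
	/// 	}
	/// }
	/// ```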
8639	///
8640	/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
8641	/// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
8642	/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
8643	/// [`process_pending_events`]: EventsProvider::process_pending_events
8644	/// [`create_inbound_payment`]: Self::create_inbound_payment
8645	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
8646	/// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs
8647	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
8648		self.claim_payment_internal(payment_preimage, false);
8649	}
8650
8651	/// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with
8652	/// even type numbers.
8653	///
8654	/// # Note
8655	///
8656	/// You MUST check you've understood all even TLVs before using this to
8657	/// claim, otherwise you may unintentionally agree to some protocol you do not understand.
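	///
	/// A minimal sketch, assuming you have already inspected every even-typed entry in the
	/// payment's custom TLVs (from the corresponding [`events::Event::PaymentClaimable`]
	/// `onion_fields`) and understood them:
	///
	/// ```ignore
	/// channel_manager.claim_funds_with_known_custom_tlvs(payment_preimage);
	/// ```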
8658	///
8659	/// [`claim_funds`]: Self::claim_funds
8660	pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
8661		self.claim_payment_internal(payment_preimage, true);
8662	}
8663
8664	fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
8665		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
8666
8667		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
8668
8669		let (sources, claiming_payment) = {
8670			let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
8671				payment_hash,
8672				&self.node_signer,
8673				&self.logger,
8674				&self.inbound_payment_id_secret,
8675				custom_tlvs_known,
8676			);
8677
8678			match res {
8679				Ok((htlcs, payment_info)) => (htlcs, payment_info),
8680				Err(htlcs) => {
8681					for htlc in htlcs {
8682						let reason = self.get_htlc_fail_reason_from_failure_code(
8683							FailureCode::InvalidOnionPayload(None),
8684							&htlc,
8685						);
8686						let source = HTLCSource::PreviousHopData(htlc.prev_hop);
8687						let receiver = HTLCHandlingFailureType::Receive { payment_hash };
8688						self.fail_htlc_backwards_internal(
8689							&source,
8690							&payment_hash,
8691							&reason,
8692							receiver,
8693							None,
8694						);
8695					}
8696					return;
8697				},
8698			}
8699		};
8700		debug_assert!(!sources.is_empty());
8701
8702		// Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
8703		// and when we got here we need to check that the amount we're about to claim matches the
8704		// amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
8705		// the MPP parts all have the same `total_msat`.
8706		let mut claimable_amt_msat = 0;
8707		let mut prev_total_msat = None;
8708		let mut expected_amt_msat = None;
8709		let mut valid_mpp = true;
8710		let mut errs = Vec::new();
8711		let per_peer_state = self.per_peer_state.read().unwrap();
8712		for htlc in sources.iter() {
8713			if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
8714				log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
8715				debug_assert!(false);
8716				valid_mpp = false;
8717				break;
8718			}
8719			prev_total_msat = Some(htlc.total_msat);
8720
8721			if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
8722				log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
8723				debug_assert!(false);
8724				valid_mpp = false;
8725				break;
8726			}
8727			expected_amt_msat = htlc.total_value_received;
8728			claimable_amt_msat += htlc.value;
8729		}
8730		mem::drop(per_peer_state);
8731		if sources.is_empty() || expected_amt_msat.is_none() {
8732			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
8733			log_info!(
8734				self.logger,
8735				"Attempted to claim an incomplete payment which no longer had any available HTLCs!"
8736			);
8737			return;
8738		}
8739		if claimable_amt_msat != expected_amt_msat.unwrap() {
8740			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
8741			log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
8742				expected_amt_msat.unwrap(), claimable_amt_msat);
8743			return;
8744		}
8745		if valid_mpp {
8746			let mpp_parts: Vec<_> = sources
8747				.iter()
8748				.filter_map(|htlc| {
8749					if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
8750						Some(MPPClaimHTLCSource {
8751							counterparty_node_id: cp_id,
8752							funding_txo: htlc.prev_hop.outpoint,
8753							channel_id: htlc.prev_hop.channel_id,
8754							htlc_id: htlc.prev_hop.htlc_id,
8755						})
8756					} else {
8757						None
8758					}
8759				})
8760				.collect();
8761			let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
8762				let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len());
8763				for part in mpp_parts.iter() {
8764					let chan = (part.counterparty_node_id, part.channel_id);
8765					if !channels_without_preimage.contains(&chan) {
8766						channels_without_preimage.push(chan);
8767					}
8768				}
8769				Some(Arc::new(Mutex::new(PendingMPPClaim {
8770					channels_without_preimage,
8771					channels_with_preimage: Vec::new(),
8772				})))
8773			} else {
8774				None
8775			};
8776			let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
8777			for htlc in sources {
8778				let this_mpp_claim =
8779					pending_mpp_claim_ptr_opt.as_ref().map(|pending_mpp_claim| {
8780						let counterparty_id = htlc.prev_hop.counterparty_node_id;
8781						let counterparty_id = counterparty_id
8782							.expect("Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least one claimable payment was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC by claiming the payment prior to upgrading.");
8783						let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
8784						(counterparty_id, htlc.prev_hop.channel_id, claim_ptr)
8785					});
8786				let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
8787					RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
8788						pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
8789					}
8790				});
8791
8792				// Create new attribution data as the final hop. Always report a zero hold time, because reporting a
8793				// non-zero value will not make a difference in the penalty that may be applied by the sender. If there
8794				// is a phantom hop, we need to double-process.
8795				let attribution_data =
8796					if let Some(phantom_secret) = htlc.prev_hop.phantom_shared_secret {
8797						let attribution_data =
8798							process_fulfill_attribution_data(None, &phantom_secret, 0);
8799						Some(attribution_data)
8800					} else {
8801						None
8802					};
8803
8804				let attribution_data = process_fulfill_attribution_data(
8805					attribution_data,
8806					&htlc.prev_hop.incoming_packet_shared_secret,
8807					0,
8808				);
8809
8810				self.claim_funds_from_hop(
8811					htlc.prev_hop,
8812					payment_preimage,
8813					payment_info.clone(),
8814					Some(attribution_data),
8815					|_, definitely_duplicate| {
8816						debug_assert!(
8817							!definitely_duplicate,
8818							"We shouldn't claim duplicatively from a payment"
8819						);
8820						(
8821							Some(MonitorUpdateCompletionAction::PaymentClaimed {
8822								payment_hash,
8823								pending_mpp_claim: this_mpp_claim,
8824							}),
8825							raa_blocker,
8826						)
8827					},
8828				);
8829			}
8830		} else {
8831			for htlc in sources {
8832				let err_data =
8833					invalid_payment_err_data(htlc.value, self.best_block.read().unwrap().height);
8834				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
8835				let reason = HTLCFailReason::reason(
8836					LocalHTLCFailureReason::IncorrectPaymentDetails,
8837					err_data,
8838				);
8839				let receiver = HTLCHandlingFailureType::Receive { payment_hash };
8840				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver, None);
8841			}
8842			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
8843		}
8844
8845		// Now we can handle any errors which were generated.
8846		for (counterparty_node_id, err) in errs.drain(..) {
8847			let res: Result<(), _> = Err(err);
8848			let _ = handle_error!(self, res, counterparty_node_id);
8849		}
8850	}
8851
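	/// Claims a single HTLC from the (previous-hop) channel it was received over, resolving the
	/// counterparty from the SCID alias if necessary and delegating to `claim_mpp_part`.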
8852	fn claim_funds_from_hop<
8853		ComplFunc: FnOnce(
8854			Option<u64>,
8855			bool,
8856		) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>),
8857	>(
8858		&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
8859		payment_info: Option<PaymentClaimDetails>, attribution_data: Option<AttributionData>,
8860		completion_action: ComplFunc,
8861	) {
8862		let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
8863			let short_to_chan_info = self.short_to_chan_info.read().unwrap();
8864			short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).map(|(cp_id, _)| *cp_id)
8865		});
8866		let counterparty_node_id = if let Some(node_id) = counterparty_node_id {
8867			node_id
8868		} else {
8869			let payment_hash: PaymentHash = payment_preimage.into();
8870			panic!(
8871				"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {payment_hash} (preimage {payment_preimage}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
8872			);
8873		};
8874
8875		let htlc_source = HTLCClaimSource {
8876			counterparty_node_id,
8877			funding_txo: prev_hop.outpoint,
8878			channel_id: prev_hop.channel_id,
8879			htlc_id: prev_hop.htlc_id,
8880		};
8881		self.claim_mpp_part(
8882			htlc_source,
8883			payment_preimage,
8884			payment_info,
8885			attribution_data,
8886			completion_action,
8887		)
8888	}
8889
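	/// Claims an HTLC (or one part of an MPP payment) on the channel it was received over. If the
	/// channel is still open this generates an `update_fulfill_htlc` plus commitment update;
	/// otherwise it writes the preimage directly to the closed channel's `ChannelMonitor`.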
8890	fn claim_mpp_part<
8891		ComplFunc: FnOnce(
8892			Option<u64>,
8893			bool,
8894		) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>),
8895	>(
8896		&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
8897		payment_info: Option<PaymentClaimDetails>, attribution_data: Option<AttributionData>,
8898		completion_action: ComplFunc,
8899	) {
8900		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
8901
8902		// If we haven't yet run background events assume we're still deserializing and shouldn't
8903		// actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as
8904		// `BackgroundEvent`s.
8905		let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
8906
8907		// As we may call handle_monitor_update_completion_actions in rather rare cases, check that
8908		// the required mutexes are not held before we start.
8909		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
8910		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
8911
8912		let per_peer_state = self.per_peer_state.read().unwrap();
8913		let chan_id = prev_hop.channel_id;
8914
8915		const MISSING_MON_ERROR: &'static str =
8916			"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";
8917
8918		let mut peer_state_lock = per_peer_state
8919			.get(&prev_hop.counterparty_node_id)
8920			.map(|peer_mutex| peer_mutex.lock().unwrap())
8921			.expect(MISSING_MON_ERROR);
8922
8923		{
8924			let peer_state = &mut *peer_state_lock;
8925			if let hash_map::Entry::Occupied(mut chan_entry) =
8926				peer_state.channel_by_id.entry(chan_id)
8927			{
8928				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
8929					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8930					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(
8931						prev_hop.htlc_id,
8932						payment_preimage,
8933						payment_info,
8934						attribution_data,
8935						&&logger,
8936					);
8937
8938					match fulfill_res {
8939						UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
8940							let (action_opt, raa_blocker_opt) =
8941								completion_action(Some(htlc_value_msat), false);
8942							if let Some(action) = action_opt {
8943								log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
8944									chan_id, action);
8945								peer_state
8946									.monitor_update_blocked_actions
8947									.entry(chan_id)
8948									.or_insert(Vec::new())
8949									.push(action);
8950							}
8951							if let Some(raa_blocker) = raa_blocker_opt {
8952								peer_state
8953									.actions_blocking_raa_monitor_updates
8954									.entry(chan_id)
8955									.or_insert_with(Vec::new)
8956									.push(raa_blocker);
8957							}
8958							handle_new_monitor_update!(
8959								self,
8960								prev_hop.funding_txo,
8961								monitor_update,
8962								peer_state_lock,
8963								peer_state,
8964								per_peer_state,
8965								chan
8966							);
8967						},
8968						UpdateFulfillCommitFetch::DuplicateClaim {} => {
8969							let (action_opt, raa_blocker_opt) = completion_action(None, true);
8970							if let Some(raa_blocker) = raa_blocker_opt {
8971								// If we're making a claim during startup, it's a replay of a
8972								// payment claim from a `ChannelMonitor`. In some cases (MPP or
8973								// if the HTLC was only recently removed) we make such claims
8974								// after an HTLC has been removed from a channel entirely, and
8975								// thus the RAA blocker may have long since completed.
8976								//
8977								// However, it's possible that the `ChannelMonitorUpdate` containing
8978								// the preimage never completed and is still pending. In that case,
8979								// we need to re-add the RAA blocker, which we do here. Handling
8980								// the post-update action, below, will remove it again.
8981								//
8982								// In any other case (i.e. not during startup), the RAA blocker
8983								// must still be present and blocking RAAs.
8984								let actions = &mut peer_state.actions_blocking_raa_monitor_updates;
8985								let actions_list = actions.entry(chan_id).or_insert_with(Vec::new);
8986								if !actions_list.contains(&raa_blocker) {
8987									debug_assert!(during_init);
8988									actions_list.push(raa_blocker);
8989								}
8990							}
8991							let action = if let Some(action) = action_opt {
8992								action
8993							} else {
8994								return;
8995							};
8996
8997							// If there are monitor updates in flight, we may be in the case
8998							// described above, replaying a claim on startup which needs an RAA
8999							// blocker to remain blocked. Thus, in such a case we simply push the
9000							// post-update action to the blocked list and move on.
9001							// Erring on the side of caution, we defer the post-update action here
9002							// rather than processing it immediately.
9003							let in_flight_mons = peer_state.in_flight_monitor_updates.get(&chan_id);
9004							if in_flight_mons.map(|(_, mons)| !mons.is_empty()).unwrap_or(false) {
9005								peer_state
9006									.monitor_update_blocked_actions
9007									.entry(chan_id)
9008									.or_insert_with(Vec::new)
9009									.push(action);
9010								return;
9011							}
9012
9013							mem::drop(peer_state_lock);
9014
9015							log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
9016								chan_id, action);
9017							if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
9018								downstream_counterparty_node_id: node_id,
9019								blocking_action: blocker,
9020								downstream_channel_id: channel_id,
9021							} = action
9022							{
9023								if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
9024									let mut peer_state = peer_state_mtx.lock().unwrap();
9025									if let Some(blockers) = peer_state
9026										.actions_blocking_raa_monitor_updates
9027										.get_mut(&channel_id)
9028									{
9029										let mut found_blocker = false;
9030										blockers.retain(|iter| {
9031											// Note that we could actually be blocked, in
9032											// which case we need to only remove the one
9033											// blocker which was added duplicatively.
9034											let first_blocker = !found_blocker;
9035											if *iter == blocker {
9036												found_blocker = true;
9037											}
9038											*iter != blocker || !first_blocker
9039										});
9040										debug_assert!(found_blocker);
9041									}
9042								} else {
9043									debug_assert!(false);
9044								}
9045							} else if matches!(
9046								action,
9047								MonitorUpdateCompletionAction::PaymentClaimed { .. }
9048							) {
9049								debug_assert!(during_init,
9050									"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
9051								mem::drop(per_peer_state);
9052								self.handle_monitor_update_completion_actions([action]);
9053							} else {
9054								debug_assert!(false,
9055									"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
9056								return;
9057							};
9058						},
9059					}
9060				}
9061				return;
9062			}
9063		}
9064
9065		let peer_state = &mut *peer_state_lock;
9066
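		// The channel is no longer in `channel_by_id`, so it has been closed. Build a
		// `ChannelMonitorUpdate` carrying the preimage against the closed channel, using the
		// update ID counter we retain for closed channels.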
9067		let update_id = if let Some(latest_update_id) =
9068			peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id)
9069		{
9070			*latest_update_id = latest_update_id.saturating_add(1);
9071			*latest_update_id
9072		} else {
9073			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
9074This should have been checked for availability on startup but somehow it is no longer available.
9075This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
9076			log_error!(self.logger, "{}", err);
9077			panic!("{}", err);
9078		};
9079
9080		let preimage_update = ChannelMonitorUpdate {
9081			update_id,
9082			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
9083				payment_preimage,
9084				payment_info,
9085			}],
9086			channel_id: Some(prev_hop.channel_id),
9087		};
9088
9089		// We don't have any idea if this is a duplicate claim without interrogating the
9090		// `ChannelMonitor`, so we just always queue up the completion action after the
9091		// `ChannelMonitorUpdate` we're about to generate. This may result in a duplicate `Event`,
9092		// but note that `Event`s are generally allowed to be duplicative (and it's
9093		// specifically noted in `PaymentForwarded`).
9094		let (action_opt, raa_blocker_opt) = completion_action(None, false);
9095
9096		if let Some(raa_blocker) = raa_blocker_opt {
9097			peer_state
9098				.actions_blocking_raa_monitor_updates
9099				.entry(prev_hop.channel_id)
9100				.or_default()
9101				.push(raa_blocker);
9102		}
9103
9104		// Given that we're in a bit of a weird edge case, it's worth hashing the preimage
9105		// to include the `payment_hash` in the log metadata here.
9106		let payment_hash = payment_preimage.into();
9107		let logger = WithContext::from(
9108			&self.logger,
9109			Some(prev_hop.counterparty_node_id),
9110			Some(chan_id),
9111			Some(payment_hash),
9112		);
9113
9114		if let Some(action) = action_opt {
9115			log_trace!(
9116				logger,
9117				"Tracking monitor update completion action for closed channel {}: {:?}",
9118				chan_id,
9119				action
9120			);
9121			peer_state
9122				.monitor_update_blocked_actions
9123				.entry(chan_id)
9124				.or_insert(Vec::new())
9125				.push(action);
9126		}
9127
9128		handle_new_monitor_update!(
9129			self,
9130			prev_hop.funding_txo,
9131			preimage_update,
9132			peer_state_lock,
9133			peer_state,
9134			per_peer_state,
9135			prev_hop.counterparty_node_id,
9136			chan_id,
9137			POST_CHANNEL_CLOSE
9138		);
9139	}
9140
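	/// Finalizes claimed outbound payments, decoding any fulfill attribution data into per-hop
	/// hold times before handing the results to the pending-outbound-payments tracker.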
9141	fn finalize_claims(&self, sources: Vec<(HTLCSource, Option<AttributionData>)>) {
9142		// Decode attribution data to hold times.
9143		let hold_times = sources.into_iter().filter_map(|(source, attribution_data)| {
9144			if let HTLCSource::OutboundRoute { ref session_priv, ref path, .. } = source {
9145				// If the path has trampoline hops, we need to hash the session private key to get the outer session key.
9146				let derived_key;
9147				let session_priv = if path.has_trampoline_hops() {
9148					let session_priv_hash =
9149						Sha256::hash(&session_priv.secret_bytes()).to_byte_array();
9150					derived_key = SecretKey::from_slice(&session_priv_hash[..]).unwrap();
9151					&derived_key
9152				} else {
9153					session_priv
9154				};
9155
9156				let hold_times = attribution_data.map_or(Vec::new(), |attribution_data| {
9157					decode_fulfill_attribution_data(
9158						&self.secp_ctx,
9159						&self.logger,
9160						path,
9161						session_priv,
9162						attribution_data,
9163					)
9164				});
9165
9166				Some((source, hold_times))
9167			} else {
9168				None
9169			}
9170		});
9171
9172		self.pending_outbound_payments.finalize_claims(hold_times, &self.pending_events);
9173	}
9174
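	/// Applies a claimed preimage to the HTLC's source: completing the outbound payment if we
	/// originated it, or claiming the inbound edge (and emitting [`Event::PaymentForwarded`]) if
	/// we forwarded it.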
9175	fn claim_funds_internal(
9176		&self, source: HTLCSource, payment_preimage: PaymentPreimage,
9177		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
9178		next_channel_counterparty_node_id: PublicKey, next_channel_outpoint: OutPoint,
9179		next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
9180		attribution_data: Option<AttributionData>, send_timestamp: Option<Duration>,
9181	) {
9182		let startup_replay =
9183			!self.background_events_processed_since_startup.load(Ordering::Acquire);
9184		let htlc_id = SentHTLCId::from_source(&source);
9185		match source {
9186			HTLCSource::OutboundRoute {
9187				session_priv, payment_id, path, bolt12_invoice, ..
9188			} => {
9189				debug_assert!(!startup_replay,
9190					"We don't support claim_htlc claims during startup - monitors may not be available yet");
9191				debug_assert_eq!(next_channel_counterparty_node_id, path.hops[0].pubkey);
9192
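				// Decide how handling the event generated by `claim_htlc` should unblock things:
				// on-chain claims release a dedicated payment-complete monitor update, while
				// off-chain claims release the RAA-blocked monitor update on the downstream channel.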
9193				let mut ev_completion_action = if from_onchain {
9194					let release = PaymentCompleteUpdate {
9195						counterparty_node_id: next_channel_counterparty_node_id,
9196						channel_funding_outpoint: next_channel_outpoint,
9197						channel_id: next_channel_id,
9198						htlc_id,
9199					};
9200					Some(EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(release))
9201				} else {
9202					Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9203						channel_funding_outpoint: Some(next_channel_outpoint),
9204						channel_id: next_channel_id,
9205						counterparty_node_id: path.hops[0].pubkey,
9206					})
9207				};
9208				self.pending_outbound_payments.claim_htlc(
9209					payment_id,
9210					payment_preimage,
9211					bolt12_invoice,
9212					session_priv,
9213					path,
9214					from_onchain,
9215					&mut ev_completion_action,
9216					&self.pending_events,
9217				);
9218				// If an event was generated, `claim_htlc` set `ev_completion_action` to None;
9219				// if not, we should go ahead and run it now (as the claim was duplicative), at
9220				// least if a PaymentClaimed event with the same action isn't already pending.
9221				let have_action = if ev_completion_action.is_some() {
9222					let pending_events = self.pending_events.lock().unwrap();
9223					pending_events.iter().any(|(_, act)| *act == ev_completion_action)
9224				} else {
9225					false
9226				};
9227				if !have_action {
9228					self.handle_post_event_actions(ev_completion_action);
9229				}
9230			},
9231			HTLCSource::PreviousHopData(hop_data) => {
9232				let prev_channel_id = hop_data.channel_id;
9233				let prev_user_channel_id = hop_data.user_channel_id;
9234				let prev_node_id = hop_data.counterparty_node_id;
9235				let completed_blocker =
9236					RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
9237
9238				// Obtain hold time, if available.
9239				let hold_time = hold_time_since(send_timestamp).unwrap_or(0);
9240
9241				// If attribution data was received from downstream, we shift it and get it ready for adding our hold
9242				// time. Note that fulfilled HTLCs take a fast path to the incoming side. We don't need to wait for RAA
9243				// to record the hold time like we do for failed HTLCs.
9244				let attribution_data = process_fulfill_attribution_data(
9245					attribution_data,
9246					&hop_data.incoming_packet_shared_secret,
9247					hold_time,
9248				);
9249
9250				#[cfg(test)]
9251				let claiming_chan_funding_outpoint = hop_data.outpoint;
9252				self.claim_funds_from_hop(
9253					hop_data,
9254					payment_preimage,
9255					None,
9256					Some(attribution_data),
9257					|htlc_claim_value_msat, definitely_duplicate| {
9258						let chan_to_release = Some(EventUnblockedChannel {
9259							counterparty_node_id: next_channel_counterparty_node_id,
9260							funding_txo: next_channel_outpoint,
9261							channel_id: next_channel_id,
9262							blocking_action: completed_blocker,
9263						});
9264
9265						if definitely_duplicate && startup_replay {
9266							// On startup we may get redundant claims which are related to
9267							// monitor updates still in flight. In that case, we shouldn't
9268							// immediately free, but instead let that monitor update complete
9269							// in the background.
9270							#[cfg(test)]
9271							{
9272								let per_peer_state = self.per_peer_state.deadlocking_read();
9273								// The channel we'd unblock should already be closed, or...
9274								let channel_closed = per_peer_state
9275									.get(&next_channel_counterparty_node_id)
9276									.map(|lck| lck.deadlocking_lock())
9277									.map(|peer| !peer.channel_by_id.contains_key(&next_channel_id))
9278									.unwrap_or(true);
9279								let background_events =
9280									self.pending_background_events.lock().unwrap();
9281								// there should be a `BackgroundEvent` pending...
9282								let matching_bg_event =
9283									background_events.iter().any(|ev| {
9284										match ev {
9285											// to apply a monitor update that blocked the claiming channel,
9286											BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
9287												funding_txo, update, ..
9288											} => {
9289												if *funding_txo == claiming_chan_funding_outpoint {
9290													assert!(update.updates.iter().any(|upd|
9291														if let ChannelMonitorUpdateStep::PaymentPreimage {
9292															payment_preimage: update_preimage, ..
9293														} = upd {
9294															payment_preimage == *update_preimage
9295														} else { false }
9296													), "{:?}", update);
9297													true
9298												} else { false }
9299											},
9300											// or the monitor update has completed and will unblock
9301											// immediately once we get going.
9302											BackgroundEvent::MonitorUpdatesComplete {
9303												channel_id, ..
9304											} =>
9305												*channel_id == prev_channel_id,
9306										}
9307									});
9308								assert!(
9309									channel_closed || matching_bg_event,
9310									"{:?}",
9311									*background_events
9312								);
9313							}
9314							(None, None)
9315						} else if definitely_duplicate {
9316							if let Some(other_chan) = chan_to_release {
9317								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
9318									downstream_counterparty_node_id: other_chan.counterparty_node_id,
9319									downstream_channel_id: other_chan.channel_id,
9320									blocking_action: other_chan.blocking_action,
9321								}), None)
9322							} else {
9323								(None, None)
9324							}
9325						} else {
9326							let total_fee_earned_msat =
9327								if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
9328									if let Some(claimed_htlc_value) = htlc_claim_value_msat {
9329										Some(claimed_htlc_value - forwarded_htlc_value)
9330									} else {
9331										None
9332									}
9333								} else {
9334									None
9335								};
9336							debug_assert!(
9337								skimmed_fee_msat <= total_fee_earned_msat,
9338								"skimmed_fee_msat must always be included in total_fee_earned_msat"
9339							);
9340							(
9341								Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
9342									event: events::Event::PaymentForwarded {
9343										prev_channel_id: Some(prev_channel_id),
9344										next_channel_id: Some(next_channel_id),
9345										prev_user_channel_id,
9346										next_user_channel_id,
9347										prev_node_id,
9348										next_node_id: Some(next_channel_counterparty_node_id),
9349										total_fee_earned_msat,
9350										skimmed_fee_msat,
9351										claim_from_onchain_tx: from_onchain,
9352										outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
9353									},
9354									downstream_counterparty_and_funding_outpoint: chan_to_release,
9355								}),
9356								None,
9357							)
9358						}
9359					},
9360				);
9361			},
9362		}
9363	}
9364
9365	/// Gets the node_id held by this ChannelManager
9366	pub fn get_our_node_id(&self) -> PublicKey {
9367		self.our_network_pubkey
9368	}
9369
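	/// Processes the post-update actions queued behind completed `ChannelMonitorUpdate`s: emits
	/// `PaymentClaimed`/`PaymentForwarded` events and releases any RAA monitor-update blockers
	/// they were holding.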
9370	#[rustfmt::skip]
9371	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
9372		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
9373		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
9374		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
9375
9376		let mut freed_channels = Vec::new();
9377
9378		for action in actions.into_iter() {
9379			match action {
9380				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
9381					if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim {
9382						let per_peer_state = self.per_peer_state.read().unwrap();
9383						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
9384							let mut peer_state = peer_state_mutex.lock().unwrap();
9385							let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
9386							if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
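								// Walk this channel's RAA blockers; for the blocker matching this
								// claim, move the channel from `channels_without_preimage` to
								// `channels_with_preimage`, and drop the blocker once no channels
								// remain without the preimage (freeing the others below).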
9387								blockers.get_mut().retain(|blocker|
9388									if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
9389										if *pending_claim == claim_ptr {
9390											let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
9391											let pending_claim_state = &mut *pending_claim_state_lock;
9392											pending_claim_state.channels_without_preimage.retain(|(cp, cid)| {
9393												let this_claim =
9394													*cp == counterparty_node_id && *cid == chan_id;
9395												if this_claim {
9396													pending_claim_state.channels_with_preimage.push((*cp, *cid));
9397													false
9398												} else { true }
9399											});
9400											if pending_claim_state.channels_without_preimage.is_empty() {
9401												for (cp, cid) in pending_claim_state.channels_with_preimage.iter() {
9402													let freed_chan = (*cp, *cid, blocker.clone());
9403													freed_channels.push(freed_chan);
9404												}
9405											}
9406											!pending_claim_state.channels_without_preimage.is_empty()
9407										} else { true }
9408									} else { true }
9409								);
9410								if blockers.get().is_empty() {
9411									blockers.remove();
9412								}
9413							}
9414						});
9415					}
9416
9417					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
9418					if let Some(ClaimingPayment {
9419						amount_msat,
9420						payment_purpose: purpose,
9421						receiver_node_id,
9422						htlcs,
9423						sender_intended_value: sender_intended_total_msat,
9424						onion_fields,
9425						payment_id,
9426						durable_preimage_channel,
9427					}) = payment {
9428						let event = events::Event::PaymentClaimed {
9429							payment_hash,
9430							purpose,
9431							amount_msat,
9432							receiver_node_id: Some(receiver_node_id),
9433							htlcs,
9434							sender_intended_total_msat,
9435							onion_fields,
9436							payment_id,
9437						};
9438						let action = if let Some((outpoint, counterparty_node_id, channel_id))
9439							= durable_preimage_channel
9440						{
9441							Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9442								channel_funding_outpoint: Some(outpoint),
9443								counterparty_node_id,
9444								channel_id,
9445							})
9446						} else {
9447							None
9448						};
9449						let event_action = (event, action);
9450						let mut pending_events = self.pending_events.lock().unwrap();
9451						// If we're replaying a claim on startup we may end up duplicating an event
9452						// that's already in our queue, so check before we push another one. The
9453						// `payment_id` should suffice to ensure we never spuriously drop a second
9454						// event for a duplicate payment.
9455						if !pending_events.contains(&event_action) {
9456							pending_events.push_back(event_action);
9457						}
9458					}
9459				},
9460				MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
9461					event, downstream_counterparty_and_funding_outpoint
9462				} => {
9463					self.pending_events.lock().unwrap().push_back((event, None));
9464					if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
9465						self.handle_monitor_update_release(
9466							unblocked.counterparty_node_id,
9467							unblocked.channel_id,
9468							Some(unblocked.blocking_action),
9469						);
9470					}
9471				},
9472				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
9473					downstream_counterparty_node_id, downstream_channel_id, blocking_action,
9474				} => {
9475					self.handle_monitor_update_release(
9476						downstream_counterparty_node_id,
9477						downstream_channel_id,
9478						Some(blocking_action),
9479					);
9480				},
9481			}
9482		}
9483
9484		for (node_id, channel_id, blocker) in freed_channels {
9485			self.handle_monitor_update_release(node_id, channel_id, Some(blocker));
9486		}
9487	}
9488
9489	/// Handles a channel reentering a functional state, either due to reconnect or a monitor
9490	/// update completion.
9491	#[rustfmt::skip]
9492	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
9493		channel: &mut FundedChannel<SP>, raa: Option<msgs::RevokeAndACK>,
9494		commitment_update: Option<msgs::CommitmentUpdate>, commitment_order: RAACommitmentOrder,
9495		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
9496		funding_broadcastable: Option<Transaction>,
9497		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
9498		tx_signatures: Option<msgs::TxSignatures>, tx_abort: Option<msgs::TxAbort>,
9499		channel_ready_order: ChannelReadyOrder,
9500	) -> (Option<(u64, PublicKey, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
9501		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
9502		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures, {} tx_abort",
9503			&channel.context.channel_id(),
9504			if raa.is_some() { "an" } else { "no" },
9505			if commitment_update.is_some() { "a" } else { "no" },
9506			pending_forwards.len(), pending_update_adds.len(),
9507			if funding_broadcastable.is_some() { "" } else { "not " },
9508			if channel_ready.is_some() { "sending" } else { "without" },
9509			if announcement_sigs.is_some() { "sending" } else { "without" },
9510			if tx_signatures.is_some() { "sending" } else { "without" },
9511			if tx_abort.is_some() { "sending" } else { "without" },
9512		);
9513
9514		let counterparty_node_id = channel.context.get_counterparty_node_id();
9515		let outbound_scid_alias = channel.context.outbound_scid_alias();
9516
9517		let mut htlc_forwards = None;
9518		if !pending_forwards.is_empty() {
9519			htlc_forwards = Some((
9520				outbound_scid_alias, channel.context.get_counterparty_node_id(),
9521				channel.funding.get_funding_txo().unwrap(), channel.context.channel_id(),
9522				channel.context.get_user_id(), pending_forwards
9523			));
9524		}
9525		let mut decode_update_add_htlcs = None;
9526		if !pending_update_adds.is_empty() {
9527			decode_update_add_htlcs = Some((outbound_scid_alias, pending_update_adds));
9528		}
9529
9530		if channel.context.is_connected() {
9531			if let ChannelReadyOrder::ChannelReadyFirst = channel_ready_order {
9532				if let Some(msg) = &channel_ready {
9533					send_channel_ready!(self, pending_msg_events, channel, msg.clone());
9534				}
9535
9536				if let Some(msg) = &announcement_sigs {
9537					pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
9538						node_id: counterparty_node_id,
9539						msg: msg.clone(),
9540					});
9541				}
9542			}
9543
9544			macro_rules! handle_cs { () => {
9545				if let Some(update) = commitment_update {
9546					pending_msg_events.push(MessageSendEvent::UpdateHTLCs {
9547						node_id: counterparty_node_id,
9548						channel_id: channel.context.channel_id(),
9549						updates: update,
9550					});
9551				}
9552			} }
9553			macro_rules! handle_raa { () => {
9554				if let Some(revoke_and_ack) = raa {
9555					pending_msg_events.push(MessageSendEvent::SendRevokeAndACK {
9556						node_id: counterparty_node_id,
9557						msg: revoke_and_ack,
9558					});
9559				}
9560			} }
9561			match commitment_order {
9562				RAACommitmentOrder::CommitmentFirst => {
9563					handle_cs!();
9564					handle_raa!();
9565				},
9566				RAACommitmentOrder::RevokeAndACKFirst => {
9567					handle_raa!();
9568					handle_cs!();
9569				},
9570			}
9571
9572			// TODO(dual_funding): For async signing support we need to hold back `tx_signatures` until the `commitment_signed` is ready.
9573			if let Some(msg) = tx_signatures {
9574				pending_msg_events.push(MessageSendEvent::SendTxSignatures {
9575					node_id: counterparty_node_id,
9576					msg,
9577				});
9578			}
9579			if let Some(msg) = tx_abort {
9580				pending_msg_events.push(MessageSendEvent::SendTxAbort {
9581					node_id: counterparty_node_id,
9582					msg,
9583				});
9584			}
9585
9586			if let ChannelReadyOrder::SignaturesFirst = channel_ready_order {
9587				if let Some(msg) = channel_ready {
9588					send_channel_ready!(self, pending_msg_events, channel, msg);
9589				}
9590
9591				if let Some(msg) = announcement_sigs {
9592					pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
9593						node_id: counterparty_node_id,
9594						msg,
9595					});
9596				}
9597			}
9598		} else if let Some(msg) = channel_ready {
9599			send_channel_ready!(self, pending_msg_events, channel, msg);
9600		}
9601
9602		if let Some(tx) = funding_broadcastable {
9603			if channel.context.is_manual_broadcast() {
9604				log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
9605				let mut pending_events = self.pending_events.lock().unwrap();
9606				match channel.funding.get_funding_txo() {
9607					Some(funding_txo) => {
9608						emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
9609					},
9610					None => {
9611						debug_assert!(false, "Channel resumed without a funding txo; this should never happen!");
9612						return (htlc_forwards, decode_update_add_htlcs);
9613					}
9614				};
9615			} else {
9616				log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
9617				self.tx_broadcaster.broadcast_transactions(&[&tx]);
9618			}
9619		}
9620
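		// If an interactive funding (or splice) negotiation has received the counterparty's
		// `commitment_signed` but we haven't provided our `tx_signatures` yet, either surface the
		// transaction to the user for signing or, when we contributed no inputs, produce our
		// (witness-free) `tx_signatures` immediately.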
9621		if let Some(signing_session) = (!channel.is_awaiting_monitor_update())
9622			.then(|| ())
9623			.and_then(|_| channel.context.interactive_tx_signing_session.as_mut())
9624			.filter(|signing_session| signing_session.has_received_commitment_signed())
9625			.filter(|signing_session| signing_session.holder_tx_signatures().is_none())
9626		{
9627			if signing_session.has_local_contribution() {
9628				let mut pending_events = self.pending_events.lock().unwrap();
9629				let unsigned_transaction = signing_session.unsigned_tx().tx().clone();
9630				let event_action = (
9631					Event::FundingTransactionReadyForSigning {
9632						unsigned_transaction,
9633						counterparty_node_id,
9634						channel_id: channel.context.channel_id(),
9635						user_channel_id: channel.context.get_user_id(),
9636					},
9637					None,
9638				);
9639
9640				if !pending_events.contains(&event_action) {
9641					pending_events.push_back(event_action);
9642				}
9643			} else {
9644				let txid = signing_session.unsigned_tx().compute_txid();
9645				let best_block_height = self.best_block.read().unwrap().height;
9646				match channel.funding_transaction_signed(txid, vec![], best_block_height, &self.logger) {
9647					Ok(FundingTxSigned {
9648						tx_signatures: Some(tx_signatures),
9649						funding_tx,
9650						splice_negotiated,
9651						splice_locked,
9652					}) => {
9653						if let Some(funding_tx) = funding_tx {
9654							self.broadcast_interactive_funding(channel, &funding_tx, &self.logger);
9655						}
9656
9657						if let Some(splice_negotiated) = splice_negotiated {
9658							self.pending_events.lock().unwrap().push_back((
9659								events::Event::SplicePending {
9660									channel_id: channel.context.channel_id(),
9661									counterparty_node_id,
9662									user_channel_id: channel.context.get_user_id(),
9663									new_funding_txo: splice_negotiated.funding_txo,
9664									channel_type: splice_negotiated.channel_type,
9665									new_funding_redeem_script: splice_negotiated.funding_redeem_script,
9666								},
9667								None,
9668							));
9669						}
9670
9671						if channel.context.is_connected() {
9672							pending_msg_events.push(MessageSendEvent::SendTxSignatures {
9673								node_id: counterparty_node_id,
9674								msg: tx_signatures,
9675							});
9676							if let Some(splice_locked) = splice_locked {
9677								pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
9678									node_id: counterparty_node_id,
9679									msg: splice_locked,
9680								});
9681							}
9682						}
9683					},
9684					Ok(FundingTxSigned { tx_signatures: None, .. }) => {
9685						debug_assert!(false, "If our tx_signatures is empty, then we should send it first!");
9686					},
9687					Err(err) => {
9688						log_warn!(logger, "Failed signing interactive funding transaction: {err:?}");
9689					},
9690				}
9691			}
9692		}
9693
9694		{
9695			let mut pending_events = self.pending_events.lock().unwrap();
9696			emit_channel_pending_event!(pending_events, channel);
9697			emit_initial_channel_ready_event!(pending_events, channel);
9698		}
9699
9700		(htlc_forwards, decode_update_add_htlcs)
9701	}
9702
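	/// Called when a `ChannelMonitorUpdate` has been persisted for `channel_id`. Drops in-flight
	/// updates up to `highest_applied_update_id` and, once nothing remains in flight, resumes the
	/// channel (or runs any queued post-update actions if the channel is closed).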
9703	#[rustfmt::skip]
9704	fn channel_monitor_updated(&self, channel_id: &ChannelId, highest_applied_update_id: Option<u64>, counterparty_node_id: &PublicKey) {
9705		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
9706
9707		let per_peer_state = self.per_peer_state.read().unwrap();
9708		let mut peer_state_lock;
9709		let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
9710		if peer_state_mutex_opt.is_none() { return }
9711		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
9712		let peer_state = &mut *peer_state_lock;
9713
9714		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*channel_id), None);
9715		let remaining_in_flight =
9716			if let Some((_, pending)) = peer_state.in_flight_monitor_updates.get_mut(channel_id) {
9717				if let Some(highest_applied_update_id) = highest_applied_update_id {
9718					pending.retain(|upd| upd.update_id > highest_applied_update_id);
9719					log_trace!(
9720						logger,
9721						"ChannelMonitor updated to {highest_applied_update_id}. {} pending in-flight updates.",
9722						pending.len()
9723					);
9724				} else if let Some(update) = pending.get(0) {
9725					log_trace!(
9726						logger,
9727						"ChannelMonitor updated to {}. {} pending in-flight updates.",
9728						update.update_id - 1,
9729						pending.len()
9730					);
9731				} else {
9732					log_trace!(
9733						logger,
9734						"ChannelMonitor updated. {} pending in-flight updates.",
9735						pending.len()
9736					);
9737				}
9738				pending.len()
9739			} else { 0 };
9740
9741		if remaining_in_flight != 0 {
9742			return;
9743		}
9744
9745		if let Some(chan) = peer_state.channel_by_id
9746			.get_mut(channel_id)
9747			.and_then(Channel::as_funded_mut)
9748		{
9749			if chan.is_awaiting_monitor_update() {
9750				handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
9751			} else {
9752				log_trace!(logger, "Channel is open but not awaiting update");
9753			}
9754		} else {
9755			let update_actions = peer_state.monitor_update_blocked_actions
9756				.remove(channel_id).unwrap_or(Vec::new());
9757			log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
9758			mem::drop(peer_state_lock);
9759			mem::drop(per_peer_state);
9760			self.handle_monitor_update_completion_actions(update_actions);
9761		}
9762	}
9763
9764	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
9765	///
9766	/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
9767	/// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
9768	/// the channel.
9769	///
9770	/// The `user_channel_id` parameter will be provided back in
9771	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
9772	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
9773	///
9774	/// Note that this method will return an error and reject the channel if it requires support
9775	/// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
9776	/// used to accept such channels.
9777	///
9778	/// NOTE: LDK makes no attempt to prevent the counterparty from using non-standard inputs which
9779	/// will prevent the funding transaction from being relayed on the bitcoin network and hence from
9780	/// confirming.
9781	///
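	/// Below is a minimal sketch (not compiled as a doctest) of calling this from an
	/// [`Event::OpenChannelRequest`] handler; the `channel_manager` handle and the chosen
	/// `user_channel_id` value are illustrative assumptions:
	///
	/// ```ignore
	/// use lightning::events::Event;
	///
	/// // Inside your event handler:
	/// if let Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } = event {
	/// 	// Accept with an arbitrary user_channel_id and no config overrides. An Err here means
	/// 	// the channel was rejected, e.g. because it required zero-conf support.
	/// 	let _ = channel_manager.accept_inbound_channel(
	/// 		&temporary_channel_id,
	/// 		&counterparty_node_id,
	/// 		42,
	/// 		None,
	/// 	);
	/// }
	/// ```
	///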
9782	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
9783	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
9784	pub fn accept_inbound_channel(
9785		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
9786		user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>,
9787	) -> Result<(), APIError> {
9788		self.do_accept_inbound_channel(
9789			temporary_channel_id,
9790			counterparty_node_id,
9791			false,
9792			user_channel_id,
9793			config_overrides,
9794		)
9795	}
9796
9797	/// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating
9798	/// it as confirmed immediately.
9799	///
9800	/// The `user_channel_id` parameter will be provided back in
9801	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
9802	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
9803	///
9804	/// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
9805	/// and (if the counterparty agrees), enables forwarding of payments immediately.
9806	///
9807	/// This fully trusts that the counterparty has honestly and correctly constructed the funding
9808	/// transaction and blindly assumes that it will eventually confirm.
9809	///
9810	/// If it does not confirm before we decide to close the channel, or if the funding transaction
9811	/// does not pay the correct amount to the correct script, *you will lose funds*.
9812	///
9813	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
9814	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
9815	pub fn accept_inbound_channel_from_trusted_peer_0conf(
9816		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey,
9817		user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>,
9818	) -> Result<(), APIError> {
9819		self.do_accept_inbound_channel(
9820			temporary_channel_id,
9821			counterparty_node_id,
9822			true,
9823			user_channel_id,
9824			config_overrides,
9825		)
9826	}
9827
9828	/// TODO(dual_funding): Allow contributions, pass intended amount and inputs
9829	#[rustfmt::skip]
9830	fn do_accept_inbound_channel(
9831		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
9832		user_channel_id: u128, config_overrides: Option<ChannelConfigOverrides>
9833	) -> Result<(), APIError> {
9834
9835		let mut config = self.config.read().unwrap().clone();
9836
9837		// Apply configuration overrides.
9838		if let Some(overrides) = config_overrides {
9839			config.apply(&overrides);
9840		};
9841
9842		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
9843		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
9844
9845		let peers_without_funded_channels =
9846			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
9847		let per_peer_state = self.per_peer_state.read().unwrap();
9848		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9849		.ok_or_else(|| {
9850			let err_str = format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}");
9851			log_error!(logger, "{}", err_str);
9852
9853			APIError::ChannelUnavailable { err: err_str }
9854		})?;
9855		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9856		let peer_state = &mut *peer_state_lock;
9857		let is_only_peer_channel = peer_state.total_channel_count() == 1;
9858
9859		// Find (and remove) the channel in the unaccepted table. If it's not there, something weird is
9860		// happening, so return an error. N.B. that we create the channel with an outbound SCID of zero so
9861		// that we can delay allocating the SCID until after we're sure that the checks below will
9862		// succeed.
9863		let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
9864			Some(unaccepted_channel) => {
9865				let best_block_height = self.best_block.read().unwrap().height;
9866				match unaccepted_channel.open_channel_msg {
9867					OpenChannelMessage::V1(open_channel_msg) => {
9868						InboundV1Channel::new(
9869							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
9870							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
9871							user_channel_id, &config, best_block_height, &self.logger, accept_0conf
9872						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
9873						).map(|mut channel| {
9874							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
9875							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
9876								MessageSendEvent::SendAcceptChannel {
9877									node_id: *counterparty_node_id,
9878									msg,
9879								}
9880							});
9881							(*temporary_channel_id, Channel::from(channel), message_send_event)
9882						})
9883					},
9884					OpenChannelMessage::V2(open_channel_msg) => {
9885						PendingV2Channel::new_inbound(
9886							&self.fee_estimator, &self.entropy_source, &self.signer_provider,
9887							self.get_our_node_id(), *counterparty_node_id,
9888							&self.channel_type_features(), &peer_state.latest_features,
9889							&open_channel_msg,
9890							user_channel_id, &config, best_block_height,
9891							&self.logger,
9892						).map_err(|e| {
9893							let channel_id = open_channel_msg.common_fields.temporary_channel_id;
9894							MsgHandleErrInternal::from_chan_no_close(e, channel_id)
9895						}).map(|channel| {
9896							let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
9897								node_id: channel.context.get_counterparty_node_id(),
9898								msg: channel.accept_inbound_dual_funded_channel()
9899							};
9900							(channel.context.channel_id(), Channel::from(channel), Some(message_send_event))
9901						})
9902					},
9903				}
9904			},
9905			None => {
9906				let err_str = "No such channel awaiting to be accepted.".to_owned();
9907				log_error!(logger, "{}", err_str);
9908
9909				return Err(APIError::APIMisuseError { err: err_str });
9910			}
9911		};
9912
9913		// We have to match below instead of using map_err on the above, as inside the map_err closure
9914		// the borrow checker would consider `peer_state` moved even though we would bail out with the `?` operator.
9915		let (channel_id, mut channel, message_send_event) = match res {
9916			Ok(res) => res,
9917			Err(err) => {
9918				mem::drop(peer_state_lock);
9919				mem::drop(per_peer_state);
9920				// TODO(dunxen): Find/make less icky way to do this.
9921				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
9922					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
9923					Err(e) => {
9924						return Err(APIError::ChannelUnavailable { err: e.err });
9925					},
9926				}
9927			}
9928		};
9929
9930		if accept_0conf {
9931			// This should have been correctly configured by the call to Inbound(V1/V2)Channel::new.
9932			debug_assert!(channel.minimum_depth().unwrap() == 0);
9933		} else if channel.funding().get_channel_type().requires_zero_conf() {
9934			let send_msg_err_event = MessageSendEvent::HandleError {
9935				node_id: channel.context().get_counterparty_node_id(),
9936				action: msgs::ErrorAction::SendErrorMessage{
9937					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
9938				}
9939			};
9940			debug_assert!(peer_state.is_connected);
9941			peer_state.pending_msg_events.push(send_msg_err_event);
9942			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
9943			log_error!(logger, "{}", err_str);
9944
9945			return Err(APIError::APIMisuseError { err: err_str });
9946		} else {
9947			// If this peer already has some channels, a new channel won't increase our number of peers
9948			// with unfunded channels, so as long as we aren't over the maximum number of unfunded
9949			// channels per-peer we can accept channels from a peer with existing ones.
9950			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
9951				let send_msg_err_event = MessageSendEvent::HandleError {
9952					node_id: channel.context().get_counterparty_node_id(),
9953					action: msgs::ErrorAction::SendErrorMessage{
9954						msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
9955					}
9956				};
9957				debug_assert!(peer_state.is_connected);
9958				peer_state.pending_msg_events.push(send_msg_err_event);
9959				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
9960				log_error!(logger, "{}", err_str);
9961
9962				return Err(APIError::APIMisuseError { err: err_str });
9963			}
9964		}
9965
9966		// Now that we know we have a channel, assign an outbound SCID alias.
9967		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
9968		channel.context_mut().set_outbound_scid_alias(outbound_scid_alias);
9969
9970		if let Some(message_send_event) = message_send_event {
9971			debug_assert!(peer_state.is_connected);
9972			peer_state.pending_msg_events.push(message_send_event);
9973		}
9974		peer_state.channel_by_id.insert(channel_id, channel);
9975
9976		Ok(())
9977	}
9978
9979	/// Gets the number of peers which match the given filter and do not have any funded, outbound,
9980	/// or 0-conf channels.
9981	///
9982	/// The filter is called with each peer's state and returns whether that peer should be
9983	/// considered at all; peers for which it returns `false` are never counted.
9984	fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
9985	where
9986		Filter: Fn(&PeerState<SP>) -> bool,
9987	{
9988		let mut peers_without_funded_channels = 0;
9989		let best_block_height = self.best_block.read().unwrap().height;
9990		{
9991			let peer_state_lock = self.per_peer_state.read().unwrap();
9992			for (_, peer_mtx) in peer_state_lock.iter() {
9993				let peer = peer_mtx.lock().unwrap();
9994				if !maybe_count_peer(&*peer) {
9995					continue;
9996				}
9997				let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
9998				if num_unfunded_channels == peer.total_channel_count() {
9999					peers_without_funded_channels += 1;
10000				}
10001			}
10002		}
10003		return peers_without_funded_channels;
10004	}
10005
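	/// Counts the channels with `peer` which are relevant to our unfunded-channel DoS limits:
	/// inbound, non-0conf channels without an on-chain confirmation, plus any inbound channel
	/// requests we have not yet accepted.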
10006	#[rustfmt::skip]
10007	fn unfunded_channel_count(
10008		peer: &PeerState<SP>, best_block_height: u32
10009	) -> usize {
10010		let mut num_unfunded_channels = 0;
10011		for (_, chan) in peer.channel_by_id.iter() {
10012			match chan.as_funded() {
10013				Some(funded_chan) => {
10014					// This covers non-zero-conf inbound `Channel`s that we are currently monitoring, but those
10015					// which have not yet had any confirmations on-chain.
10016					if !funded_chan.funding.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
10017						funded_chan.funding.get_funding_tx_confirmations(best_block_height) == 0
10018					{
10019						num_unfunded_channels += 1;
10020					}
10021				},
10022				None => {
10023					// Outbound channels don't contribute to the unfunded count in the DoS context.
10024					if chan.funding().is_outbound() {
10025						continue;
10026					}
10027
10028					// 0conf channels are not considered unfunded.
10029					if chan.minimum_depth().unwrap_or(1) == 0 {
10030						continue;
10031					}
10032
10033					// Inbound V2 channels with contributed inputs are not considered unfunded.
10034					if let Some(unfunded_chan) = chan.as_unfunded_v2() {
10035						if unfunded_chan.funding_negotiation_context.our_funding_contribution > SignedAmount::ZERO {
10036							continue;
10037						}
10038					}
10039
10040					num_unfunded_channels += 1;
10041				},
10042			}
10043		}
10044		num_unfunded_channels + peer.inbound_channel_request_by_id.len()
10045	}
10046
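	/// Handles an incoming `open_channel` or `open_channel2` message, enforcing our inbound
	/// unfunded-channel limits and either queueing an [`Event::OpenChannelRequest`] (when manual
	/// acceptance is enabled) or creating and accepting the channel immediately.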
10047	#[rustfmt::skip]
10048	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
10049		let common_fields = match msg {
10050			OpenChannelMessageRef::V1(msg) => &msg.common_fields,
10051			OpenChannelMessageRef::V2(msg) => &msg.common_fields,
10052		};
10053
10054		// Do common open_channel(2) checks
10055
10056		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
10057		// likely to be lost on restart!
10058		if common_fields.chain_hash != self.chain_hash {
10059			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
10060				 common_fields.temporary_channel_id));
10061		}
10062
10063		if !self.config.read().unwrap().accept_inbound_channels {
10064			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
10065				 common_fields.temporary_channel_id));
10066		}
10067
10068		// Get the number of peers with channels, but without funded ones. We don't care too much
10069		// about peers that never open a channel, so we filter by peers that have at least one
10070		// channel, and then limit the number of those with unfunded channels.
10071		let channeled_peers_without_funding =
10072			self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
10073
10074		let per_peer_state = self.per_peer_state.read().unwrap();
10075		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10076		    .ok_or_else(|| {
10077				debug_assert!(false);
10078				MsgHandleErrInternal::send_err_msg_no_close(
10079					format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
10080					common_fields.temporary_channel_id)
10081			})?;
10082		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10083		let peer_state = &mut *peer_state_lock;
10084
10085		// If this peer already has some channels, a new channel won't increase our number of peers
10086		// with unfunded channels, so as long as we aren't over the maximum number of unfunded
10087		// channels per-peer we can accept channels from a peer with existing ones.
10088		if peer_state.total_channel_count() == 0 &&
10089			channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
10090			!self.config.read().unwrap().manually_accept_inbound_channels
10091		{
10092			return Err(MsgHandleErrInternal::send_err_msg_no_close(
10093				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
10094				common_fields.temporary_channel_id));
10095		}
10096
10097		let best_block_height = self.best_block.read().unwrap().height;
10098		if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
10099			return Err(MsgHandleErrInternal::send_err_msg_no_close(
10100				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
10101				common_fields.temporary_channel_id));
10102		}
10103
10104		let channel_id = common_fields.temporary_channel_id;
10105		let channel_exists = peer_state.has_channel(&channel_id);
10106		if channel_exists {
10107			return Err(MsgHandleErrInternal::send_err_msg_no_close(
10108				"temporary_channel_id collision for the same peer!".to_owned(),
10109				common_fields.temporary_channel_id));
10110		}
10111
10112		// We can already determine the channel type at this point, as we'll need it immediately in
10113		// both the manual and the automatic acceptance cases.
10114		let channel_type = channel::channel_type_from_open_channel(
10115			common_fields, &self.channel_type_features()
10116		).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
10117
10118		// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
10119		if self.config.read().unwrap().manually_accept_inbound_channels {
10120			let mut pending_events = self.pending_events.lock().unwrap();
10121			let is_announced = (common_fields.channel_flags & 1) == 1;
10122			pending_events.push_back((events::Event::OpenChannelRequest {
10123				temporary_channel_id: common_fields.temporary_channel_id,
10124				counterparty_node_id: *counterparty_node_id,
10125				funding_satoshis: common_fields.funding_satoshis,
10126				channel_negotiation_type: match msg {
10127					OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
10128					OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
10129				},
10130				channel_type,
10131				is_announced,
10132				params: common_fields.channel_parameters(),
10133			}, None));
10134			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
10135				open_channel_msg: match msg {
10136					OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
10137					OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
10138				},
10139				ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
10140			});
10141			return Ok(());
10142		}
10143
10144		// Otherwise create the channel right now.
10145		let mut random_bytes = [0u8; 16];
10146		random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
10147		let user_channel_id = u128::from_be_bytes(random_bytes);
10148
10149		if channel_type.requires_zero_conf() {
10150			return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
10151		}
10152		if channel_type.requires_anchors_zero_fee_htlc_tx() || channel_type.requires_anchor_zero_fee_commitments() {
10153			return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
10154		}
10155
10156		let (mut channel, message_send_event) = match msg {
10157			OpenChannelMessageRef::V1(msg) => {
10158				let mut channel = InboundV1Channel::new(
10159					&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
10160					&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
10161					&self.config.read().unwrap(), best_block_height, &self.logger, /*is_0conf=*/false
10162				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
10163				let logger = WithChannelContext::from(&self.logger, &channel.context, None);
10164				let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
10165					MessageSendEvent::SendAcceptChannel {
10166						node_id: *counterparty_node_id,
10167						msg,
10168					}
10169				});
10170				(Channel::from(channel), message_send_event)
10171			},
10172			OpenChannelMessageRef::V2(msg) => {
10173				let channel = PendingV2Channel::new_inbound(
10174					&self.fee_estimator, &self.entropy_source, &self.signer_provider,
10175					self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(),
10176					&peer_state.latest_features, msg, user_channel_id,
10177					&self.config.read().unwrap(), best_block_height, &self.logger,
10178				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
10179				let message_send_event = MessageSendEvent::SendAcceptChannelV2 {
10180					node_id: *counterparty_node_id,
10181					msg: channel.accept_inbound_dual_funded_channel(),
10182				};
10183				(Channel::from(channel), Some(message_send_event))
10184			},
10185		};
10186
10187		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
10188		channel.context_mut().set_outbound_scid_alias(outbound_scid_alias);
10189
10190		if let Some(message_send_event) = message_send_event {
10191			peer_state.pending_msg_events.push(message_send_event);
10192		}
10193		peer_state.channel_by_id.insert(channel.context().channel_id(), channel);
10194
10195		Ok(())
10196	}
10197
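	// Handles the counterparty's `accept_channel` for one of our pending outbound V1 channels:
	// the message is validated against our handshake limits and the peer's features, and on
	// success we queue a `FundingGenerationReady` event so the user can construct the funding
	// transaction.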
10198	#[rustfmt::skip]
10199	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
10200		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
10201		// likely to be lost on restart!
10202		let (value, output_script, user_id) = {
10203			let per_peer_state = self.per_peer_state.read().unwrap();
10204			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10205				.ok_or_else(|| {
10206					debug_assert!(false);
10207					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.common_fields.temporary_channel_id)
10208				})?;
10209			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10210			let peer_state = &mut *peer_state_lock;
10211			match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
10212				hash_map::Entry::Occupied(mut chan) => {
10213					match chan.get_mut().as_unfunded_outbound_v1_mut() {
10214						Some(unfunded_chan) => {
10215							let res = unfunded_chan.accept_channel(
10216								msg,
10217								&self.config.read().unwrap().channel_handshake_limits,
10218								&peer_state.latest_features,
10219							);
10220							try_channel_entry!(self, peer_state, res, chan);
10221							(unfunded_chan.funding.get_value_satoshis(), unfunded_chan.funding.get_funding_redeemscript().to_p2wsh(), unfunded_chan.context.get_user_id())
10222						},
10223						None => {
10224							return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
10225						}
10226					}
10227				},
10228				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
10229			}
10230		};
10231		let mut pending_events = self.pending_events.lock().unwrap();
10232		pending_events.push_back((events::Event::FundingGenerationReady {
10233			temporary_channel_id: msg.common_fields.temporary_channel_id,
10234			counterparty_node_id: *counterparty_node_id,
10235			channel_value_satoshis: value,
10236			output_script,
10237			user_channel_id: user_id,
10238		}, None));
10239		Ok(())
10240	}
10241
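	// Handles an inbound `funding_created`: promotes the temporary inbound V1 channel to a funded
	// one, hands its freshly created `ChannelMonitor` to the chain monitor, and on success queues
	// our `funding_signed` reply. If the monitor cannot be registered we treat the channel ID as a
	// duplicate and fail the new channel without disturbing the existing one.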
10242	#[rustfmt::skip]
10243	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
10244		let best_block = *self.best_block.read().unwrap();
10245
10246		let per_peer_state = self.per_peer_state.read().unwrap();
10247		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10248			.ok_or_else(|| {
10249				debug_assert!(false);
10250				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.temporary_channel_id)
10251			})?;
10252
10253		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10254		let peer_state = &mut *peer_state_lock;
10255		let (mut chan, funding_msg_opt, monitor) =
10256			match peer_state.channel_by_id.remove(&msg.temporary_channel_id)
10257				.map(Channel::into_unfunded_inbound_v1)
10258			{
10259				Some(Ok(inbound_chan)) => {
10260					let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
10261					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
10262						Ok(res) => res,
10263						Err((inbound_chan, err)) => {
10264							// We've already removed this inbound channel from the map in `PeerState`
10265							// above so at this point we just need to clean up any lingering entries
10266							// concerning this channel as it is safe to do so.
10267							debug_assert!(matches!(err, ChannelError::Close(_)));
10268							let mut chan = Channel::from(inbound_chan);
10269							return Err(convert_channel_err!(self, peer_state, err, &mut chan).1);
10270						},
10271					}
10272				},
10273				Some(Err(mut chan)) => {
10274					let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
10275					let err = ChannelError::close(err_msg);
10276					return Err(convert_channel_err!(self, peer_state, err, &mut chan).1);
10277				},
10278				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
10279			};
10280
10281		let funded_channel_id = chan.context.channel_id();
10282
10283		macro_rules! fail_chan { ($err: expr) => { {
10284			// Note that at this point we've filled in the funding outpoint on our
10285			// channel, but it's actually in conflict with another channel. If we called
10286			// `convert_channel_err` immediately (and thus `locked_close_channel`), we'd
10287			// remove the existing channel from `outpoint_to_peer`. Thus, we must first
10288			// unset the funding outpoint on the channel.
10289			let err = ChannelError::close($err.to_owned());
10290			chan.unset_funding_info();
10291			let mut chan = Channel::from(chan);
10292			return Err(convert_channel_err!(self, peer_state, err, &mut chan, UNFUNDED_CHANNEL).1);
10293		} } }
10294
10295		match peer_state.channel_by_id.entry(funded_channel_id) {
10296			hash_map::Entry::Occupied(_) => {
10297				fail_chan!("Already had channel with the new channel_id");
10298			},
10299			hash_map::Entry::Vacant(e) => {
10300				let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
10301				if let Ok(persist_state) = monitor_res {
10302					// There's no problem signing a counterparty's funding transaction if our monitor
10303					// hasn't persisted to disk yet - we can't lose money on a channel we haven't yet
10304					// accepted any payments on. We do, however, need to wait to send our channel_ready
10305					// until we have persisted our monitor.
10306					if let Some(msg) = funding_msg_opt {
10307						peer_state.pending_msg_events.push(MessageSendEvent::SendFundingSigned {
10308							node_id: *counterparty_node_id,
10309							msg,
10310						});
10311					}
10312
10313					if let Some(funded_chan) = e.insert(Channel::from(chan)).as_funded_mut() {
10314						handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
10315							per_peer_state, funded_chan, INITIAL_MONITOR);
10316					} else {
10317						unreachable!("This must be a funded channel as we just inserted it.");
10318					}
10319					Ok(())
10320				} else {
10321					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
10322					log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
10323					fail_chan!("Duplicate channel ID");
10324				}
10325			}
10326		}
10327	}
10328
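	// Handles `peer_storage_retrieval`, i.e. our own encrypted backup echoed back to us by a peer.
	// We decrypt it with our peer storage key, deserialize the per-channel summaries, and panic if
	// any of them record a more recent channel state than our node currently has, as that implies
	// we have lost channel state and need to recover via the FundRecoverer.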
10329	fn internal_peer_storage_retrieval(
10330		&self, peer_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
10331	) -> Result<(), MsgHandleErrInternal> {
10332		// TODO: Check if we have any stale or missing ChannelMonitors.
10333		let logger = WithContext::from(&self.logger, Some(peer_node_id), None, None);
10334		let err = || {
10335			MsgHandleErrInternal::from_chan_no_close(
10336				ChannelError::Ignore("Invalid PeerStorageRetrieval message received.".into()),
10337				ChannelId([0; 32]),
10338			)
10339		};
10340
10341		let encrypted_ops = match EncryptedOurPeerStorage::new(msg.data) {
10342			Ok(encrypted_ops) => encrypted_ops,
10343			Err(()) => {
10344				log_debug!(logger, "Received a peer backup which wasn't long enough to be valid");
10345				return Err(err());
10346			},
10347		};
10348
10349		let decrypted = match encrypted_ops.decrypt(&self.node_signer.get_peer_storage_key()) {
10350			Ok(decrypted_ops) => decrypted_ops.into_vec(),
10351			Err(()) => {
10352				log_debug!(logger, "Received a peer backup which was corrupted");
10353				return Err(err());
10354			},
10355		};
10356
10357		log_trace!(logger, "Got valid {}-byte peer backup from {}", decrypted.len(), peer_node_id);
10358		let per_peer_state = self.per_peer_state.read().unwrap();
10359
10360		let mut cursor = io::Cursor::new(decrypted);
10361		let mon_list = <Vec<PeerStorageMonitorHolder> as Readable>::read(&mut cursor)
10362			.unwrap_or_else(|e| {
10363				// This should NEVER happen.
10364				debug_assert!(false);
10365				log_debug!(self.logger, "Unable to unpack the retrieved peer storage {:?}", e);
10366				Vec::new()
10367			});
10368
10369		for mon_holder in mon_list.iter() {
10370			let peer_state_mutex = match per_peer_state.get(&mon_holder.counterparty_node_id) {
10371				Some(mutex) => mutex,
10372				None => {
10373					log_debug!(
10374						logger,
10375						"Not able to find peer_state for the counterparty {}, channel_id {}",
10376						log_pubkey!(mon_holder.counterparty_node_id),
10377						mon_holder.channel_id
10378					);
10379					continue;
10380				},
10381			};
10382
10383			let peer_state_lock = peer_state_mutex.lock().unwrap();
10384			let peer_state = &*peer_state_lock;
10385
10386			match peer_state.channel_by_id.get(&mon_holder.channel_id) {
10387				Some(chan) => {
10388					if let Some(funded_chan) = chan.as_funded() {
10389						if funded_chan.get_revoked_counterparty_commitment_transaction_number()
10390							> mon_holder.min_seen_secret
10391						{
10392							panic!(
10393								"Lost channel state for channel {}.\n\
10394								Received peer storage with a more recent state than what our node had.\n\
10395								Use the FundRecoverer to initiate a force close and sweep the funds.",
10396								&mon_holder.channel_id
10397							);
10398						}
10399					}
10400				},
10401				None => {
10402					log_debug!(logger, "Found an unknown channel {}", &mon_holder.channel_id);
10403				},
10404			}
10405		}
10406		Ok(())
10407	}
10408
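	// Handles a peer's `peer_storage` backup blob. We only act as a storage provider for peers
	// with at least one funded channel and (outside of tests) reject blobs larger than
	// `MAX_PEER_STORAGE_SIZE`; otherwise the raw bytes are simply stashed in the peer's state.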
10409	#[rustfmt::skip]
10410	fn internal_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) -> Result<(), MsgHandleErrInternal> {
10411		let per_peer_state = self.per_peer_state.read().unwrap();
10412		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
10413			.ok_or_else(|| {
10414				debug_assert!(false);
10415				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), ChannelId([0; 32]))
10416			})?;
10417
10418		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10419		let peer_state = &mut *peer_state_lock;
10420		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
10421
10422		// Check if we have any funded channels with this peer (currently we only provide peer storage to peers we have a funded channel with).
10423		if !peer_state.channel_by_id.values().any(|phase| phase.is_funded()) {
10424			log_debug!(logger, "Ignoring peer storage request from {} as we don't have any funded channels with them.", log_pubkey!(counterparty_node_id));
10425			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
10426				"Ignoring peer_storage message, as peer storage is currently supported only for \
10427				peers with an active funded channel.".into(),
10428			), ChannelId([0; 32])));
10429		}
10430
10431		#[cfg(not(test))]
10432		if msg.data.len() > MAX_PEER_STORAGE_SIZE {
10433			log_debug!(logger, "Sending warning to peer and ignoring peer storage request from {} as its over 1KiB", log_pubkey!(counterparty_node_id));
10434
10435			return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
10436				format!("Supports only data up to {} bytes in peer storage.", MAX_PEER_STORAGE_SIZE)
10437			), ChannelId([0; 32])));
10438		}
10439
10440		log_trace!(logger, "Received peer_storage from {}", log_pubkey!(counterparty_node_id));
10441		peer_state.peer_storage = msg.data;
10442
10443		Ok(())
10444	}
10445
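	// Handles `funding_signed` for one of our outbound channels: the channel becomes funded and
	// its initial `ChannelMonitor` is registered with the chain monitor. If the monitor cannot be
	// watched, the channel ID was a duplicate and the channel is closed.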
10446	#[rustfmt::skip]
10447	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
10448		let best_block = *self.best_block.read().unwrap();
10449		let per_peer_state = self.per_peer_state.read().unwrap();
10450		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10451			.ok_or_else(|| {
10452				debug_assert!(false);
10453				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
10454			})?;
10455
10456		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10457		let peer_state = &mut *peer_state_lock;
10458		match peer_state.channel_by_id.entry(msg.channel_id) {
10459			hash_map::Entry::Occupied(mut chan_entry) => {
10460				let chan = chan_entry.get_mut();
10461				match chan
10462					.funding_signed(&msg, best_block, &self.signer_provider, &self.logger)
10463					.and_then(|(funded_chan, monitor)| {
10464						self.chain_monitor
10465							.watch_channel(funded_chan.context.channel_id(), monitor)
10466							.map_err(|()| {
10467								// We weren't able to watch the channel to begin with, so no
10468								// updates should be made on it. Previously, full_stack_target
10469								// found an (unreachable) panic when the monitor update contained
10470								// within `shutdown_finish` was applied.
10471								funded_chan.unset_funding_info();
10472								ChannelError::close("Channel ID was a duplicate".to_owned())
10473							})
10474							.map(|persist_status| (funded_chan, persist_status))
10475					})
10476				{
10477					Ok((funded_chan, persist_status)) => {
10478						handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, funded_chan, INITIAL_MONITOR);
10479						Ok(())
10480					},
10481					Err(e) => try_channel_entry!(self, peer_state, Err(e), chan_entry),
10482				}
10483			},
10484			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
10485		}
10486	}
10487
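	// Shared plumbing for the interactive transaction construction handlers below: looks up the
	// channel, runs the provided per-message closure on it, queues whatever reply it produces, and
	// emits a `SpliceFailed` event if a failure aborted an in-progress splice negotiation.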
10488	fn internal_tx_msg<
10489		HandleTxMsgFn: Fn(
10490			&mut Channel<SP>,
10491		) -> Result<InteractiveTxMessageSend, (ChannelError, Option<SpliceFundingFailed>)>,
10492	>(
10493		&self, counterparty_node_id: &PublicKey, channel_id: ChannelId,
10494		tx_msg_handler: HandleTxMsgFn,
10495	) -> Result<NotifyOption, MsgHandleErrInternal> {
10496		let per_peer_state = self.per_peer_state.read().unwrap();
10497		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
10498			debug_assert!(false);
10499			MsgHandleErrInternal::send_err_msg_no_close(
10500				format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
10501				channel_id,
10502			)
10503		})?;
10504		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10505		let peer_state = &mut *peer_state_lock;
10506		match peer_state.channel_by_id.entry(channel_id) {
10507			hash_map::Entry::Occupied(mut chan_entry) => {
10508				let channel = chan_entry.get_mut();
10509				match tx_msg_handler(channel) {
10510					Ok(msg_send) => {
10511						let msg_send_event = msg_send.into_msg_send_event(*counterparty_node_id);
10512						peer_state.pending_msg_events.push(msg_send_event);
10513						Ok(NotifyOption::SkipPersistHandleEvents)
10514					},
10515					Err((error, splice_funding_failed)) => {
10516						if let Some(splice_funding_failed) = splice_funding_failed {
10517							let pending_events = &mut self.pending_events.lock().unwrap();
10518							pending_events.push_back((events::Event::SpliceFailed {
10519								channel_id,
10520								counterparty_node_id: *counterparty_node_id,
10521								user_channel_id: channel.context().get_user_id(),
10522								abandoned_funding_txo: splice_funding_failed.funding_txo,
10523								channel_type: splice_funding_failed.channel_type.clone(),
10524								contributed_inputs: splice_funding_failed.contributed_inputs,
10525								contributed_outputs: splice_funding_failed.contributed_outputs,
10526							}, None));
10527						}
10528						Err(MsgHandleErrInternal::from_chan_no_close(error, channel_id))
10529					},
10530				}
10531			},
10532			hash_map::Entry::Vacant(_) => {
10533				Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
10534					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
10535					counterparty_node_id), channel_id)
10536				)
10537			}
10538		}
10539	}
10540
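	// The next four handlers are thin wrappers which feed the corresponding interactive
	// transaction construction messages (`tx_add_input`, `tx_add_output`, `tx_remove_input` and
	// `tx_remove_output`) into the channel via `internal_tx_msg` above.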
10541	fn internal_tx_add_input(
10542		&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput,
10543	) -> Result<NotifyOption, MsgHandleErrInternal> {
10544		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
10545			channel.tx_add_input(msg, &self.logger)
10546		})
10547	}
10548
10549	fn internal_tx_add_output(
10550		&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput,
10551	) -> Result<NotifyOption, MsgHandleErrInternal> {
10552		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
10553			channel.tx_add_output(msg, &self.logger)
10554		})
10555	}
10556
10557	fn internal_tx_remove_input(
10558		&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput,
10559	) -> Result<NotifyOption, MsgHandleErrInternal> {
10560		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
10561			channel.tx_remove_input(msg, &self.logger)
10562		})
10563	}
10564
10565	fn internal_tx_remove_output(
10566		&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput,
10567	) -> Result<NotifyOption, MsgHandleErrInternal> {
10568		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel: &mut Channel<SP>| {
10569			channel.tx_remove_output(msg, &self.logger)
10570		})
10571	}
10572
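	// Handles `tx_complete` during interactive transaction construction. The channel either hands
	// back another interactive-tx message to send or, once negotiation completes, a
	// `commitment_signed` for the newly constructed funding transaction. A failure may abort a
	// pending splice, in which case we surface a `SpliceFailed` event.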
10573	#[rustfmt::skip]
10574	fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<NotifyOption, MsgHandleErrInternal> {
10575		let per_peer_state = self.per_peer_state.read().unwrap();
10576		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
10577			.ok_or_else(|| {
10578				debug_assert!(false);
10579				MsgHandleErrInternal::send_err_msg_no_close(
10580					format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
10581					msg.channel_id)
10582			})?;
10583		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10584		let peer_state = &mut *peer_state_lock;
10585		match peer_state.channel_by_id.entry(msg.channel_id) {
10586			hash_map::Entry::Occupied(mut chan_entry) => {
10587				let chan = chan_entry.get_mut();
10588				match chan.tx_complete(msg, &self.logger) {
10589					Ok((interactive_tx_msg_send, commitment_signed)) => {
10590						let persist = if interactive_tx_msg_send.is_some() || commitment_signed.is_some() {
10591							NotifyOption::SkipPersistHandleEvents
10592						} else {
10593							NotifyOption::SkipPersistNoEvents
10594						};
10595						if let Some(interactive_tx_msg_send) = interactive_tx_msg_send {
10596							let msg_send_event = interactive_tx_msg_send.into_msg_send_event(counterparty_node_id);
10597							peer_state.pending_msg_events.push(msg_send_event);
10598						};
10599						if let Some(commitment_signed) = commitment_signed {
10600							peer_state.pending_msg_events.push(MessageSendEvent::UpdateHTLCs {
10601								node_id: counterparty_node_id,
10602								channel_id: msg.channel_id,
10603								updates: CommitmentUpdate {
10604									commitment_signed: vec![commitment_signed],
10605									update_add_htlcs: vec![],
10606									update_fulfill_htlcs: vec![],
10607									update_fail_htlcs: vec![],
10608									update_fail_malformed_htlcs: vec![],
10609									update_fee: None,
10610								},
10611							});
10612						}
10613						Ok(persist)
10614					},
10615					Err((error, splice_funding_failed)) => {
10616						if let Some(splice_funding_failed) = splice_funding_failed {
10617							let pending_events = &mut self.pending_events.lock().unwrap();
10618							pending_events.push_back((events::Event::SpliceFailed {
10619								channel_id: msg.channel_id,
10620								counterparty_node_id,
10621								user_channel_id: chan.context().get_user_id(),
10622								abandoned_funding_txo: splice_funding_failed.funding_txo,
10623								channel_type: splice_funding_failed.channel_type.clone(),
10624								contributed_inputs: splice_funding_failed.contributed_inputs,
10625								contributed_outputs: splice_funding_failed.contributed_outputs,
10626							}, None));
10627						}
10628						Err(MsgHandleErrInternal::from_chan_no_close(error, msg.channel_id))
10629					},
10630				}
10631			},
10632			hash_map::Entry::Vacant(_) => {
10633				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
10634			}
10635		}
10636	}
10637
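	// Handles `tx_signatures` for an interactively constructed funding (or splice) transaction.
	// Depending on where we are in the flow this can yield our own `tx_signatures` to send, a
	// `splice_locked` message, a fully signed funding transaction to broadcast, and/or a
	// `SplicePending` event for a newly negotiated splice.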
10638	#[rustfmt::skip]
10639	fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
10640	-> Result<(), MsgHandleErrInternal> {
10641		let per_peer_state = self.per_peer_state.read().unwrap();
10642		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10643			.ok_or_else(|| {
10644				debug_assert!(false);
10645				MsgHandleErrInternal::send_err_msg_no_close(
10646					format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
10647					msg.channel_id)
10648			})?;
10649		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10650		let peer_state = &mut *peer_state_lock;
10651		match peer_state.channel_by_id.entry(msg.channel_id) {
10652			hash_map::Entry::Occupied(mut chan_entry) => {
10653				match chan_entry.get_mut().as_funded_mut() {
10654					Some(chan) => {
10655						let best_block_height = self.best_block.read().unwrap().height;
10656						let FundingTxSigned {
10657							tx_signatures,
10658							funding_tx,
10659							splice_negotiated,
10660							splice_locked,
10661						} = try_channel_entry!(
10662							self,
10663							peer_state,
10664							chan.tx_signatures(msg, best_block_height, &self.logger),
10665							chan_entry
10666						);
10667						if let Some(tx_signatures) = tx_signatures {
10668							peer_state.pending_msg_events.push(MessageSendEvent::SendTxSignatures {
10669								node_id: *counterparty_node_id,
10670								msg: tx_signatures,
10671							});
10672						}
10673						if let Some(splice_locked) = splice_locked {
10674							peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
10675								node_id: *counterparty_node_id,
10676								msg: splice_locked,
10677							});
10678						}
10679						if let Some(ref funding_tx) = funding_tx {
10680							self.broadcast_interactive_funding(chan, funding_tx, &self.logger);
10681						}
10682						if let Some(splice_negotiated) = splice_negotiated {
10683							self.pending_events.lock().unwrap().push_back((
10684								events::Event::SplicePending {
10685									channel_id: msg.channel_id,
10686									counterparty_node_id: *counterparty_node_id,
10687									user_channel_id: chan.context.get_user_id(),
10688									new_funding_txo: splice_negotiated.funding_txo,
10689									channel_type: splice_negotiated.channel_type,
10690									new_funding_redeem_script: splice_negotiated.funding_redeem_script,
10691								},
10692								None,
10693							));
10694						}
10695					},
10696					None => {
10697						let msg = "Got an unexpected tx_signatures message";
10698						let reason = ClosureReason::ProcessingError { err: msg.to_owned() };
10699						let err = ChannelError::Close((msg.to_owned(), reason));
10700						try_channel_entry!(self, peer_state, Err(err), chan_entry)
10701					},
10702				}
10703				Ok(())
10704			},
10705			hash_map::Entry::Vacant(_) => {
10706				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
10707			}
10708		}
10709	}
10710
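	// Handles `tx_abort`: the channel may hand back a `tx_abort` of our own to send, and if the
	// abort tears down an in-progress splice we emit a `SpliceFailed` event. We only request a
	// re-persist when something actually changed.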
10711	#[rustfmt::skip]
10712	fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
10713	-> Result<NotifyOption, MsgHandleErrInternal> {
10714		let per_peer_state = self.per_peer_state.read().unwrap();
10715		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10716			.ok_or_else(|| {
10717				debug_assert!(false);
10718				MsgHandleErrInternal::send_err_msg_no_close(
10719					format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
10720					msg.channel_id)
10721			})?;
10722		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10723		let peer_state = &mut *peer_state_lock;
10724		match peer_state.channel_by_id.entry(msg.channel_id) {
10725			hash_map::Entry::Occupied(mut chan_entry) => {
10726				let res = chan_entry.get_mut().tx_abort(msg, &self.logger);
10727				let (tx_abort, splice_failed) = try_channel_entry!(self, peer_state, res, chan_entry);
10728
10729				let persist = if tx_abort.is_some() || splice_failed.is_some() {
10730					NotifyOption::DoPersist
10731				} else {
10732					NotifyOption::SkipPersistNoEvents
10733				};
10734
10735				if let Some(tx_abort_msg) = tx_abort {
10736					peer_state.pending_msg_events.push(MessageSendEvent::SendTxAbort {
10737						node_id: *counterparty_node_id,
10738						msg: tx_abort_msg,
10739					});
10740				}
10741
10742				if let Some(splice_funding_failed) = splice_failed {
10743					let pending_events = &mut self.pending_events.lock().unwrap();
10744					pending_events.push_back((events::Event::SpliceFailed {
10745						channel_id: msg.channel_id,
10746						counterparty_node_id: *counterparty_node_id,
10747						user_channel_id: chan_entry.get().context().get_user_id(),
10748						abandoned_funding_txo: splice_funding_failed.funding_txo,
10749						channel_type: splice_funding_failed.channel_type,
10750						contributed_inputs: splice_funding_failed.contributed_inputs,
10751						contributed_outputs: splice_funding_failed.contributed_outputs,
10752					}, None));
10753				}
10754
10755				Ok(persist)
10756			},
10757			hash_map::Entry::Vacant(_) => {
10758				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
10759			}
10760		}
10761	}
10762
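	// Handles our counterparty's `channel_ready`. If the channel will be announced we queue our
	// `announcement_signatures`; otherwise, once the channel is usable, we send a private initial
	// `channel_update` directly to the counterparty. We also emit the initial `ChannelReady`
	// event.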
10763	#[rustfmt::skip]
10764	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
10765		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
10766		// closing a channel), so any changes are likely to be lost on restart!
10767		let per_peer_state = self.per_peer_state.read().unwrap();
10768		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
10769			.ok_or_else(|| {
10770				debug_assert!(false);
10771				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
10772			})?;
10773		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10774		let peer_state = &mut *peer_state_lock;
10775		match peer_state.channel_by_id.entry(msg.channel_id) {
10776			hash_map::Entry::Occupied(mut chan_entry) => {
10777				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
10778					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
10779					let res = chan.channel_ready(
10780						&msg,
10781						&self.node_signer,
10782						self.chain_hash,
10783						&self.config.read().unwrap(),
10784						&self.best_block.read().unwrap(),
10785						&&logger
10786					);
10787					let announcement_sigs_opt =
10788						try_channel_entry!(self, peer_state, res, chan_entry);
10789					if let Some(announcement_sigs) = announcement_sigs_opt {
10790						log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
10791						peer_state.pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
10792							node_id: counterparty_node_id.clone(),
10793							msg: announcement_sigs,
10794						});
10795					} else if chan.context.is_usable() {
10796						// If we're sending an announcement_signatures, we'll send the (public)
10797						// channel_update after sending a channel_announcement when we receive our
10798						// counterparty's announcement_signatures. Thus, we only bother to send a
10799						// channel_update here if the channel is not public, i.e. we're not sending an
10800						// announcement_signatures.
10801						log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
10802						if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
10803							peer_state.pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
10804								node_id: counterparty_node_id.clone(),
10805								msg,
10806							});
10807						}
10808					}
10809
10810					{
10811						let mut pending_events = self.pending_events.lock().unwrap();
10812						emit_initial_channel_ready_event!(pending_events, chan);
10813					}
10814
10815					Ok(())
10816				} else {
10817					try_channel_entry!(self, peer_state, Err(ChannelError::close(
10818						"Got a channel_ready message for an unfunded channel!".into())), chan_entry)
10819				}
10820			},
10821			hash_map::Entry::Vacant(_) => {
10822				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
10823			}
10824		}
10825	}
10826
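	// Handles a counterparty `shutdown`. For a funded channel we run the cooperative close state
	// machine (possibly replying with our own `shutdown` and recording the shutdown script via a
	// monitor update) and then fail back any HTLCs dropped from the channel. For an unfunded
	// channel we simply close it immediately, as there is nothing to negotiate.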
10827	fn internal_shutdown(
10828		&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown,
10829	) -> Result<(), MsgHandleErrInternal> {
10830		let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
10831		{
10832			let per_peer_state = self.per_peer_state.read().unwrap();
10833			let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
10834				debug_assert!(false);
10835				MsgHandleErrInternal::send_err_msg_no_close(
10836					format!(
10837						"Can't find a peer matching the passed counterparty node_id {}",
10838						counterparty_node_id
10839					),
10840					msg.channel_id,
10841				)
10842			})?;
10843			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10844			let peer_state = &mut *peer_state_lock;
10845			if let hash_map::Entry::Occupied(mut chan_entry) =
10846				peer_state.channel_by_id.entry(msg.channel_id.clone())
10847			{
10848				match chan_entry.get_mut().as_funded_mut() {
10849					Some(chan) => {
10850						if !chan.received_shutdown() {
10851							let logger =
10852								WithChannelContext::from(&self.logger, &chan.context, None);
10853							log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
10854								msg.channel_id,
10855								if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
10856						}
10857
10858						let funding_txo_opt = chan.funding.get_funding_txo();
10859						let (shutdown, monitor_update_opt, htlcs) = try_channel_entry!(
10860							self,
10861							peer_state,
10862							chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg),
10863							chan_entry
10864						);
10865						dropped_htlcs = htlcs;
10866
10867						if let Some(msg) = shutdown {
10868							// We can send the `shutdown` message before updating the `ChannelMonitor`
10869							// here as we don't need the monitor update to complete until we send a
10870							// `closing_signed`, which we'll delay if we're pending a monitor update.
10871							peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
10872								node_id: *counterparty_node_id,
10873								msg,
10874							});
10875						}
10876						// Update the monitor with the shutdown script if necessary.
10877						if let Some(monitor_update) = monitor_update_opt {
10878							handle_new_monitor_update!(
10879								self,
10880								funding_txo_opt.unwrap(),
10881								monitor_update,
10882								peer_state_lock,
10883								peer_state,
10884								per_peer_state,
10885								chan
10886							);
10887						}
10888					},
10889					None => {
10890						let logger = WithChannelContext::from(
10891							&self.logger,
10892							chan_entry.get().context(),
10893							None,
10894						);
10895						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
10896						let reason = ClosureReason::CounterpartyCoopClosedUnfundedChannel;
10897						let err = ChannelError::Close((reason.to_string(), reason));
10898						let mut chan = chan_entry.remove();
10899						let (_, mut e) = convert_channel_err!(self, peer_state, err, &mut chan);
10900						e.dont_send_error_message();
10901						return Err(e);
10902					},
10903				}
10904			} else {
10905				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
10906			}
10907		}
10908		for htlc_source in dropped_htlcs.drain(..) {
10909			let receiver = HTLCHandlingFailureType::Forward {
10910				node_id: Some(counterparty_node_id.clone()),
10911				channel_id: msg.channel_id,
10912			};
10913			let reason = HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ChannelClosed);
10914			let (source, hash) = htlc_source;
10915			self.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, None);
10916		}
10917
10918		Ok(())
10919	}
10920
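	// Handles `closing_signed` fee negotiation. We may reply with a further `closing_signed`, and
	// once both sides agree we drop the channel from tracking (its monitor remains to watch for
	// stale-state broadcasts) and broadcast the fully signed closing transaction.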
10921	fn internal_closing_signed(
10922		&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned,
10923	) -> Result<(), MsgHandleErrInternal> {
10924		let per_peer_state = self.per_peer_state.read().unwrap();
10925		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
10926			debug_assert!(false);
10927			MsgHandleErrInternal::send_err_msg_no_close(
10928				format!(
10929					"Can't find a peer matching the passed counterparty node_id {}",
10930					counterparty_node_id
10931				),
10932				msg.channel_id,
10933			)
10934		})?;
10935		let logger;
10936		let tx_err: Option<(_, Result<Infallible, _>)> = {
10937			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10938			let peer_state = &mut *peer_state_lock;
10939			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
10940				hash_map::Entry::Occupied(mut chan_entry) => {
10941					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
10942						logger = WithChannelContext::from(&self.logger, &chan.context, None);
10943						let res = chan.closing_signed(&self.fee_estimator, &msg, &&logger);
10944						let (closing_signed, tx_shutdown_result) =
10945							try_channel_entry!(self, peer_state, res, chan_entry);
10946						debug_assert_eq!(tx_shutdown_result.is_some(), chan.is_shutdown());
10947						if let Some(msg) = closing_signed {
10948							peer_state.pending_msg_events.push(MessageSendEvent::SendClosingSigned {
10949								node_id: counterparty_node_id.clone(),
10950								msg,
10951							});
10952						}
10953						if let Some((tx, close_res)) = tx_shutdown_result {
10954							// We're done with this channel, we've got a signed closing transaction and
10955							// will send the closing_signed back to the remote peer upon return. This
10956							// also implies there are no pending HTLCs left on the channel, so we can
10957							// fully delete it from tracking (the channel monitor is still around to
10958							// watch for old state broadcasts)!
10959							let err = convert_channel_err!(self, peer_state, close_res, chan, COOP_CLOSED);
10960							chan_entry.remove();
10961							Some((tx, Err(err)))
10962						} else {
10963							None
10964						}
10965					} else {
10966						return try_channel_entry!(self, peer_state, Err(ChannelError::close(
10967							"Got a closing_signed message for an unfunded channel!".into())), chan_entry);
10968					}
10969				},
10970				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
10971			}
10972		};
10973		mem::drop(per_peer_state);
10974		if let Some((broadcast_tx, err)) = tx_err {
10975			log_info!(logger, "Broadcasting {}", log_tx!(broadcast_tx));
10976			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
10977			let _ = handle_error!(self, err, *counterparty_node_id);
10978		}
10979		Ok(())
10980	}
10981
10982	#[cfg(simple_close)]
10983	fn internal_closing_complete(
10984		&self, _counterparty_node_id: PublicKey, _msg: msgs::ClosingComplete,
10985	) -> Result<(), MsgHandleErrInternal> {
10986		unimplemented!("Handling ClosingComplete is not implemented");
10987	}
10988
10989	#[cfg(simple_close)]
10990	fn internal_closing_sig(
10991		&self, _counterparty_node_id: PublicKey, _msg: msgs::ClosingSig,
10992	) -> Result<(), MsgHandleErrInternal> {
10993		unimplemented!("Handling ClosingSig is not implemented");
10994	}
10995
10996	#[rustfmt::skip]
10997	fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
10998		// TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
10999		// determine the state of the payment based on our response/if we forward anything/the time
11000		// we take to respond. We should take care to avoid allowing such an attack.
11001		//
11002		// TODO: There exists a further attack where a node may garble the onion data, forward it to
11003		// us repeatedly garbled in different ways, and compare our error messages, which are
11004		// encrypted with the same key. It's not immediately obvious how to usefully exploit that,
11005		// but we should prevent it anyway.
11006
11007		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
11008		// closing a channel), so any changes are likely to be lost on restart!
11009
11010		let per_peer_state = self.per_peer_state.read().unwrap();
11011		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11012			.ok_or_else(|| {
11013				debug_assert!(false);
11014				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11015			})?;
11016		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11017		let peer_state = &mut *peer_state_lock;
11018		match peer_state.channel_by_id.entry(msg.channel_id) {
11019			hash_map::Entry::Occupied(mut chan_entry) => {
11020				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11021					try_channel_entry!(self, peer_state, chan.update_add_htlc(&msg, &self.fee_estimator), chan_entry);
11022				} else {
11023					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11024						"Got an update_add_htlc message for an unfunded channel!".into())), chan_entry);
11025				}
11026			},
11027			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11028		}
11029		Ok(())
11030	}
11031
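	// Handles `update_fulfill_htlc` on the outbound edge of a forward (or on one of our own
	// payments). For forwarded HTLCs we record an RAA-blocking action so that the next
	// `revoke_and_ack` is held until the preimage is durably persisted in the inbound edge's
	// `ChannelMonitor`, then hand the preimage to `claim_funds_internal` to claim backwards.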
11032	fn internal_update_fulfill_htlc(
11033		&self, counterparty_node_id: &PublicKey, msg: msgs::UpdateFulfillHTLC,
11034	) -> Result<(), MsgHandleErrInternal> {
11035		let funding_txo;
11036		let next_user_channel_id;
11037		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat, send_timestamp) = {
11038			let per_peer_state = self.per_peer_state.read().unwrap();
11039			let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
11040				debug_assert!(false);
11041				MsgHandleErrInternal::send_err_msg_no_close(
11042					format!(
11043						"Can't find a peer matching the passed counterparty node_id {}",
11044						counterparty_node_id
11045					),
11046					msg.channel_id,
11047				)
11048			})?;
11049			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11050			let peer_state = &mut *peer_state_lock;
11051			match peer_state.channel_by_id.entry(msg.channel_id) {
11052				hash_map::Entry::Occupied(mut chan_entry) => {
11053					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11054						let res = try_channel_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_entry);
11055						if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
11056							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11057							log_trace!(logger,
11058								"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
11059								msg.channel_id);
11060							peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
11061								.or_insert_with(Vec::new)
11062								.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
11063						}
11064						// Note that we do not need to push an `actions_blocking_raa_monitor_updates`
11065						// entry here, even though we *do* need to block the next RAA monitor update.
11066						// We do this instead in the `claim_funds_internal` by attaching a
11067						// `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
11068						// outbound HTLC is claimed. This is guaranteed to all complete before we
11069						// process the RAA as messages are processed from single peers serially.
11070						funding_txo = chan.funding.get_funding_txo().expect("We won't accept a fulfill until funded");
11071						next_user_channel_id = chan.context.get_user_id();
11072						res
11073					} else {
11074						return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11075							"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_entry);
11076					}
11077				},
11078				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11079			}
11080		};
11081		self.claim_funds_internal(
11082			htlc_source,
11083			msg.payment_preimage.clone(),
11084			Some(forwarded_htlc_value),
11085			skimmed_fee_msat,
11086			false,
11087			*counterparty_node_id,
11088			funding_txo,
11089			msg.channel_id,
11090			Some(next_user_channel_id),
11091			msg.attribution_data,
11092			send_timestamp,
11093		);
11094
11095		Ok(())
11096	}
11097
11098	#[rustfmt::skip]
11099	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
11100		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
11101		// closing a channel), so any changes are likely to be lost on restart!
11102		let per_peer_state = self.per_peer_state.read().unwrap();
11103		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11104			.ok_or_else(|| {
11105				debug_assert!(false);
11106				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11107			})?;
11108		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11109		let peer_state = &mut *peer_state_lock;
11110		match peer_state.channel_by_id.entry(msg.channel_id) {
11111			hash_map::Entry::Occupied(mut chan_entry) => {
11112				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11113					try_channel_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_entry);
11114				} else {
11115					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11116						"Got an update_fail_htlc message for an unfunded channel!".into())), chan_entry);
11117				}
11118			},
11119			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11120		}
11121		Ok(())
11122	}
11123
11124	#[rustfmt::skip]
11125	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
11126		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
11127		// closing a channel), so any changes are likely to be lost on restart!
11128		let per_peer_state = self.per_peer_state.read().unwrap();
11129		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11130			.ok_or_else(|| {
11131				debug_assert!(false);
11132				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11133			})?;
11134		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11135		let peer_state = &mut *peer_state_lock;
11136		match peer_state.channel_by_id.entry(msg.channel_id) {
11137			hash_map::Entry::Occupied(mut chan_entry) => {
11138				if (msg.failure_code & 0x8000) == 0 {
11139					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
11140					try_channel_entry!(self, peer_state, Err(chan_err), chan_entry);
11141				}
11142				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11143					try_channel_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code.into(), msg.sha256_of_onion.to_vec())), chan_entry);
11144				} else {
11145					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11146						"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_entry);
11147				}
11148				Ok(())
11149			},
11150			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11151		}
11152	}
11153
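	// Handles `commitment_signed`. The channel returns either a brand-new `ChannelMonitor` (when
	// this is the first commitment signature on an interactively funded channel), which we
	// register with the chain monitor, or a `ChannelMonitorUpdate` to apply to the existing one.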
11154	#[rustfmt::skip]
11155	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
11156		let best_block = *self.best_block.read().unwrap();
11157		let per_peer_state = self.per_peer_state.read().unwrap();
11158		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11159			.ok_or_else(|| {
11160				debug_assert!(false);
11161				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11162			})?;
11163		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11164		let peer_state = &mut *peer_state_lock;
11165		match peer_state.channel_by_id.entry(msg.channel_id) {
11166			hash_map::Entry::Occupied(mut chan_entry) => {
11167				let chan = chan_entry.get_mut();
11168				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
11169				let funding_txo = chan.funding().get_funding_txo();
11170				let (monitor_opt, monitor_update_opt) = try_channel_entry!(
11171					self, peer_state, chan.commitment_signed(msg, best_block, &self.signer_provider, &self.fee_estimator, &&logger),
11172					chan_entry);
11173
11174				if let Some(chan) = chan.as_funded_mut() {
11175					if let Some(monitor) = monitor_opt {
11176						let monitor_res = self.chain_monitor.watch_channel(monitor.channel_id(), monitor);
11177						if let Ok(persist_state) = monitor_res {
11178							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
11179								per_peer_state, chan, INITIAL_MONITOR);
11180						} else {
11181							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11182							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the channel ID was duplicated");
11183							let msg = "Channel ID was a duplicate";
11184							let reason = ClosureReason::ProcessingError { err: msg.to_owned() };
11185							let err = ChannelError::Close((msg.to_owned(), reason));
11186							try_channel_entry!(self, peer_state, Err(err), chan_entry)
11187						}
11188					} else if let Some(monitor_update) = monitor_update_opt {
11189						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
11190							peer_state, per_peer_state, chan);
11191					}
11192				}
11193				Ok(())
11194			},
11195			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11196		}
11197	}
11198
11199	#[rustfmt::skip]
11200	fn internal_commitment_signed_batch(&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, batch: Vec<msgs::CommitmentSigned>) -> Result<(), MsgHandleErrInternal> {
11201		let per_peer_state = self.per_peer_state.read().unwrap();
11202		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11203			.ok_or_else(|| {
11204				debug_assert!(false);
11205				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), channel_id)
11206			})?;
11207		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11208		let peer_state = &mut *peer_state_lock;
11209		match peer_state.channel_by_id.entry(channel_id) {
11210			hash_map::Entry::Occupied(mut chan_entry) => {
11211				let chan = chan_entry.get_mut();
11212				let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
11213				let funding_txo = chan.funding().get_funding_txo();
11214				if let Some(chan) = chan.as_funded_mut() {
11215					let monitor_update_opt = try_channel_entry!(
11216						self, peer_state, chan.commitment_signed_batch(batch, &self.fee_estimator, &&logger), chan_entry
11217					);
11218
11219					if let Some(monitor_update) = monitor_update_opt {
11220						handle_new_monitor_update!(
11221							self, funding_txo.unwrap(), monitor_update, peer_state_lock, peer_state,
11222							per_peer_state, chan
11223						);
11224					}
11225				}
11226				Ok(())
11227			},
11228			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), channel_id))
11229		}
11230	}
11231
11232	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
11233		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
11234		let src_outbound_scid_alias = update_add_htlcs.0;
11235		match decode_update_add_htlcs.entry(src_outbound_scid_alias) {
11236			hash_map::Entry::Occupied(mut e) => {
11237				e.get_mut().append(&mut update_add_htlcs.1);
11238			},
11239			hash_map::Entry::Vacant(e) => {
11240				e.insert(update_add_htlcs.1);
11241			},
11242		}
11243	}
11244
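	// Queues decoded HTLC forwards received from each inbound channel. An HTLC destined for an
	// offline recipient is stashed as a held HTLC until they reconnect, one targeting an intercept
	// SCID is surfaced to the user as an `HTLCIntercepted` event, and anything we cannot handle is
	// failed back to the previous hop.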
11245	#[inline]
11246	fn forward_htlcs(&self, per_source_pending_forwards: &mut [PerSourcePendingForward]) {
11247		for &mut (
11248			prev_outbound_scid_alias,
11249			prev_counterparty_node_id,
11250			prev_funding_outpoint,
11251			prev_channel_id,
11252			prev_user_channel_id,
11253			ref mut pending_forwards,
11254		) in per_source_pending_forwards
11255		{
11256			let mut new_intercept_events = VecDeque::new();
11257			let mut failed_intercept_forwards = Vec::new();
11258			if !pending_forwards.is_empty() {
11259				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
11260					let scid = match forward_info.routing {
11261						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
11262						PendingHTLCRouting::TrampolineForward { .. } => 0,
11263						PendingHTLCRouting::Receive { .. } => 0,
11264						PendingHTLCRouting::ReceiveKeysend { .. } => 0,
11265					};
11266					// Pull this now to avoid introducing a lock order with `forward_htlcs`.
11267					let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
11268
11269					let payment_hash = forward_info.payment_hash;
11270					let logger = WithContext::from(
11271						&self.logger,
11272						None,
11273						Some(prev_channel_id),
11274						Some(payment_hash),
11275					);
11276					let pending_add = PendingAddHTLCInfo {
11277						prev_outbound_scid_alias,
11278						prev_counterparty_node_id,
11279						prev_funding_outpoint,
11280						prev_channel_id,
11281						prev_htlc_id,
11282						prev_user_channel_id,
11283						forward_info,
11284					};
11285					let mut fail_intercepted_htlc = |pending_add: PendingAddHTLCInfo| {
11286						let htlc_source =
11287							HTLCSource::PreviousHopData(pending_add.htlc_previous_hop_data());
11288						let reason = HTLCFailReason::from_failure_code(
11289							LocalHTLCFailureReason::UnknownNextPeer,
11290						);
11291						let failure_type = HTLCHandlingFailureType::InvalidForward {
11292							requested_forward_scid: scid,
11293						};
11294						failed_intercept_forwards.push((
11295							htlc_source,
11296							payment_hash,
11297							reason,
11298							failure_type,
11299						));
11300					};
11301
11302					// In the case that we have an HTLC that we're supposed to hold onto until the
11303					// recipient comes online *and* the outbound scid is a fake intercept scid (per
11304					// `fake_scid::is_valid_intercept`), we should first wait for the recipient to come
11305					// online before generating an `HTLCIntercepted` event, since the event cannot be
11306					// acted on until the recipient is online to cooperatively open the JIT channel. Once
11307					// we receive the `ReleaseHeldHtlc` message from the recipient, we will circle back
11308					// here and resume generating the event below.
11309					if pending_add.forward_info.routing.should_hold_htlc() {
11310						let intercept_id = InterceptId::from_htlc_id_and_chan_id(
11311							prev_htlc_id,
11312							&prev_channel_id,
11313							&prev_counterparty_node_id,
11314						);
11315						let mut held_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
11316						match held_htlcs.entry(intercept_id) {
11317							hash_map::Entry::Vacant(entry) => {
11318								log_trace!(
11319									logger,
11320									"Intercepted held HTLC with id {}, holding until the recipient is online",
11321									intercept_id
11322								);
11323								entry.insert(pending_add);
11324							},
11325							hash_map::Entry::Occupied(_) => {
11326								debug_assert!(false, "Should never have two HTLCs with the same channel id and htlc id");
11327								fail_intercepted_htlc(pending_add);
11328							},
11329						}
11330					} else if !is_our_scid
11331						&& pending_add.forward_info.incoming_amt_msat.is_some()
11332						&& fake_scid::is_valid_intercept(
11333							&self.fake_scid_rand_bytes,
11334							scid,
11335							&self.chain_hash,
11336						) {
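						// This HTLC was forwarded over a fake intercept scid that isn't (yet) backed by
						// one of our channels, so instead of forwarding it ourselves we queue an
						// `HTLCIntercepted` event and let the user decide how (or whether) to forward
						// it, e.g. after opening a JIT channel to the next hop.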
11337						let intercept_id = InterceptId::from_incoming_shared_secret(
11338							&pending_add.forward_info.incoming_shared_secret,
11339						);
11340						let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
11341						match pending_intercepts.entry(intercept_id) {
11342							hash_map::Entry::Vacant(entry) => {
11343								new_intercept_events.push_back((
11344									events::Event::HTLCIntercepted {
11345										requested_next_hop_scid: scid,
11346										payment_hash,
11347										inbound_amount_msat: pending_add
11348											.forward_info
11349											.incoming_amt_msat
11350											.unwrap(),
11351										expected_outbound_amount_msat: pending_add
11352											.forward_info
11353											.outgoing_amt_msat,
11354										intercept_id,
11355									},
11356									None,
11357								));
11358								entry.insert(pending_add);
11359							},
11360							hash_map::Entry::Occupied(_) => {
11361								log_info!(
11362									logger,
11363									"Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}",
11364									scid
11365								);
11366								fail_intercepted_htlc(pending_add);
11367							},
11368						}
11369					} else {
11370						match self.forward_htlcs.lock().unwrap().entry(scid) {
11371							hash_map::Entry::Occupied(mut entry) => {
11372								entry.get_mut().push(HTLCForwardInfo::AddHTLC(pending_add));
11373							},
11374							hash_map::Entry::Vacant(entry) => {
11375								entry.insert(vec![HTLCForwardInfo::AddHTLC(pending_add)]);
11376							},
11377						}
11378					}
11379				}
11380			}
11381
11382			for (htlc_source, payment_hash, failure_reason, destination) in
11383				failed_intercept_forwards.drain(..)
11384			{
11385				self.fail_htlc_backwards_internal(
11386					&htlc_source,
11387					&payment_hash,
11388					&failure_reason,
11389					destination,
11390					None,
11391				);
11392			}
11393
11394			if !new_intercept_events.is_empty() {
11395				let mut events = self.pending_events.lock().unwrap();
11396				events.append(&mut new_intercept_events);
11397			}
11398		}
11399	}
11400
11401	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
11402	/// [`msgs::RevokeAndACK`] should be held for the given channel until some other action
11403	/// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
11404	/// the [`ChannelMonitorUpdate`] in question.
11405	#[rustfmt::skip]
11406	fn raa_monitor_updates_held(&self,
11407		actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
11408		channel_id: ChannelId, counterparty_node_id: PublicKey,
11409	) -> bool {
11410		actions_blocking_raa_monitor_updates
11411			.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
11412		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
11413			if let Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
11414				channel_funding_outpoint: _,
11415				channel_id: ev_channel_id,
11416				counterparty_node_id: ev_counterparty_node_id
11417			}) = action {
11418				*ev_channel_id == channel_id && *ev_counterparty_node_id == counterparty_node_id
11419			} else {
11420				false
11421			}
11422		})
11423	}
11424
11425	#[cfg(any(test, feature = "_test_utils"))]
11426	pub(crate) fn test_raa_monitor_updates_held(
11427		&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
11428	) -> bool {
11429		let per_peer_state = self.per_peer_state.read().unwrap();
11430		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
11431			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
11432			let peer_state = &mut *peer_state_lck;
11433
11434			assert!(peer_state.channel_by_id.contains_key(&channel_id));
11435			return self.raa_monitor_updates_held(
11436				&peer_state.actions_blocking_raa_monitor_updates,
11437				channel_id,
11438				counterparty_node_id,
11439			);
11440		}
11441		false
11442	}
11443
11444	#[rustfmt::skip]
11445	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
11446		let (htlcs_to_fail, static_invoices) = {
11447			let per_peer_state = self.per_peer_state.read().unwrap();
11448			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
11449				.ok_or_else(|| {
11450					debug_assert!(false);
11451					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11452				}).map(|mtx| mtx.lock().unwrap())?;
11453			let peer_state = &mut *peer_state_lock;
11454			match peer_state.channel_by_id.entry(msg.channel_id) {
11455				hash_map::Entry::Occupied(mut chan_entry) => {
11456					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11457						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11458						let funding_txo_opt = chan.funding.get_funding_txo();
11459						let mon_update_blocked = self.raa_monitor_updates_held(
11460							&peer_state.actions_blocking_raa_monitor_updates, msg.channel_id,
11461							*counterparty_node_id);
11462						let (htlcs_to_fail, static_invoices, monitor_update_opt) = try_channel_entry!(self, peer_state,
11463							chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_entry);
11464						if let Some(monitor_update) = monitor_update_opt {
11465							let funding_txo = funding_txo_opt
11466								.expect("Funding outpoint must have been set for RAA handling to succeed");
11467							handle_new_monitor_update!(self, funding_txo, monitor_update,
11468								peer_state_lock, peer_state, per_peer_state, chan);
11469						}
11470						(htlcs_to_fail, static_invoices)
11471					} else {
11472						return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11473							"Got a revoke_and_ack message for an unfunded channel!".into())), chan_entry);
11474					}
11475				},
11476				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11477			}
11478		};
11479		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
11480		for (static_invoice, reply_path) in static_invoices {
11481			let res = self.flow.enqueue_held_htlc_available(&static_invoice, HeldHtlcReplyPath::ToCounterparty { path: reply_path });
11482			debug_assert!(res.is_ok(), "enqueue_held_htlc_available can only fail for non-async senders");
11483		}
11484		Ok(())
11485	}
11486
11487	#[rustfmt::skip]
11488	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
11489		let per_peer_state = self.per_peer_state.read().unwrap();
11490		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11491			.ok_or_else(|| {
11492				debug_assert!(false);
11493				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11494			})?;
11495		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11496		let peer_state = &mut *peer_state_lock;
11497		match peer_state.channel_by_id.entry(msg.channel_id) {
11498			hash_map::Entry::Occupied(mut chan_entry) => {
11499				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11500					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11501					try_channel_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_entry);
11502				} else {
11503					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11504						"Got an update_fee message for an unfunded channel!".into())), chan_entry);
11505				}
11506			},
11507			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11508		}
11509		Ok(())
11510	}
11511
11512	#[rustfmt::skip]
11513	fn internal_stfu(&self, counterparty_node_id: &PublicKey, msg: &msgs::Stfu) -> Result<bool, MsgHandleErrInternal> {
11514		let per_peer_state = self.per_peer_state.read().unwrap();
11515		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
11516			debug_assert!(false);
11517			MsgHandleErrInternal::send_err_msg_no_close(
11518				format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
11519				msg.channel_id
11520			)
11521		})?;
11522		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11523		let peer_state = &mut *peer_state_lock;
11524
11525		if !self.init_features().supports_quiescence() {
11526			return Err(MsgHandleErrInternal::from_chan_no_close(
11527				ChannelError::Warn("Quiescence not supported".to_string()), msg.channel_id
11528			));
11529		}
11530
11531		match peer_state.channel_by_id.entry(msg.channel_id) {
11532			hash_map::Entry::Occupied(mut chan_entry) => {
11533				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11534					let logger = WithContext::from(
11535						&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None
11536					);
11537
11538					let res = chan.stfu(&msg, &&logger);
11539					let resp = try_channel_entry!(self, peer_state, res, chan_entry);
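					// Their `stfu` may complete a quiescence handshake we initiated (in which case we
					// can proceed directly to `splice_init`), may require us to respond with our own
					// `stfu`, or may need no response at all.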
11540					match resp {
11541						None => Ok(false),
11542						Some(StfuResponse::Stfu(msg)) => {
11543							peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
11544								node_id: *counterparty_node_id,
11545								msg,
11546							});
11547							Ok(true)
11548						},
11549						Some(StfuResponse::SpliceInit(msg)) => {
11550							peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceInit {
11551								node_id: *counterparty_node_id,
11552								msg,
11553							});
11554							Ok(true)
11555						},
11556					}
11557				} else {
11558					let msg = "Peer sent `stfu` for an unfunded channel";
11559					let err = Err(ChannelError::Close(
11560						(msg.into(), ClosureReason::ProcessingError { err: msg.into() })
11561					));
11562					return try_channel_entry!(self, peer_state, err, chan_entry);
11563				}
11564			},
11565			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(
11566				format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id),
11567				msg.channel_id
11568			))
11569		}
11570	}
11571
11572	#[rustfmt::skip]
11573	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
11574		let per_peer_state = self.per_peer_state.read().unwrap();
11575		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11576			.ok_or_else(|| {
11577				debug_assert!(false);
11578				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11579			})?;
11580		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11581		let peer_state = &mut *peer_state_lock;
11582		match peer_state.channel_by_id.entry(msg.channel_id) {
11583			hash_map::Entry::Occupied(mut chan_entry) => {
11584				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11585					if !chan.context.is_usable() {
11586						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
11587					}
11588
11589					let cur_height = self.best_block.read().unwrap().height;
11590					let res = chan.announcement_signatures(
11591						&self.node_signer,
11592						self.chain_hash,
11593						cur_height,
11594						msg,
11595						&self.config.read().unwrap(),
11596					);
11597					peer_state.pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement {
11598						msg: try_channel_entry!(self, peer_state, res, chan_entry),
11599						// Note that announcement_signatures fails if the channel cannot be announced,
11600						// so get_channel_update_for_broadcast will never fail by the time we get here.
11601						update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
11602					});
11603				} else {
11604					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11605						"Got an announcement_signatures message for an unfunded channel!".into())), chan_entry);
11606				}
11607			},
11608			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
11609		}
11610		Ok(())
11611	}
11612
11613	/// Returns DoPersist if anything changed, otherwise either SkipPersistNoEvents or an Err.
11614	#[rustfmt::skip]
11615	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
11616		let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
11617			Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
11618			None => {
11619				// It's not a local channel
11620				return Ok(NotifyOption::SkipPersistNoEvents)
11621			}
11622		};
11623		let per_peer_state = self.per_peer_state.read().unwrap();
11624		let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
11625		if peer_state_mutex_opt.is_none() {
11626			return Ok(NotifyOption::SkipPersistNoEvents)
11627		}
11628		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11629		let peer_state = &mut *peer_state_lock;
11630		match peer_state.channel_by_id.entry(chan_id) {
11631			hash_map::Entry::Occupied(mut chan_entry) => {
11632				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11633					if chan.context.get_counterparty_node_id() != *counterparty_node_id {
11634						if chan.context.should_announce() {
11635							// If the announcement is about a channel of ours which is public, some
11636							// other peer may simply be forwarding all its gossip to us. Don't provide
11637							// a scary-looking error message and return Ok instead.
11638							return Ok(NotifyOption::SkipPersistNoEvents);
11639						}
11640						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
11641					}
11642					let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
11643					let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
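					// Per BOLT 7, `node_1` is the node with the lexicographically-smaller pubkey and the
					// low bit of `channel_flags` indicates which side the update describes. An update for
					// our own side of the channel carries nothing for us to apply, so skip it.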
11644					if were_node_one == msg_from_node_one {
11645						return Ok(NotifyOption::SkipPersistNoEvents);
11646					} else {
11647						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11648						log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
11649						let did_change = try_channel_entry!(self, peer_state, chan.channel_update(&msg), chan_entry);
11650						// If nothing changed after applying their update, we don't need to bother
11651						// persisting.
11652						if !did_change {
11653							return Ok(NotifyOption::SkipPersistNoEvents);
11654						}
11655					}
11656				} else {
11657					return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11658						"Got a channel_update for an unfunded channel!".into())), chan_entry);
11659				}
11660			},
11661			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
11662		}
11663		Ok(NotifyOption::DoPersist)
11664	}
11665
11666	#[rustfmt::skip]
11667	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
11668		let (inferred_splice_locked, need_lnd_workaround) = {
11669			let per_peer_state = self.per_peer_state.read().unwrap();
11670
11671			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11672				.ok_or_else(|| {
11673					debug_assert!(false);
11674					MsgHandleErrInternal::send_err_msg_no_close(
11675						format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"),
11676						msg.channel_id
11677					)
11678				})?;
11679			let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
11680			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11681			let peer_state = &mut *peer_state_lock;
11682			match peer_state.channel_by_id.entry(msg.channel_id) {
11683				hash_map::Entry::Occupied(mut chan_entry) => {
11684					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11685						// Currently, we expect all holding cell update_adds to be dropped on peer
11686						// disconnect, so Channel's reestablish will never hand us any holding cell
11687						// freed HTLCs to fail backwards. If in the future we no longer drop pending
11688						// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
11689						let outbound_scid_alias = chan.context.outbound_scid_alias();
11690						let res = chan.channel_reestablish(
11691							msg,
11692							&&logger,
11693							&self.node_signer,
11694							self.chain_hash,
11695							&self.config.read().unwrap(),
11696							&*self.best_block.read().unwrap(),
11697							|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &msg.channel_id, counterparty_node_id)
11698						);
11699						let responses = try_channel_entry!(self, peer_state, res, chan_entry);
11700						let mut channel_update = None;
11701						if let Some(msg) = responses.shutdown_msg {
11702							peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
11703								node_id: counterparty_node_id.clone(),
11704								msg,
11705							});
11706						} else if chan.context.is_usable() {
11707							// If the channel is in a usable state (ie the channel is not being shut
11708							// down), send a unicast channel_update to our counterparty to make sure
11709							// they have the latest channel parameters.
11710							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
11711								channel_update = Some(MessageSendEvent::SendChannelUpdate {
11712									node_id: chan.context.get_counterparty_node_id(),
11713									msg,
11714								});
11715							}
11716						}
11717						let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
11718						let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
11719							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.commitment_order,
11720							Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs,
11721							responses.tx_signatures, responses.tx_abort, responses.channel_ready_order,
11722						);
11723						debug_assert!(htlc_forwards.is_none());
11724						debug_assert!(decode_update_add_htlcs.is_none());
11725						if let Some(upd) = channel_update {
11726							peer_state.pending_msg_events.push(upd);
11727						}
11728
11729						(responses.inferred_splice_locked, need_lnd_workaround)
11730					} else {
11731						return try_channel_entry!(self, peer_state, Err(ChannelError::close(
11732							"Got a channel_reestablish message for an unfunded channel!".into())), chan_entry);
11733					}
11734				},
11735				hash_map::Entry::Vacant(_) => {
11736					log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
11737						msg.channel_id);
11738					// Unfortunately, lnd doesn't force close on errors
11739					// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
11740					// One of the few ways to get an lnd counterparty to force close is by
11741					// replicating what they do when restoring static channel backups (SCBs). They
11742					// send an invalid `ChannelReestablish` with `0` commitment numbers and an
11743					// invalid `your_last_per_commitment_secret`.
11744					//
11745					// Since we received a `ChannelReestablish` for a channel that doesn't exist, we
11746					// can assume it's likely the channel closed from our point of view, but it
11747					// remains open on the counterparty's side. By sending this bogus
11748					// `ChannelReestablish` message now as a response to theirs, we trigger them to
11749					// force close broadcasting their latest state. If the closing transaction from
11750					// our point of view remains unconfirmed, it'll enter a race with the
11751					// counterparty's to-be-broadcast latest commitment transaction.
11752					peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
11753						node_id: *counterparty_node_id,
11754						msg: msgs::ChannelReestablish {
11755							channel_id: msg.channel_id,
11756							next_local_commitment_number: 0,
11757							next_remote_commitment_number: 0,
11758							your_last_per_commitment_secret: [1u8; 32],
11759							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
11760							next_funding: None,
11761							my_current_funding_locked: None,
11762						},
11763					});
11764					return Err(MsgHandleErrInternal::send_err_msg_no_close(
11765						format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
11766							counterparty_node_id), msg.channel_id)
11767					)
11768				}
11769			}
11770		};
11771
11772		if let Some(channel_ready_msg) = need_lnd_workaround {
11773			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
11774		}
11775
11776		if let Some(splice_locked) = inferred_splice_locked {
11777			self.internal_splice_locked(counterparty_node_id, &splice_locked)?;
11778		}
11779
11780		Ok(())
11781	}
11782
11783	/// Handle an incoming splice request, transitioning the channel to splice-pending (unless some check fails).
11784	#[rustfmt::skip]
11785	fn internal_splice_init(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceInit) -> Result<(), MsgHandleErrInternal> {
11786		let per_peer_state = self.per_peer_state.read().unwrap();
11787		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11788			.ok_or_else(|| {
11789				debug_assert!(false);
11790				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11791			})?;
11792		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11793		let peer_state = &mut *peer_state_lock;
11794
11795		// TODO(splicing): Currently not possible to contribute on the splicing-acceptor side
11796		let our_funding_contribution = 0i64;
11797
11798		// Look for the channel
11799		match peer_state.channel_by_id.entry(msg.channel_id) {
11800			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
11801					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}, channel_id {}",
11802					counterparty_node_id, msg.channel_id,
11803				), msg.channel_id)),
11804			hash_map::Entry::Occupied(mut chan_entry) => {
11805				if self.config.read().unwrap().reject_inbound_splices {
11806					let err = ChannelError::WarnAndDisconnect(
11807						"Inbound channel splices are currently not allowed".to_owned()
11808					);
11809					return Err(MsgHandleErrInternal::from_chan_no_close(err, msg.channel_id));
11810				}
11811
11812				if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() {
11813					let init_res = funded_channel.splice_init(
11814						msg, our_funding_contribution, &self.signer_provider, &self.entropy_source,
11815						&self.get_our_node_id(), &self.logger
11816					);
11817					let splice_ack_msg = try_channel_entry!(self, peer_state, init_res, chan_entry);
11818					peer_state.pending_msg_events.push(MessageSendEvent::SendSpliceAck {
11819						node_id: *counterparty_node_id,
11820						msg: splice_ack_msg,
11821					});
11822					Ok(())
11823				} else {
11824					try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry)
11825				}
11826			},
11827		}
11828	}
11829
11830	/// Handle an incoming splice request ack, transitioning the channel to splice-pending (unless some check fails).
11831	#[rustfmt::skip]
11832	fn internal_splice_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceAck) -> Result<(), MsgHandleErrInternal> {
11833		let per_peer_state = self.per_peer_state.read().unwrap();
11834		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
11835			.ok_or_else(|| {
11836				debug_assert!(false);
11837				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}"), msg.channel_id)
11838			})?;
11839		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11840		let peer_state = &mut *peer_state_lock;
11841
11842		// Look for the channel
11843		match peer_state.channel_by_id.entry(msg.channel_id) {
11844			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
11845					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
11846					counterparty_node_id
11847				), msg.channel_id)),
11848			hash_map::Entry::Occupied(mut chan_entry) => {
11849				if let Some(ref mut funded_channel) = chan_entry.get_mut().as_funded_mut() {
11850					let splice_ack_res = funded_channel.splice_ack(
11851						msg, &self.signer_provider, &self.entropy_source,
11852						&self.get_our_node_id(), &self.logger
11853					);
11854					let tx_msg_opt = try_channel_entry!(self, peer_state, splice_ack_res, chan_entry);
11855					if let Some(tx_msg) = tx_msg_opt {
11856						peer_state.pending_msg_events.push(tx_msg.into_msg_send_event(counterparty_node_id.clone()));
11857					}
11858					Ok(())
11859				} else {
11860					try_channel_entry!(self, peer_state, Err(ChannelError::close("Channel is not funded, cannot be spliced".into())), chan_entry)
11861				}
11862			},
11863		}
11864	}
11865
11866	fn internal_splice_locked(
11867		&self, counterparty_node_id: &PublicKey, msg: &msgs::SpliceLocked,
11868	) -> Result<(), MsgHandleErrInternal> {
11869		let per_peer_state = self.per_peer_state.read().unwrap();
11870		let peer_state_mutex = per_peer_state.get(counterparty_node_id).ok_or_else(|| {
11871			debug_assert!(false);
11872			MsgHandleErrInternal::send_err_msg_no_close(
11873				format!(
11874					"Can't find a peer matching the passed counterparty node_id {}",
11875					counterparty_node_id
11876				),
11877				msg.channel_id,
11878			)
11879		})?;
11880		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11881		let peer_state = &mut *peer_state_lock;
11882
11883		// Look for the channel
11884		match peer_state.channel_by_id.entry(msg.channel_id) {
11885			hash_map::Entry::Vacant(_) => {
11886				let err = format!(
11887					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
11888					counterparty_node_id,
11889				);
11890				return Err(MsgHandleErrInternal::send_err_msg_no_close(err, msg.channel_id));
11891			},
11892			hash_map::Entry::Occupied(mut chan_entry) => {
11893				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
11894					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11895					let result = chan.splice_locked(
11896						msg,
11897						&self.node_signer,
11898						self.chain_hash,
11899						&self.config.read().unwrap(),
11900						self.best_block.read().unwrap().height,
11901						&&logger,
11902					);
11903					let splice_promotion = try_channel_entry!(self, peer_state, result, chan_entry);
11904					if let Some(splice_promotion) = splice_promotion {
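						// Both sides have now locked in the splice funding, so promote it to the
						// channel's active funding: refresh the scid map, surface `ChannelReady` and
						// `DiscardFunding` events, send `announcement_signatures` if applicable, and
						// persist any resulting monitor update.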
11905						{
11906							let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
11907							insert_short_channel_id!(short_to_chan_info, chan);
11908						}
11909
11910						{
11911							let mut pending_events = self.pending_events.lock().unwrap();
11912							pending_events.push_back((
11913								events::Event::ChannelReady {
11914									channel_id: chan.context.channel_id(),
11915									user_channel_id: chan.context.get_user_id(),
11916									counterparty_node_id: chan.context.get_counterparty_node_id(),
11917									funding_txo: Some(
11918										splice_promotion.funding_txo.into_bitcoin_outpoint(),
11919									),
11920									channel_type: chan.funding.get_channel_type().clone(),
11921								},
11922								None,
11923							));
11924							splice_promotion.discarded_funding.into_iter().for_each(
11925								|funding_info| {
11926									let event = Event::DiscardFunding {
11927										channel_id: chan.context.channel_id(),
11928										funding_info,
11929									};
11930									pending_events.push_back((event, None));
11931								},
11932							);
11933						}
11934
11935						if let Some(announcement_sigs) = splice_promotion.announcement_sigs {
11936							log_trace!(
11937								logger,
11938								"Sending announcement_signatures for channel {}",
11939								chan.context.channel_id()
11940							);
11941							peer_state.pending_msg_events.push(
11942								MessageSendEvent::SendAnnouncementSignatures {
11943									node_id: counterparty_node_id.clone(),
11944									msg: announcement_sigs,
11945								},
11946							);
11947						}
11948
11949						if let Some(monitor_update) = splice_promotion.monitor_update {
11950							handle_new_monitor_update!(
11951								self,
11952								splice_promotion.funding_txo,
11953								monitor_update,
11954								peer_state_lock,
11955								peer_state,
11956								per_peer_state,
11957								chan
11958							);
11959						}
11960					}
11961				} else {
11962					return Err(MsgHandleErrInternal::send_err_msg_no_close(
11963						"Channel is not funded, cannot splice".to_owned(),
11964						msg.channel_id,
11965					));
11966				}
11967			},
11968		};
11969
11970		Ok(())
11971	}
11972
11973	/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
11974	fn process_pending_monitor_events(&self) -> bool {
11975		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
11976
11977		let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
11978		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
11979		let has_pending_monitor_events = !pending_monitor_events.is_empty();
11980		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in
11981			pending_monitor_events.drain(..)
11982		{
11983			for monitor_event in monitor_events.drain(..) {
11984				match monitor_event {
11985					MonitorEvent::HTLCEvent(htlc_update) => {
11986						let logger = WithContext::from(
11987							&self.logger,
11988							Some(counterparty_node_id),
11989							Some(channel_id),
11990							Some(htlc_update.payment_hash),
11991						);
11992						if let Some(preimage) = htlc_update.payment_preimage {
11993							log_trace!(
11994								logger,
11995								"Claiming HTLC with preimage {} from our monitor",
11996								preimage
11997							);
11998							// Claim the funds from the previous hop, if there is one. Because this is in response to a
11999							// chain event, no attribution data is available.
12000							self.claim_funds_internal(
12001								htlc_update.source,
12002								preimage,
12003								htlc_update.htlc_value_satoshis.map(|v| v * 1000),
12004								None,
12005								true,
12006								counterparty_node_id,
12007								funding_outpoint,
12008								channel_id,
12009								None,
12010								None,
12011								None,
12012							);
12013						} else {
12014							log_trace!(
12015								logger,
12016								"Failing HTLC with hash {} from our monitor",
12017								&htlc_update.payment_hash
12018							);
12019							let failure_reason = LocalHTLCFailureReason::OnChainTimeout;
12020							let receiver = HTLCHandlingFailureType::Forward {
12021								node_id: Some(counterparty_node_id),
12022								channel_id,
12023							};
12024							let reason = HTLCFailReason::from_failure_code(failure_reason);
12025							let completion_update = Some(PaymentCompleteUpdate {
12026								counterparty_node_id,
12027								channel_funding_outpoint: funding_outpoint,
12028								channel_id,
12029								htlc_id: SentHTLCId::from_source(&htlc_update.source),
12030							});
12031							self.fail_htlc_backwards_internal(
12032								&htlc_update.source,
12033								&htlc_update.payment_hash,
12034								&reason,
12035								receiver,
12036								completion_update,
12037							);
12038						}
12039					},
12040					MonitorEvent::HolderForceClosed(_)
12041					| MonitorEvent::HolderForceClosedWithInfo { .. } => {
12042						let per_peer_state = self.per_peer_state.read().unwrap();
12043						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
12044							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12045							let peer_state = &mut *peer_state_lock;
12046							if let hash_map::Entry::Occupied(chan_entry) =
12047								peer_state.channel_by_id.entry(channel_id)
12048							{
12049								let reason = if let MonitorEvent::HolderForceClosedWithInfo {
12050									reason,
12051									..
12052								} = monitor_event
12053								{
12054									reason
12055								} else {
12056									ClosureReason::HolderForceClosed {
12057										broadcasted_latest_txn: Some(true),
12058										message: "Legacy ChannelMonitor closure".to_owned(),
12059									}
12060								};
12061								let err = ChannelError::Close((reason.to_string(), reason));
12062								let mut chan = chan_entry.remove();
12063								let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
12064								failed_channels.push((Err(e), counterparty_node_id));
12065							}
12066						}
12067					},
12068					MonitorEvent::CommitmentTxConfirmed(_) => {
12069						let per_peer_state = self.per_peer_state.read().unwrap();
12070						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
12071							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12072							let peer_state = &mut *peer_state_lock;
12073							if let hash_map::Entry::Occupied(chan_entry) =
12074								peer_state.channel_by_id.entry(channel_id)
12075							{
12076								let reason = ClosureReason::CommitmentTxConfirmed;
12077								let err = ChannelError::Close((reason.to_string(), reason));
12078								let mut chan = chan_entry.remove();
12079								let (_, e) = convert_channel_err!(self, peer_state, err, &mut chan);
12080								failed_channels.push((Err(e), counterparty_node_id));
12081							}
12082						}
12083					},
12084					MonitorEvent::Completed { channel_id, monitor_update_id, .. } => {
12085						self.channel_monitor_updated(
12086							&channel_id,
12087							Some(monitor_update_id),
12088							&counterparty_node_id,
12089						);
12090					},
12091				}
12092			}
12093		}
12094
12095		for (err, counterparty_node_id) in failed_channels {
12096			let _ = handle_error!(self, err, counterparty_node_id);
12097		}
12098
12099		has_pending_monitor_events
12100	}
12101
12102	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
12103	/// Returns whether there were any updates, e.g. whether any pending HTLCs were freed or a
12104	/// monitor update was applied.
12105	fn check_free_holding_cells(&self) -> bool {
12106		let mut has_monitor_update = false;
12107		let mut failed_htlcs = Vec::new();
12108
12109		// Walk our list of channels and find any that need to update. Note that when we do find an
12110		// update, if it includes actions that must be taken afterwards, we have to drop the
12111		// per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
12112		// manage to go through all our peers without finding a single channel to update.
12113		'peer_loop: loop {
12114			let per_peer_state = self.per_peer_state.read().unwrap();
12115			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
12116				'chan_loop: loop {
12117					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12118					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
12119					for (channel_id, chan) in
12120						peer_state.channel_by_id.iter_mut().filter_map(|(chan_id, chan)| {
12121							chan.as_funded_mut().map(|chan| (chan_id, chan))
12122						}) {
12123						let counterparty_node_id = chan.context.get_counterparty_node_id();
12124						let funding_txo = chan.funding.get_funding_txo();
12125						let (monitor_opt, holding_cell_failed_htlcs) = chan
12126							.maybe_free_holding_cell_htlcs(
12127								&self.fee_estimator,
12128								&&WithChannelContext::from(&self.logger, &chan.context, None),
12129							);
12130						if !holding_cell_failed_htlcs.is_empty() {
12131							failed_htlcs.push((
12132								holding_cell_failed_htlcs,
12133								*channel_id,
12134								counterparty_node_id,
12135							));
12136						}
12137						if let Some(monitor_update) = monitor_opt {
12138							has_monitor_update = true;
12139
12140							handle_new_monitor_update!(
12141								self,
12142								funding_txo.unwrap(),
12143								monitor_update,
12144								peer_state_lock,
12145								peer_state,
12146								per_peer_state,
12147								chan
12148							);
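							// Handling the monitor update may have required dropping the locks taken
							// above (see the note at the top of this loop), so restart the scan from
							// the beginning.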
12149							continue 'peer_loop;
12150						}
12151					}
12152					break 'chan_loop;
12153				}
12154			}
12155			break 'peer_loop;
12156		}
12157
12158		let has_update = has_monitor_update || !failed_htlcs.is_empty();
12159		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
12160			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
12161		}
12162
12163		has_update
12164	}
12165
12166	/// When a call to a [`ChannelSigner`] method returns an error, this indicates that the signer
12167	/// is (temporarily) unavailable, and the operation should be retried later.
12168	///
12169	/// This method allows for that retry - either checking for any signer-pending messages to be
12170	/// attempted in every channel, or in the specifically provided channel.
12171	///
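	/// A brief usage sketch (the `channel_manager` and channel identifiers are hypothetical):
	///
	/// ```ignore
	/// // Once the signer is reachable again, retry signer-pending messages on all channels:
	/// channel_manager.signer_unblocked(None);
	/// // ...or only on one specific channel:
	/// channel_manager.signer_unblocked(Some((counterparty_node_id, channel_id)));
	/// ```
	///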
12172	/// [`ChannelSigner`]: crate::sign::ChannelSigner
12173	#[rustfmt::skip]
12174	pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
12175		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
12176
12177		// Returns a `ShutdownResult` if the channel has just been closed and should now be removed.
12178		let unblock_chan = |chan: &mut Channel<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
12179			let channel_id = chan.context().channel_id();
12180			let outbound_scid_alias = chan.context().outbound_scid_alias();
12181			let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
12182			let node_id = chan.context().get_counterparty_node_id();
12183			if let Some(msgs) = chan.signer_maybe_unblocked(
12184				self.chain_hash, &&logger,
12185				|htlc_id| self.path_for_release_held_htlc(htlc_id, outbound_scid_alias, &channel_id, &node_id)
12186			) {
12187				if chan.context().is_connected() {
12188					if let Some(msg) = msgs.open_channel {
12189						pending_msg_events.push(MessageSendEvent::SendOpenChannel {
12190							node_id,
12191							msg,
12192						});
12193					}
12194					if let Some(msg) = msgs.funding_created {
12195						pending_msg_events.push(MessageSendEvent::SendFundingCreated {
12196							node_id,
12197							msg,
12198						});
12199					}
12200					if let Some(msg) = msgs.accept_channel {
12201						pending_msg_events.push(MessageSendEvent::SendAcceptChannel {
12202							node_id,
12203							msg,
12204						});
12205					}
12206					let cu_msg = msgs.commitment_update.map(|updates| MessageSendEvent::UpdateHTLCs {
12207						node_id,
12208						channel_id,
12209						updates,
12210					});
12211					let raa_msg = msgs.revoke_and_ack.map(|msg| MessageSendEvent::SendRevokeAndACK {
12212						node_id,
12213						msg,
12214					});
12215					match (cu_msg, raa_msg) {
12216						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
12217							pending_msg_events.push(cu);
12218							pending_msg_events.push(raa);
12219						},
12220						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
12221							pending_msg_events.push(raa);
12222							pending_msg_events.push(cu);
12223						},
12224						(Some(cu), _) => pending_msg_events.push(cu),
12225						(_, Some(raa)) => pending_msg_events.push(raa),
12226						(_, _) => {},
12227					}
12228					if let Some(msg) = msgs.funding_signed {
12229						pending_msg_events.push(MessageSendEvent::SendFundingSigned {
12230							node_id,
12231							msg,
12232						});
12233					}
12234					if let Some(msg) = msgs.closing_signed {
12235						pending_msg_events.push(MessageSendEvent::SendClosingSigned {
12236							node_id,
12237							msg,
12238						});
12239					}
12240				}
12241				if let Some(funded_chan) = chan.as_funded() {
12242					if let Some(msg) = msgs.channel_ready {
12243						send_channel_ready!(self, pending_msg_events, funded_chan, msg);
12244					}
12245					if let Some(broadcast_tx) = msgs.signed_closing_tx {
12246						log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
12247						self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
12248					}
12249				} else {
12250					// We don't know how to handle a channel_ready or signed_closing_tx for a
12251					// non-funded channel.
12252					debug_assert!(msgs.channel_ready.is_none());
12253					debug_assert!(msgs.signed_closing_tx.is_none());
12254				}
12255				msgs.shutdown_result
12256			} else {
12257				None
12258			}
12259		};
12260
12261		let mut shutdown_results: Vec<(Result<Infallible, _>, _)> = Vec::new();
12262		let per_peer_state = self.per_peer_state.read().unwrap();
12263		let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
12264			if let Some((counterparty_node_id, _)) = channel_opt {
12265				**cp_id == counterparty_node_id
12266			} else { true }
12267		});
12268		for (cp_id, peer_state_mutex) in per_peer_state_iter {
12269			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12270			let peer_state = &mut *peer_state_lock;
12271			peer_state.channel_by_id.retain(|_, chan| {
12272				let shutdown_result = match channel_opt {
12273					Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
12274					_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
12275				};
12276				if let Some(shutdown) = shutdown_result {
12277					let context = chan.context();
12278					let logger = WithChannelContext::from(&self.logger, context, None);
12279					let chan_id = context.channel_id();
12280					log_trace!(logger, "Removing channel {} now that the signer is unblocked", chan_id);
12281					let (remove, err) = if let Some(funded) = chan.as_funded_mut() {
12282						let err =
12283							convert_channel_err!(self, peer_state, shutdown, funded, COOP_CLOSED);
12284						(true, err)
12285					} else {
12286						debug_assert!(false);
12287						let reason = shutdown.closure_reason.clone();
12288						let err = ChannelError::Close((reason.to_string(), reason));
12289						convert_channel_err!(self, peer_state, err, chan, UNFUNDED_CHANNEL)
12290					};
12291					debug_assert!(remove);
12292					shutdown_results.push((Err(err), *cp_id));
12293					false
12294				} else {
12295					true
12296				}
12297			});
12298		}
12299		drop(per_peer_state);
12300		for (err, counterparty_node_id) in shutdown_results {
12301			let _ = handle_error!(self, err, counterparty_node_id);
12302		}
12303	}
12304
12305	/// Check whether any channels have finished removing all pending updates after a shutdown
12306	/// exchange and can now send a closing_signed.
12307	/// Returns whether any closing_signed messages were generated.
12308	#[rustfmt::skip]
12309	fn maybe_generate_initial_closing_signed(&self) -> bool {
12310		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
12311		let mut has_update = false;
12312		{
12313			let per_peer_state = self.per_peer_state.read().unwrap();
12314
12315			for (cp_id, peer_state_mutex) in per_peer_state.iter() {
12316				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12317				let peer_state = &mut *peer_state_lock;
12318				let pending_msg_events = &mut peer_state.pending_msg_events;
12319				peer_state.channel_by_id.retain(|_, chan| {
12320					if !chan.context().is_connected() {
12321						return true;
12322					}
12323					match chan.as_funded_mut() {
12324						Some(funded_chan) => {
12325							let logger = WithChannelContext::from(&self.logger, &funded_chan.context, None);
12326							match funded_chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
12327								Ok((msg_opt, tx_shutdown_result_opt)) => {
12328									if let Some(msg) = msg_opt {
12329										has_update = true;
12330										pending_msg_events.push(MessageSendEvent::SendClosingSigned {
12331											node_id: funded_chan.context.get_counterparty_node_id(), msg,
12332										});
12333									}
12334									debug_assert_eq!(tx_shutdown_result_opt.is_some(), funded_chan.is_shutdown());
12335									if let Some((tx, shutdown_res)) = tx_shutdown_result_opt {
12336										// We're done with this channel. We got a closing_signed and sent back
12337										// a closing_signed with a closing transaction to broadcast.
12338										let err = convert_channel_err!(self, peer_state, shutdown_res, funded_chan, COOP_CLOSED);
12339										handle_errors.push((*cp_id, Err(err)));
12340
12341										log_info!(logger, "Broadcasting {}", log_tx!(tx));
12342										self.tx_broadcaster.broadcast_transactions(&[&tx]);
12343										false
12344									} else { true }
12345								},
12346								Err(e) => {
12347									has_update = true;
12348									let (close_channel, res) = convert_channel_err!(self, peer_state, e, funded_chan, FUNDED_CHANNEL);
12349									handle_errors.push((funded_chan.context.get_counterparty_node_id(), Err(res)));
12350									!close_channel
12351								}
12352							}
12353						},
12354						None => true, // Retain unfunded channels if present.
12355					}
12356				});
12357			}
12358		}
12359
12360		for (counterparty_node_id, err) in handle_errors {
12361			let _ = handle_error!(self, err, counterparty_node_id);
12362		}
12363
12364		has_update
12365	}
12366
12367	#[rustfmt::skip]
12368	fn maybe_send_stfu(&self) {
12369		let per_peer_state = self.per_peer_state.read().unwrap();
12370		for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
12371			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12372			let peer_state = &mut *peer_state_lock;
12373			let pending_msg_events = &mut peer_state.pending_msg_events;
12374			for (channel_id, chan) in &mut peer_state.channel_by_id {
12375				if let Some(funded_chan) = chan.as_funded_mut() {
12376					let logger = WithContext::from(
12377						&self.logger, Some(*counterparty_node_id), Some(*channel_id), None
12378					);
12379					match funded_chan.try_send_stfu(&&logger) {
12380						Ok(None) => {},
12381						Ok(Some(stfu)) => {
12382							pending_msg_events.push(MessageSendEvent::SendStfu {
12383								node_id: chan.context().get_counterparty_node_id(),
12384								msg: stfu,
12385							});
12386						},
12387						Err(e) => {
12388							log_debug!(logger, "Could not advance quiescence handshake: {}", e);
12389						}
12390					}
12391				}
12392			}
12393		}
12394	}
12395
12396	#[cfg(any(test, fuzzing))]
12397	#[rustfmt::skip]
12398	pub fn maybe_propose_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result<(), APIError> {
12399		let mut result = Ok(());
12400		PersistenceNotifierGuard::optionally_notify(self, || {
12401			let mut notify = NotifyOption::SkipPersistNoEvents;
12402
12403			let per_peer_state = self.per_peer_state.read().unwrap();
12404			let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
12405			if peer_state_mutex_opt.is_none() {
12406				result = Err(APIError::ChannelUnavailable {
12407					err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
12408				});
12409				return notify;
12410			}
12411
12412			let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
12413			if !peer_state.latest_features.supports_quiescence() {
12414				result = Err(APIError::ChannelUnavailable { err: "Peer does not support quiescence".to_owned() });
12415				return notify;
12416			}
12417
12418			match peer_state.channel_by_id.entry(channel_id.clone()) {
12419				hash_map::Entry::Occupied(mut chan_entry) => {
12420					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
12421						let logger = WithContext::from(
12422							&self.logger, Some(*counterparty_node_id), Some(*channel_id), None
12423						);
12424
12425						match chan.propose_quiescence(&&logger, QuiescentAction::DoNothing) {
12426							Ok(None) => {},
12427							Ok(Some(stfu)) => {
12428								peer_state.pending_msg_events.push(MessageSendEvent::SendStfu {
12429									node_id: *counterparty_node_id, msg: stfu
12430								});
12431								notify = NotifyOption::SkipPersistHandleEvents;
12432							},
12433							Err(msg) => log_trace!(logger, "{}", msg),
12434						}
12435					} else {
12436						result = Err(APIError::APIMisuseError {
12437							err: format!("Unfunded channel {} cannot be quiescent", channel_id),
12438						});
12439					}
12440				},
12441				hash_map::Entry::Vacant(_) => {
12442					result = Err(APIError::ChannelUnavailable {
12443						err: format!("Channel with id {} not found for the passed counterparty node_id {}",
12444							channel_id, counterparty_node_id),
12445					});
12446				},
12447			}
12448
12449			notify
12450		});
12451
12452		result
12453	}
12454
12455	#[cfg(any(test, fuzzing))]
12456	#[rustfmt::skip]
12457	pub fn exit_quiescence(&self, counterparty_node_id: &PublicKey, channel_id: &ChannelId) -> Result<bool, APIError> {
12458		let per_peer_state = self.per_peer_state.read().unwrap();
12459		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
12460			.ok_or_else(|| APIError::ChannelUnavailable {
12461				err: format!("Can't find a peer matching the passed counterparty node_id {counterparty_node_id}")
12462			})?;
12463		let mut peer_state = peer_state_mutex.lock().unwrap();
12464		let initiator = match peer_state.channel_by_id.entry(*channel_id) {
12465			hash_map::Entry::Occupied(mut chan_entry) => {
12466				if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
12467					chan.exit_quiescence()
12468				} else {
12469					return Err(APIError::APIMisuseError {
12470						err: format!("Unfunded channel {} cannot be quiescent", channel_id),
12471					})
12472				}
12473			},
12474			hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable {
12475				err: format!("Channel with id {} not found for the passed counterparty node_id {}",
12476					channel_id, counterparty_node_id),
12477			}),
12478		};
12479		Ok(initiator)
12480	}
12481
12482	/// Utility for creating a BOLT11 invoice that can be verified by [`ChannelManager`] without
12483	/// storing any additional state. It achieves this by including a [`PaymentSecret`] in the
12484	/// invoice which it uses to verify that the invoice has not expired and the payment amount is
12485	/// sufficient, reproducing the [`PaymentPreimage`] if applicable.
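	///
	/// A minimal usage sketch with illustrative values; `channel_manager` and `description` are
	/// assumed to already be in scope:
	///
	/// ```ignore
	/// let params = Bolt11InvoiceParameters {
	/// 	amount_msats: Some(10_000),
	/// 	description: Bolt11InvoiceDescription::Direct(description),
	/// 	invoice_expiry_delta_secs: Some(3600),
	/// 	min_final_cltv_expiry_delta: None,
	/// 	payment_hash: None,
	/// };
	/// let invoice = channel_manager.create_bolt11_invoice(params)?;
	/// ```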
12486	#[rustfmt::skip]
12487	pub fn create_bolt11_invoice(
12488		&self, params: Bolt11InvoiceParameters,
12489	) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
12490		let Bolt11InvoiceParameters {
12491			amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
12492			payment_hash,
12493		} = params;
12494
12495		let currency =
12496			Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);
12497
12498		#[cfg(feature = "std")]
12499		let duration_since_epoch = {
12500			use std::time::SystemTime;
12501			SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
12502				.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
12503		};
12504
12505		// This may be up to 2 hours in the future because of bitcoin's block time rule or about
12506		// 10-30 minutes in the past if a block hasn't been found recently. This should be fine as
12507		// the default invoice expiration is 2 hours, though shorter expirations may be problematic.
12508		#[cfg(not(feature = "std"))]
12509		let duration_since_epoch =
12510			Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
12511
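		// A three-block buffer is added to the delta when building the invoice below, so only
		// reject values that would still fall short of LDK's minimum even with that buffer.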
12512		if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
12513			if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
12514				return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
12515			}
12516		}
12517
12518		let (payment_hash, payment_secret) = match payment_hash {
12519			Some(payment_hash) => {
12520				let payment_secret = self
12521					.create_inbound_payment_for_hash(
12522						payment_hash, amount_msats,
12523						invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
12524						min_final_cltv_expiry_delta,
12525					)
12526					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
12527				(payment_hash, payment_secret)
12528			},
12529			None => {
12530				self
12531					.create_inbound_payment(
12532						amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
12533						min_final_cltv_expiry_delta,
12534					)
12535					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
12536			},
12537		};
12538
12539		log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);
12540
12541		let invoice = Bolt11InvoiceBuilder::new(currency);
12542		let invoice = match description {
12543			Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
12544			Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
12545		};
12546
12547		let mut invoice = invoice
12548			.duration_since_epoch(duration_since_epoch)
12549			.payee_pub_key(self.get_our_node_id())
12550			.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
12551			.payment_secret(payment_secret)
12552			.basic_mpp()
12553			.min_final_cltv_expiry_delta(
12554				// Add a buffer of 3 to the delta if present, otherwise use LDK's minimum.
12555				min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
12556			);
12557
12558		if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs {
12559			invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
12560		}
12561
12562		if let Some(amount_msats) = amount_msats {
12563			invoice = invoice.amount_milli_satoshis(amount_msats);
12564		}
12565
12566		let channels = self.list_channels();
12567		let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
12568		for hint in route_hints {
12569			invoice = invoice.private_route(hint);
12570		}
12571
12572		let raw_invoice = invoice.build_raw().map_err(|e| SignOrCreationError::CreationError(e))?;
12573		let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);
12574
12575		raw_invoice
12576			.sign(|_| signature)
12577			.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
12578			.map_err(|e| SignOrCreationError::SignError(e))
12579	}
12580}
12581
12582/// Parameters used with [`create_bolt11_invoice`].
12583///
12584/// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice
12585pub struct Bolt11InvoiceParameters {
12586	/// The amount for the invoice, if any.
12587	pub amount_msats: Option<u64>,
12588
12589	/// The description for what the invoice is for, or hash of such description.
12590	pub description: Bolt11InvoiceDescription,
12591
12592	/// The invoice expiration relative to its creation time. If not set, the invoice will expire
12593	/// after [`DEFAULT_EXPIRY_TIME`].
12594	///
12595	/// The creation time used is the duration since the Unix epoch for `std` builds. For non-`std`
12596	/// builds, the highest block timestamp seen is used instead. In the latter case, use a long
12597	/// enough expiry to account for the average block time.
12598	pub invoice_expiry_delta_secs: Option<u32>,
12599
12600	/// The minimum `cltv_expiry` for the last HTLC in the route. If not set, will use
12601	/// [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
12602	///
12603	/// If set, must be at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`], and a three-block buffer will be
12604	/// added as well to allow for up to a few new block confirmations during routing.
12605	pub min_final_cltv_expiry_delta: Option<u16>,
12606
12607	/// The payment hash used in the invoice. If not set, a payment hash will be generated using a
12608	/// preimage that can be reproduced by [`ChannelManager`] without storing any state.
12609	///
12610	/// Uses the given payment hash if set. This may be useful if you're building an on-chain swap
12611	/// or another protocol where the payment hash is also used outside the scope of
12612	/// lightning.
12613	pub payment_hash: Option<PaymentHash>,
12614}
12615
12616impl Default for Bolt11InvoiceParameters {
12617	fn default() -> Self {
12618		Self {
12619			amount_msats: None,
12620			description: Bolt11InvoiceDescription::Direct(Description::empty()),
12621			invoice_expiry_delta_secs: None,
12622			min_final_cltv_expiry_delta: None,
12623			payment_hash: None,
12624		}
12625	}
12626}
12627
12628macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
12629	/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
12630	/// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
12631	/// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
12632	///
12633	/// # Privacy
12634	///
12635	/// Uses [`MessageRouter`] provided at construction to construct a [`BlindedMessagePath`] for
12636	/// the offer. See the documentation of the selected [`MessageRouter`] for details on how it
12637	/// selects blinded paths including privacy implications and reliability tradeoffs.
12638	///
12639	/// Also, uses a derived signing pubkey in the offer for recipient privacy.
12640	///
12641	/// # Limitations
12642	///
12643	/// See [`OffersMessageFlow::create_offer_builder`] for limitations on the offer builder.
12644	///
12645	/// # Errors
12646	///
12647	/// Errors if the parameterized [`MessageRouter`] is unable to create a blinded path for the offer.
12648	///
12649	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
12650	/// [`Offer`]: crate::offers::offer::Offer
12651	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
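	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming `channel_manager` is an existing
	/// [`ChannelManager`]; the amount and description are illustrative only:
	///
	/// ```ignore
	/// let offer = channel_manager
	///     .create_offer_builder()?
	///     .amount_msats(10_000)
	///     .description("coffee".to_string())
	///     .build()?;
	/// println!("Offer to publish: {}", offer);
	/// ```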
12652	pub fn create_offer_builder(&$self) -> Result<$builder, Bolt12SemanticError> {
12653		let builder = $self.flow.create_offer_builder(
12654			&*$self.entropy_source, $self.get_peers_for_blinded_path()
12655		)?;
12656
12657		Ok(builder.into())
12658	}
12659
12660	/// Same as [`Self::create_offer_builder`], but allows specifying a custom [`MessageRouter`]
12661	/// instead of using the [`MessageRouter`] provided to the [`ChannelManager`] at construction.
12662	///
12663	/// This gives users full control over how the [`BlindedMessagePath`] is constructed,
12664	/// including the option to omit it entirely.
12665	///
12666	/// See [`Self::create_offer_builder`] for more details on usage.
12667	///
12668	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
12669	/// [`Offer`]: crate::offers::offer::Offer
12670	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
12671	pub fn create_offer_builder_using_router<ME: Deref>(
12672		&$self,
12673		router: ME,
12674	) -> Result<$builder, Bolt12SemanticError>
12675	where
12676		ME::Target: MessageRouter,
12677	{
12678		let builder = $self.flow.create_offer_builder_using_router(
12679			router, &*$self.entropy_source, $self.get_peers_for_blinded_path()
12680		)?;
12681
12682		Ok(builder.into())
12683	}
12684} }
12685
12686macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
12687	/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
12688	/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
12689	///
12690	/// # Payment
12691	///
12692	/// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
12693	/// See [Avoiding Duplicate Payments] for other requirements once the payment has been sent.
12694	///
12695	/// The builder will have the provided expiration set. Any changes to the expiration on the
12696	/// returned builder will not be honored by [`ChannelManager`]. For non-`std`, the highest seen
12697	/// block time minus two hours is used for the current time when determining if the refund has
12698	/// expired.
12699	///
12700	/// To revoke the refund, use [`ChannelManager::abandon_payment`] prior to receiving the
12701	/// invoice. If abandoned, or an invoice isn't received before expiration, the payment will fail
12702	/// with an [`Event::PaymentFailed`].
12703	///
12704	/// If `max_total_routing_fee_msat` is not specified, the default from
12705	/// [`RouteParameters::from_payment_params_and_value`] is applied.
12706	///
12707	/// # Privacy
12708	///
12709	/// Uses [`MessageRouter`] provided at construction to construct a [`BlindedMessagePath`] for
12710	/// the refund. See the documentation of the selected [`MessageRouter`] for details on how it
12711	/// selects blinded paths including privacy implications and reliability tradeoffs.
12712	///
12713	/// Also, uses a derived payer id in the refund for payer privacy.
12714	///
12715	/// # Errors
12716	///
12717	/// Errors if:
12718	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
12719	/// - `amount_msats` is invalid, or
12720	/// - the parameterized [`Router`] is unable to create a blinded path for the refund.
12721	///
12722	/// [`Refund`]: crate::offers::refund::Refund
12723	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
12724	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
12725	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
12726	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
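	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming `channel_manager` is an existing
	/// [`ChannelManager`], `absolute_expiry` is a [`Duration`] since the Unix epoch, and
	/// `route_params_config` is a previously constructed [`RouteParametersConfig`]:
	///
	/// ```ignore
	/// let payment_id = PaymentId([42; 32]);
	/// let refund = channel_manager
	///     .create_refund_builder(
	///         10_000, absolute_expiry, payment_id, Retry::Attempts(3), route_params_config,
	///     )?
	///     .build()?;
	/// println!("Refund to hand to the payee: {}", refund);
	/// ```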
12727	pub fn create_refund_builder(
12728		&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
12729		retry_strategy: Retry, route_params_config: RouteParametersConfig
12730	) -> Result<$builder, Bolt12SemanticError> {
12731		let entropy = &*$self.entropy_source;
12732
12733		let builder = $self.flow.create_refund_builder(
12734			entropy, amount_msats, absolute_expiry,
12735			payment_id, $self.get_peers_for_blinded_path()
12736		)?;
12737
12738		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
12739
12740		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
12741		$self.pending_outbound_payments
12742			.add_new_awaiting_invoice(
12743				payment_id, expiration, retry_strategy, route_params_config, None,
12744			)
12745			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
12746
12747		Ok(builder.into())
12748	}
12749
12750	/// Same as [`Self::create_refund_builder`], but allows specifying a custom [`MessageRouter`]
12751	/// instead of using the one provided during [`ChannelManager`] construction for
12752	/// [`BlindedMessagePath`] creation.
12753	///
12754	/// This gives users full control over how the [`BlindedMessagePath`] is constructed for the
12755	/// refund, including the option to omit it entirely. This is useful for testing or when
12756	/// alternative privacy strategies are needed.
12757	///
12758	/// See [`Self::create_refund_builder`] for more details on usage.
12759	///
12760	/// # Errors
12761	///
12762	/// In addition to the errors in [`Self::create_refund_builder`], this returns an error if
12763	/// the provided [`MessageRouter`] fails to construct a valid [`BlindedMessagePath`] for the refund.
12764	///
12765	/// [`Refund`]: crate::offers::refund::Refund
12766	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
12767	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
12768	pub fn create_refund_builder_using_router<ME: Deref>(
12769		&$self, router: ME, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
12770		retry_strategy: Retry, route_params_config: RouteParametersConfig
12771	) -> Result<$builder, Bolt12SemanticError>
12772	where
12773		ME::Target: MessageRouter,
12774	{
12775		let entropy = &*$self.entropy_source;
12776
12777		let builder = $self.flow.create_refund_builder_using_router(
12778			router, entropy, amount_msats, absolute_expiry,
12779			payment_id, $self.get_peers_for_blinded_path()
12780		)?;
12781
12782		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
12783
12784		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
12785		$self.pending_outbound_payments
12786			.add_new_awaiting_invoice(
12787				payment_id, expiration, retry_strategy, route_params_config, None,
12788			)
12789			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
12790
12791		Ok(builder.into())
12792	}
12793} }
12794
12795impl<
12796		M: Deref,
12797		T: Deref,
12798		ES: Deref,
12799		NS: Deref,
12800		SP: Deref,
12801		F: Deref,
12802		R: Deref,
12803		MR: Deref,
12804		L: Deref,
12805	> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12806where
12807	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12808	T::Target: BroadcasterInterface,
12809	ES::Target: EntropySource,
12810	NS::Target: NodeSigner,
12811	SP::Target: SignerProvider,
12812	F::Target: FeeEstimator,
12813	R::Target: Router,
12814	MR::Target: MessageRouter,
12815	L::Target: Logger,
12816{
12817	#[cfg(not(c_bindings))]
12818	create_offer_builder!(self, OfferBuilder<'_, DerivedMetadata, secp256k1::All>);
12819	#[cfg(not(c_bindings))]
12820	create_refund_builder!(self, RefundBuilder<'_, secp256k1::All>);
12821
12822	#[cfg(c_bindings)]
12823	create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
12824	#[cfg(c_bindings)]
12825	create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
12826
12827	/// Retrieve an [`Offer`] for receiving async payments as an often-offline recipient. Will only
12828	/// return an offer if [`Self::set_paths_to_static_invoice_server`] was called and we succeeded in
12829	/// interactively building a [`StaticInvoice`] with the static invoice server.
12830	///
12831	/// Useful for posting offers to receive payments later, such as posting an offer on a website.
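	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test); how the offer is published is application
	/// specific:
	///
	/// ```ignore
	/// if let Ok(offer) = channel_manager.get_async_receive_offer() {
	///     println!("Async receive offer to publish: {}", offer);
	/// }
	/// ```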
12832	pub fn get_async_receive_offer(&self) -> Result<Offer, ()> {
12833		let (offer, needs_persist) = self.flow.get_async_receive_offer()?;
12834		if needs_persist {
12835			// We need to re-persist the cache if a fresh offer was just marked as used to ensure we
12836			// continue to keep this offer's invoice updated and don't replace it with the server.
12837			let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
12838		}
12839		Ok(offer)
12840	}
12841
12842	/// Sets the [`BlindedMessagePath`]s that we will use as an async recipient to interactively build
12843	/// [`Offer`]s with a static invoice server, so the server can serve [`StaticInvoice`]s to payers
12844	/// on our behalf when we're offline.
12845	///
12846	/// This method only needs to be called once when the server first takes on the recipient as a
12847	/// client, or when the paths change, e.g. if the paths are set to expire at a particular time.
12848	pub fn set_paths_to_static_invoice_server(
12849		&self, paths_to_static_invoice_server: Vec<BlindedMessagePath>,
12850	) -> Result<(), ()> {
12851		let peers = self.get_peers_for_blinded_path();
12852		self.flow.set_paths_to_static_invoice_server(paths_to_static_invoice_server, peers)?;
12853
12854		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
12855		Ok(())
12856	}
12857
12858	/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
12859	/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
12860	/// [`Bolt12Invoice`] once it is received.
12861	///
12862	/// Uses [`InvoiceRequestBuilder`] such that the [`InvoiceRequest`] it builds is recognized by
12863	/// the [`ChannelManager`] when handling a [`Bolt12Invoice`] message in response to the request.
12864	///
12865	/// `amount_msats` allows you to overpay what is required to satisfy the offer, or it may be
12866	/// required if the offer does not specify an amount.
12867	///
12868	/// If the [`Offer`] was built from a human readable name resolved using BIP 353, you *must*
12869	/// instead call [`Self::pay_for_offer_from_hrn`].
12870	///
12871	/// # Payment
12872	///
12873	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
12874	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
12875	/// been sent.
12876	///
12877	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
12878	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
12879	/// payment will fail with an [`Event::PaymentFailed`].
12880	///
12881	/// # Privacy
12882	///
12883	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
12884	/// to construct a [`BlindedMessagePath`] for the reply path.
12885	///
12886	/// # Note
12887	///
12888	/// If the offer resolves to an async payment, and the HTLC is neither claimed nor failed by
12889	/// our next-hop peer, we will not force-close the channel to resolve the payment for 4
12890	/// weeks. This avoids an issue for often-offline nodes where channels are force-closed on
12891	/// startup during chain sync prior to connecting to peers. If you want to resolve such a
12892	/// timed-out payment more urgently, you can manually force-close the channel which will,
12893	/// after some transaction confirmation(s), result in an [`Event::PaymentFailed`].
12894	///
12895	/// # Errors
12896	///
12897	/// Errors if:
12898	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
12899	/// - the provided parameters are invalid for the offer,
12900	/// - the offer is for an unsupported chain, or
12901	/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
12902	///   request.
12903	///
12904	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
12905	/// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder
12906	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
12907	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
12908	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
12909	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
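	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming `channel_manager` is an existing
	/// [`ChannelManager`], `offer` is an [`Offer`] received from the payee (e.g. parsed from an
	/// `lno...` string), and `params` is an [`OptionalOfferPaymentParams`] built elsewhere:
	///
	/// ```ignore
	/// let payment_id = PaymentId([42; 32]);
	/// // Pay whatever amount the offer itself specifies.
	/// channel_manager.pay_for_offer(&offer, None, payment_id, params)?;
	/// // The outcome is surfaced later via `Event::PaymentSent` or `Event::PaymentFailed`.
	/// ```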
12910	pub fn pay_for_offer(
12911		&self, offer: &Offer, amount_msats: Option<u64>, payment_id: PaymentId,
12912		optional_params: OptionalOfferPaymentParams,
12913	) -> Result<(), Bolt12SemanticError> {
12914		let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
12915			self.pending_outbound_payments
12916				.add_new_awaiting_invoice(
12917					payment_id,
12918					StaleExpiration::TimerTicks(1),
12919					optional_params.retry_strategy,
12920					optional_params.route_params_config,
12921					Some(retryable_invoice_request),
12922				)
12923				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
12924		};
12925
12926		self.pay_for_offer_intern(
12927			offer,
12928			if offer.expects_quantity() { Some(1) } else { None },
12929			amount_msats,
12930			optional_params.payer_note,
12931			payment_id,
12932			None,
12933			create_pending_payment_fn,
12934		)
12935	}
12936
12937	/// Pays for an [`Offer`] which was built by resolving a human readable name. It is otherwise
12938	/// identical to [`Self::pay_for_offer`].
12939	pub fn pay_for_offer_from_hrn(
12940		&self, offer: &OfferFromHrn, amount_msats: u64, payment_id: PaymentId,
12941		optional_params: OptionalOfferPaymentParams,
12942	) -> Result<(), Bolt12SemanticError> {
12943		let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
12944			self.pending_outbound_payments
12945				.add_new_awaiting_invoice(
12946					payment_id,
12947					StaleExpiration::TimerTicks(1),
12948					optional_params.retry_strategy,
12949					optional_params.route_params_config,
12950					Some(retryable_invoice_request),
12951				)
12952				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
12953		};
12954
12955		self.pay_for_offer_intern(
12956			&offer.offer,
12957			if offer.offer.expects_quantity() { Some(1) } else { None },
12958			Some(amount_msats),
12959			optional_params.payer_note,
12960			payment_id,
12961			Some(offer.hrn),
12962			create_pending_payment_fn,
12963		)
12964	}
12965
12966	/// Pays for an [`Offer`] using the given parameters, including a `quantity`, by creating an
12967	/// [`InvoiceRequest`] and enqueuing it to be sent via an onion message. [`ChannelManager`] will
12968	/// pay the actual [`Bolt12Invoice`] once it is received.
12969	///
12970	/// This method is identical to [`Self::pay_for_offer`] with the one exception that it allows
12971	/// you to specify the [`InvoiceRequest::quantity`]. We expect this to be rather seldom used,
12972	/// as the "quantity" feature of offers doesn't line up with common payment flows today.
12973	///
12974	/// This method is otherwise identical to [`Self::pay_for_offer`] but will additionally fail if
12975	/// the provided `quantity` does not meet the requirements described by
12976	/// [`Offer::supported_quantity`].
12977	///
12978	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
12979	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
12980	/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
12981	pub fn pay_for_offer_with_quantity(
12982		&self, offer: &Offer, amount_msats: Option<u64>, payment_id: PaymentId,
12983		optional_params: OptionalOfferPaymentParams, quantity: u64,
12984	) -> Result<(), Bolt12SemanticError> {
12985		let create_pending_payment_fn = |retryable_invoice_request: RetryableInvoiceRequest| {
12986			self.pending_outbound_payments
12987				.add_new_awaiting_invoice(
12988					payment_id,
12989					StaleExpiration::TimerTicks(1),
12990					optional_params.retry_strategy,
12991					optional_params.route_params_config,
12992					Some(retryable_invoice_request),
12993				)
12994				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
12995		};
12996
12997		self.pay_for_offer_intern(
12998			offer,
12999			Some(quantity),
13000			amount_msats,
13001			optional_params.payer_note,
13002			payment_id,
13003			None,
13004			create_pending_payment_fn,
13005		)
13006	}
13007
13008	#[rustfmt::skip]
13009	fn pay_for_offer_intern<CPP: FnOnce(RetryableInvoiceRequest) -> Result<(), Bolt12SemanticError>>(
13010		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
13011		payer_note: Option<String>, payment_id: PaymentId,
13012		human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
13013	) -> Result<(), Bolt12SemanticError> {
13014		let entropy = &*self.entropy_source;
13015		let nonce = Nonce::from_entropy_source(entropy);
13016
13017		let builder = self.flow.create_invoice_request_builder(
13018			offer, nonce, payment_id,
13019		)?;
13020
13021		let builder = match quantity {
13022			None => builder,
13023			Some(quantity) => builder.quantity(quantity)?,
13024		};
13025		let builder = match amount_msats {
13026			None => builder,
13027			Some(amount_msats) => builder.amount_msats(amount_msats)?,
13028		};
13029		let builder = match payer_note {
13030			None => builder,
13031			Some(payer_note) => builder.payer_note(payer_note),
13032		};
13033		let builder = match human_readable_name {
13034			None => builder,
13035			Some(hrn) => builder.sourced_from_human_readable_name(hrn),
13036		};
13037
13038		let invoice_request = builder.build_and_sign()?;
13039		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
13040
13041		self.flow.enqueue_invoice_request(
13042			invoice_request.clone(), payment_id, nonce,
13043			self.get_peers_for_blinded_path()
13044		)?;
13045
13046		let retryable_invoice_request = RetryableInvoiceRequest {
13047			invoice_request: invoice_request.clone(),
13048			nonce,
13049			needs_retry: true,
13050		};
13051
13052		create_pending_payment(retryable_invoice_request)
13053	}
13054
13055	/// Creates a [`Bolt12Invoice`] for a [`Refund`] and enqueues it to be sent via an onion
13056	/// message.
13057	///
13058	/// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
13059	/// [`BlindedPaymentPath`] containing the [`PaymentSecret`] needed to reconstruct the
13060	/// corresponding [`PaymentPreimage`]. It is returned purely for informational purposes.
13061	///
13062	/// # Limitations
13063	///
13064	/// Requires a direct connection to an introduction node in [`Refund::paths`] or to
13065	/// [`Refund::payer_signing_pubkey`], if empty. This request is best effort; an invoice will be
13066	/// sent to each node meeting the aforementioned criteria, but there's no guarantee that they
13067	/// will be received and no retries will be made.
13068	///
13069	/// # Errors
13070	///
13071	/// Errors if:
13072	/// - the refund is for an unsupported chain, or
13073	/// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
13074	///   the invoice.
13075	///
13076	/// [`BlindedPaymentPath`]: crate::blinded_path::payment::BlindedPaymentPath
13077	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
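	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming `channel_manager` is an existing
	/// [`ChannelManager`] and `refund` is a [`Refund`] received from the payer:
	///
	/// ```ignore
	/// match channel_manager.request_refund_payment(&refund) {
	///     Ok(_invoice) => println!("Enqueued an invoice in response to the refund"),
	///     Err(e) => eprintln!("Failed to respond to refund: {:?}", e),
	/// }
	/// ```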
13078	#[rustfmt::skip]
13079	pub fn request_refund_payment(
13080		&self, refund: &Refund
13081	) -> Result<Bolt12Invoice, Bolt12SemanticError> {
13082		let secp_ctx = &self.secp_ctx;
13083
13084		let amount_msats = refund.amount_msats();
13085		let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
13086
13087		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
13088
13089		match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
13090			Ok((payment_hash, payment_secret)) => {
13091				let entropy = &*self.entropy_source;
13092				let builder = self.flow.create_invoice_builder_from_refund(
13093					&self.router, entropy, refund, payment_hash,
13094					payment_secret, self.list_usable_channels()
13095				)?;
13096
13097				let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
13098
13099				self.flow.enqueue_invoice(invoice.clone(), refund, self.get_peers_for_blinded_path())?;
13100
13101				Ok(invoice)
13102			},
13103			Err(()) => Err(Bolt12SemanticError::InvalidAmount),
13104		}
13105	}
13106
13107	/// Pays for an [`Offer`] looked up using [BIP 353] Human Readable Names resolved by the DNS
13108	/// resolver(s) at `dns_resolvers` which resolve names according to [bLIP 32].
13109	///
13110	/// Because most wallets support on-chain or other payment schemes beyond only offers, this
13111	/// method is deprecated in favor of the [`bitcoin-payment-instructions`] crate, which can be
13112	/// used to build an [`OfferFromHrn`] that is then paid via
13113	/// [`Self::pay_for_offer_from_hrn`].
13114	///
13115	/// # Payment
13116	///
13117	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
13118	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
13119	/// been sent.
13120	///
13121	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
13122	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
13123	/// payment will fail with a [`PaymentFailureReason::UserAbandoned`] or
13124	/// [`PaymentFailureReason::InvoiceRequestExpired`], respectively.
13125	///
13126	/// # Privacy
13127	///
13128	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
13129	/// to construct a [`BlindedMessagePath`] for the reply path.
13130	///
13131	/// # Errors
13132	///
13133	/// Errors if a duplicate `payment_id` is provided given the caveats in the aforementioned link.
13134	///
13135	/// [BIP 353]: https://github.com/bitcoin/bips/blob/master/bip-0353.mediawiki
13136	/// [bLIP 32]: https://github.com/lightning/blips/blob/master/blip-0032.md
13137	/// [`OMNameResolver::resolve_name`]: crate::onion_message::dns_resolution::OMNameResolver::resolve_name
13138	/// [`OMNameResolver::handle_dnssec_proof_for_uri`]: crate::onion_message::dns_resolution::OMNameResolver::handle_dnssec_proof_for_uri
13139	/// [`bitcoin-payment-instructions`]: https://docs.rs/bitcoin-payment-instructions/
13140	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
13141	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
13142	/// [`PaymentFailureReason::UserAbandoned`]: crate::events::PaymentFailureReason::UserAbandoned
13143	/// [`PaymentFailureReason::InvoiceRequestExpired`]: crate::events::PaymentFailureReason::InvoiceRequestExpired
13144	#[cfg(feature = "dnssec")]
13145	#[deprecated(note = "Use bitcoin-payment-instructions and pay_for_offer_from_hrn instead")]
13146	pub fn pay_for_offer_from_human_readable_name(
13147		&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
13148		optional_params: OptionalOfferPaymentParams, dns_resolvers: Vec<Destination>,
13149	) -> Result<(), ()> {
13150		let (onion_message, context) =
13151			self.flow.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
13152
13153		let expiration = StaleExpiration::TimerTicks(1);
13154		self.pending_outbound_payments.add_new_awaiting_offer(
13155			payment_id,
13156			expiration,
13157			optional_params.retry_strategy,
13158			optional_params.route_params_config,
13159			amount_msats,
13160			optional_params.payer_note,
13161		)?;
13162
13163		self.flow
13164			.enqueue_dns_onion_message(
13165				onion_message,
13166				context,
13167				dns_resolvers,
13168				self.get_peers_for_blinded_path(),
13169			)
13170			.map_err(|_| ())
13171	}
13172
13173	/// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
13174	/// to pay us.
13175	///
13176	/// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
13177	/// [`PaymentHash`] and [`PaymentPreimage`] for you.
13178	///
13179	/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event,
13180	/// whose [`PaymentClaimable::purpose`] will return `Some` from [`PaymentPurpose::preimage`]. That
13181	/// preimage should then be passed directly to [`claim_funds`].
13182	///
13183	/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
13184	///
13185	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
13186	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
13187	///
13188	/// # Note
13189	///
13190	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
13191	/// deserialize it with a node running 0.0.103 or earlier, the payment will fail to be received.
13192	///
13193	/// Errors if `min_value_msat` is greater than total bitcoin supply.
13194	///
13195	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
13196	/// on versions of LDK prior to 0.0.114.
13197	///
13198	/// [`claim_funds`]: Self::claim_funds
13199	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
13200	/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
13201	/// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
13202	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
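	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test); the invoice itself is built separately, e.g.
	/// via [`Self::create_bolt11_invoice`]:
	///
	/// ```ignore
	/// // Register an inbound payment of at least 10_000 msat that expires in one hour.
	/// let (payment_hash, payment_secret) =
	///     channel_manager.create_inbound_payment(Some(10_000), 3_600, None)?;
	/// // Embed `payment_hash` and `payment_secret` in the invoice handed to the payer.
	/// ```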
13203	pub fn create_inbound_payment(
13204		&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
13205		min_final_cltv_expiry_delta: Option<u16>,
13206	) -> Result<(PaymentHash, PaymentSecret), ()> {
13207		inbound_payment::create(
13208			&self.inbound_payment_key,
13209			min_value_msat,
13210			invoice_expiry_delta_secs,
13211			&self.entropy_source,
13212			self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
13213			min_final_cltv_expiry_delta,
13214		)
13215	}
13216
13217	/// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
13218	/// stored external to LDK.
13219	///
13220	/// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a
13221	/// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
13222	/// the `min_value_msat` provided here, if one is provided.
13223	///
13224	/// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though
13225	/// note that LDK will not stop you from registering duplicate payment hashes for inbound
13226	/// payments.
13227	///
13228	/// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
13229	/// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
13230	/// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the
13231	/// sender "proof-of-payment" unless they have paid the required amount.
13232	///
13233	/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
13234	/// in excess of the current time. This should roughly match the expiry time set in the invoice.
13235	/// After this many seconds, we will remove the inbound payment, resulting in any attempts to
13236	/// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
13237	/// invoices when no timeout is set.
13238	///
13239	/// Note that we use block header time to time-out pending inbound payments (with some margin
13240	/// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
13241	/// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry.
13242	/// If you need exact expiry semantics, you should enforce them upon receipt of
13243	/// [`PaymentClaimable`].
13244	///
13245	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
13246	/// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
13247	///
13248	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
13249	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
13250	///
13251	/// # Note
13252	///
13253	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
13254	/// deserialize it with a node running 0.0.103 or earlier, the payment will fail to be received.
13255	///
13256	/// Errors if `min_value_msat` is greater than total bitcoin supply.
13257	///
13258	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
13259	/// on versions of LDK prior to 0.0.114.
13260	///
13261	/// [`create_inbound_payment`]: Self::create_inbound_payment
13262	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
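	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming the [`PaymentPreimage`] is generated
	/// and stored outside of LDK (`Sha256` here refers to bitcoin::hashes::sha256::Hash):
	///
	/// ```ignore
	/// // Hash a preimage held elsewhere and register it for a payment of at least 10_000 msat.
	/// let payment_hash = PaymentHash(Sha256::hash(&preimage.0).to_byte_array());
	/// let payment_secret =
	///     channel_manager.create_inbound_payment_for_hash(payment_hash, Some(10_000), 3_600, None)?;
	/// // Embed `payment_hash` and `payment_secret` in the invoice handed to the payer.
	/// ```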
13263	pub fn create_inbound_payment_for_hash(
13264		&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
13265		invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>,
13266	) -> Result<PaymentSecret, ()> {
13267		inbound_payment::create_from_hash(
13268			&self.inbound_payment_key,
13269			min_value_msat,
13270			payment_hash,
13271			invoice_expiry_delta_secs,
13272			self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
13273			min_final_cltv_expiry,
13274		)
13275	}
13276
13277	/// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
13278	/// previously returned from [`create_inbound_payment`].
13279	///
13280	/// [`create_inbound_payment`]: Self::create_inbound_payment
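	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), where `payment_hash` and `payment_secret`
	/// come from an earlier [`create_inbound_payment`] call:
	///
	/// ```ignore
	/// let preimage = channel_manager.get_payment_preimage(payment_hash, payment_secret)?;
	/// ```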
13281	pub fn get_payment_preimage(
13282		&self, payment_hash: PaymentHash, payment_secret: PaymentSecret,
13283	) -> Result<PaymentPreimage, APIError> {
13284		let expanded_key = &self.inbound_payment_key;
13285		inbound_payment::get_payment_preimage(payment_hash, payment_secret, expanded_key)
13286	}
13287
13288	/// [`BlindedMessagePath`]s for an async recipient to communicate with this node and interactively
13289	/// build [`Offer`]s and [`StaticInvoice`]s for receiving async payments.
13290	///
13291	/// ## Usage
13292	/// 1. Static invoice server calls [`Self::blinded_paths_for_async_recipient`]
13293	/// 2. Static invoice server communicates the resulting paths out-of-band to the async recipient,
13294	///    who calls [`Self::set_paths_to_static_invoice_server`] to configure themselves with these
13295	///    paths
13296	/// 3. Async recipient automatically sends [`OfferPathsRequest`]s over the configured paths, and
13297	///    uses the resulting paths from the server's [`OfferPaths`] response to build their async
13298	///    receive offer
13299	///
13300	/// If `relative_expiry` is unset, the [`BlindedMessagePath`]s will never expire.
13301	///
13302	/// Returns the paths that the recipient should be configured with via
13303	/// [`Self::set_paths_to_static_invoice_server`].
13304	///
13305	/// The provided `recipient_id` must uniquely identify the recipient, and will be surfaced later
13306	/// when the recipient provides us with a static invoice to persist and serve to payers on their
13307	/// behalf.
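	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test) of the static invoice server side, where
	/// `recipient_id` is whatever opaque identifier the server uses for this client:
	///
	/// ```ignore
	/// let recipient_id = b"client-42".to_vec();
	/// let paths = server_channel_manager
	///     .blinded_paths_for_async_recipient(recipient_id, Some(Duration::from_secs(86_400)))?;
	/// // Hand `paths` to the recipient out-of-band; the recipient then calls
	/// // `set_paths_to_static_invoice_server(paths)` on their own `ChannelManager`.
	/// ```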
13308	pub fn blinded_paths_for_async_recipient(
13309		&self, recipient_id: Vec<u8>, relative_expiry: Option<Duration>,
13310	) -> Result<Vec<BlindedMessagePath>, ()> {
13311		let peers = self.get_peers_for_blinded_path();
13312		self.flow.blinded_paths_for_async_recipient(recipient_id, relative_expiry, peers)
13313	}
13314
13315	pub(super) fn duration_since_epoch(&self) -> Duration {
13316		#[cfg(not(feature = "std"))]
13317		let now = Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
13318		#[cfg(feature = "std")]
13319		let now = std::time::SystemTime::now()
13320			.duration_since(std::time::SystemTime::UNIX_EPOCH)
13321			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
13322
13323		now
13324	}
13325
13326	fn get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
13327		let per_peer_state = self.per_peer_state.read().unwrap();
13328		per_peer_state
13329			.iter()
13330			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
13331			.filter(|(_, peer)| peer.is_connected)
13332			.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
13333			.map(|(node_id, peer)| MessageForwardNode {
13334				node_id: *node_id,
13335				short_channel_id: peer
13336					.channel_by_id
13337					.iter()
13338					.filter(|(_, channel)| channel.context().is_usable())
13339					.filter_map(|(_, channel)| channel.as_funded())
13340					.min_by_key(|funded_channel| funded_channel.context.channel_creation_height)
13341					.and_then(|funded_channel| funded_channel.get_inbound_scid()),
13342			})
13343			.collect::<Vec<_>>()
13344	}
13345
13346	#[cfg(test)]
13347	pub(super) fn test_get_peers_for_blinded_path(&self) -> Vec<MessageForwardNode> {
13348		self.get_peers_for_blinded_path()
13349	}
13350
13351	#[cfg(test)]
13352	/// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
13353	/// [`Router::create_blinded_payment_paths`].
13354	pub(super) fn test_create_blinded_payment_paths(
13355		&self, amount_msats: Option<u64>, payment_secret: PaymentSecret,
13356		payment_context: PaymentContext, relative_expiry_seconds: u32,
13357	) -> Result<Vec<BlindedPaymentPath>, ()> {
13358		let entropy = &*self.entropy_source;
13359
13360		self.flow.test_create_blinded_payment_paths(
13361			&self.router,
13362			entropy,
13363			self.list_usable_channels(),
13364			amount_msats,
13365			payment_secret,
13366			payment_context,
13367			relative_expiry_seconds,
13368		)
13369	}
13370
13371	/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
13372	/// are used when constructing the phantom invoice's route hints.
13373	///
13374	/// [phantom node payments]: crate::sign::PhantomKeysManager
13375	pub fn get_phantom_scid(&self) -> u64 {
13376		let best_block_height = self.best_block.read().unwrap().height;
13377		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
13378		loop {
13379			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(
13380				best_block_height,
13381				&self.chain_hash,
13382				&self.fake_scid_rand_bytes,
13383				&self.entropy_source,
13384			);
13385			// Ensure the generated scid doesn't conflict with a real channel.
13386			match short_to_chan_info.get(&scid_candidate) {
13387				Some(_) => continue,
13388				None => return scid_candidate,
13389			}
13390		}
13391	}
13392
13393	/// Gets route hints for use in receiving [phantom node payments].
13394	///
13395	/// [phantom node payments]: crate::sign::PhantomKeysManager
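	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test): each node sharing the phantom key material
	/// contributes its own hints, which are then combined when building the phantom invoice
	/// (`node_a_manager`/`node_b_manager` and the invoice-building step are assumptions here):
	///
	/// ```ignore
	/// let phantom_hints = vec![
	///     node_a_manager.get_phantom_route_hints(),
	///     node_b_manager.get_phantom_route_hints(),
	/// ];
	/// // Pass `phantom_hints` to the phantom invoice construction utility of your choice.
	/// ```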
13396	pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
13397		PhantomRouteHints {
13398			channels: self.list_usable_channels(),
13399			phantom_scid: self.get_phantom_scid(),
13400			real_node_pubkey: self.get_our_node_id(),
13401		}
13402	}
13403
13404	/// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are
13405	/// used when constructing the route hints for HTLCs intended to be intercepted. See
13406	/// [`ChannelManager::forward_intercepted_htlc`].
13407	///
13408	/// Note that this method is not guaranteed to return unique values; you may need to call it a
13409	/// few times to get a unique scid.
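	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test): the intercept scid stands in for a channel
	/// that does not exist yet, and is advertised in the invoice's route hints so that HTLCs
	/// paying it are surfaced via [`Event::HTLCIntercepted`] (hint construction is left to the
	/// invoice utility you use):
	///
	/// ```ignore
	/// let intercept_scid = channel_manager.get_intercept_scid();
	/// // Use `intercept_scid` as the last hop in the invoice's route hints. When an HTLC arrives
	/// // for it, handle `Event::HTLCIntercepted` and call `forward_intercepted_htlc`.
	/// ```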
13410	pub fn get_intercept_scid(&self) -> u64 {
13411		let best_block_height = self.best_block.read().unwrap().height;
13412		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
13413		loop {
13414			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(
13415				best_block_height,
13416				&self.chain_hash,
13417				&self.fake_scid_rand_bytes,
13418				&self.entropy_source,
13419			);
13420			// Ensure the generated scid doesn't conflict with a real channel.
13421			if short_to_chan_info.contains_key(&scid_candidate) {
13422				continue;
13423			}
13424			return scid_candidate;
13425		}
13426	}
13427
13428	/// Gets inflight HTLC information by processing pending outbound payments that are in
13429	/// our channels. May be used during pathfinding to account for in-use channel liquidity.
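	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test), assuming `router` implements [`Router`] and
	/// `route_params` was constructed elsewhere:
	///
	/// ```ignore
	/// let usable_channels = channel_manager.list_usable_channels();
	/// let first_hops = usable_channels.iter().collect::<Vec<_>>();
	/// let inflight_htlcs = channel_manager.compute_inflight_htlcs();
	/// let route = router.find_route(
	///     &channel_manager.get_our_node_id(),
	///     &route_params,
	///     Some(&first_hops),
	///     inflight_htlcs,
	/// )?;
	/// ```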
13430	pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
13431		let mut inflight_htlcs = InFlightHtlcs::new();
13432
13433		let per_peer_state = self.per_peer_state.read().unwrap();
13434		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
13435			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
13436			let peer_state = &mut *peer_state_lock;
13437			for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
13438				for (htlc_source, _) in chan.inflight_htlc_sources() {
13439					if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
13440						inflight_htlcs.process_path(path, self.get_our_node_id());
13441					}
13442				}
13443			}
13444		}
13445
13446		inflight_htlcs
13447	}
13448
13449	#[cfg(any(test, feature = "_test_utils"))]
13450	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
13451		let events = core::cell::RefCell::new(Vec::new());
13452		let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
13453		self.process_pending_events(&event_handler);
13454		let collected_events = events.into_inner();
13455
13456		// To expand coverage and make sure all events are properly serialized and deserialized,
13457		// we round-trip all generated events:
13458		for event in &collected_events {
13459			let ser = event.encode();
13460			if let Some(deser) =
13461				events::Event::read(&mut &ser[..]).expect("event should deserialize")
13462			{
13463				assert_eq!(&deser, event, "event should roundtrip correctly");
13464			}
13465		}
13466
13467		collected_events
13468	}
13469
13470	#[cfg(feature = "_test_utils")]
13471	pub fn push_pending_event(&self, event: events::Event) {
13472		let mut events = self.pending_events.lock().unwrap();
13473		events.push_back((event, None));
13474	}
13475
13476	#[cfg(test)]
13477	pub fn pop_pending_event(&self) -> Option<events::Event> {
13478		let mut events = self.pending_events.lock().unwrap();
13479		events.pop_front().map(|(e, _)| e)
13480	}
13481
13482	#[cfg(test)]
13483	pub fn has_pending_payments(&self) -> bool {
13484		self.pending_outbound_payments.has_pending_payments()
13485	}
13486
13487	#[cfg(test)]
13488	pub fn clear_pending_payments(&self) {
13489		self.pending_outbound_payments.clear_pending_payments()
13490	}
13491
13492	#[cfg(any(test, feature = "_test_utils"))]
13493	pub(crate) fn get_and_clear_pending_raa_blockers(
13494		&self,
13495	) -> Vec<(ChannelId, Vec<RAAMonitorUpdateBlockingAction>)> {
13496		let per_peer_state = self.per_peer_state.read().unwrap();
13497		let mut pending_blockers = Vec::new();
13498
13499		for (_peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
13500			let mut peer_state = peer_state_mutex.lock().unwrap();
13501
13502			for (chan_id, actions) in peer_state.actions_blocking_raa_monitor_updates.iter() {
13503				// Only collect the non-empty actions into `pending_blockers`.
13504				if !actions.is_empty() {
13505					pending_blockers.push((chan_id.clone(), actions.clone()));
13506				}
13507			}
13508
13509			peer_state.actions_blocking_raa_monitor_updates.clear();
13510		}
13511
13512		pending_blockers
13513	}
13514
13515	/// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
13516	/// [`Event`] being handled) completes, this should be called to restore the channel to normal
13517	/// operation. It will double-check that nothing *else* is also blocking the same channel from
13518	/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
13519	#[rustfmt::skip]
13520	fn handle_monitor_update_release(
13521		&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
13522		mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>,
13523	) {
13524		let logger = WithContext::from(
13525			&self.logger, Some(counterparty_node_id), Some(channel_id), None
13526		);
13527		loop {
13528			let per_peer_state = self.per_peer_state.read().unwrap();
13529			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
13530				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
13531				let peer_state = &mut *peer_state_lck;
13532				if let Some(blocker) = completed_blocker.take() {
13533					// Only do this on the first iteration of the loop.
13534					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
13535						.get_mut(&channel_id)
13536					{
13537						blockers.retain(|iter| iter != &blocker);
13538					}
13539				}
13540
13541				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
13542					channel_id, counterparty_node_id) {
13543					// Check that, while holding the peer lock, we don't have anything else
13544					// blocking monitor updates for this channel. If we do, release the monitor
13545					// update(s) when those blockers complete.
13546					log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
13547						&channel_id);
13548					break;
13549				}
13550
13551				if let hash_map::Entry::Occupied(mut chan_entry) = peer_state.channel_by_id.entry(
13552					channel_id) {
13553					if let Some(chan) = chan_entry.get_mut().as_funded_mut() {
13554						let channel_funding_outpoint = chan.funding_outpoint();
13555						if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
13556							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
13557								channel_id);
13558							handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
13559								peer_state_lck, peer_state, per_peer_state, chan);
13560							if further_update_exists {
13561								// If there are more `ChannelMonitorUpdate`s to process, restart at the
13562								// top of the loop.
13563								continue;
13564							}
13565						} else {
13566							log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
13567								channel_id);
13568						}
13569					}
13570				}
13571			} else {
13572				log_debug!(logger,
13573					"Got a release post-RAA monitor update for peer {} but the channel is gone",
13574					log_pubkey!(counterparty_node_id));
13575			}
13576			break;
13577		}
13578	}
13579
13580	fn handle_post_event_actions<I: IntoIterator<Item = EventCompletionAction>>(&self, actions: I) {
13581		for action in actions.into_iter() {
13582			match action {
13583				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
13584					channel_funding_outpoint: _,
13585					channel_id,
13586					counterparty_node_id,
13587				} => {
13588					let startup_complete =
13589						self.background_events_processed_since_startup.load(Ordering::Acquire);
13590					debug_assert!(startup_complete);
13591					self.handle_monitor_update_release(counterparty_node_id, channel_id, None);
13592				},
13593				EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(
13594					PaymentCompleteUpdate {
13595						counterparty_node_id,
13596						channel_funding_outpoint,
13597						channel_id,
13598						htlc_id,
13599					},
13600				) => {
13601					let per_peer_state = self.per_peer_state.read().unwrap();
13602					let mut peer_state = per_peer_state
13603						.get(&counterparty_node_id)
13604						.map(|state| state.lock().unwrap())
13605						.expect("Channels originating a payment resolution must have peer state");
13606					let update_id = peer_state
13607						.closed_channel_monitor_update_ids
13608						.get_mut(&channel_id)
13609						.expect("Channels originating a payment resolution must have a monitor");
13610					// Note that for channels closed pre-0.1, the latest update_id is `u64::MAX`.
13611					*update_id = update_id.saturating_add(1);
13612
13613					let update = ChannelMonitorUpdate {
13614						update_id: *update_id,
13615						channel_id: Some(channel_id),
13616						updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
13617							htlc: htlc_id,
13618						}],
13619					};
13620
13621					let during_startup =
13622						!self.background_events_processed_since_startup.load(Ordering::Acquire);
13623					if during_startup {
13624						let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13625							counterparty_node_id,
13626							funding_txo: channel_funding_outpoint,
13627							channel_id,
13628							update,
13629						};
13630						self.pending_background_events.lock().unwrap().push(event);
13631					} else {
13632						handle_new_monitor_update!(
13633							self,
13634							channel_funding_outpoint,
13635							update,
13636							peer_state,
13637							peer_state,
13638							per_peer_state,
13639							counterparty_node_id,
13640							channel_id,
13641							POST_CHANNEL_CLOSE
13642						);
13643					}
13644				},
13645			}
13646		}
13647	}
13648
13649	/// Processes any events asynchronously in the order they were generated since the last call
13650	/// using the given event handler.
13651	///
13652	/// See the trait-level documentation of [`EventsProvider`] for requirements.
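	///
	/// # Example
	///
	/// A rough sketch (not compiled as a doc-test) of an async event loop; the handling logic is
	/// application specific:
	///
	/// ```ignore
	/// channel_manager
	///     .process_pending_events_async(|event| async move {
	///         match event {
	///             Event::PaymentClaimable { .. } => {
	///                 // Claim or fail the payment here.
	///             },
	///             _ => {},
	///         }
	///         Ok(())
	///     })
	///     .await;
	/// ```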
13653	pub async fn process_pending_events_async<
13654		Future: core::future::Future<Output = Result<(), ReplayEvent>>,
13655		H: Fn(Event) -> Future,
13656	>(
13657		&self, handler: H,
13658	) {
13659		let mut ev;
13660		process_events_body!(self, ev, { handler(ev).await });
13661	}
13662}
13663
13664impl<
13665		M: Deref,
13666		T: Deref,
13667		ES: Deref,
13668		NS: Deref,
13669		SP: Deref,
13670		F: Deref,
13671		R: Deref,
13672		MR: Deref,
13673		L: Deref,
13674	> BaseMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
13675where
13676	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13677	T::Target: BroadcasterInterface,
13678	ES::Target: EntropySource,
13679	NS::Target: NodeSigner,
13680	SP::Target: SignerProvider,
13681	F::Target: FeeEstimator,
13682	R::Target: Router,
13683	MR::Target: MessageRouter,
13684	L::Target: Logger,
13685{
13686	fn provided_node_features(&self) -> NodeFeatures {
13687		provided_node_features(&self.config.read().unwrap())
13688	}
13689
13690	fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
13691		provided_init_features(&self.config.read().unwrap())
13692	}
13693
13694	#[rustfmt::skip]
13695	fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
13696		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
13697			let mut splice_failed_events = Vec::new();
13698			let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
13699			let mut per_peer_state = self.per_peer_state.write().unwrap();
13700			let remove_peer = {
13701				log_debug!(
13702					WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
13703					"Marking channels with {} disconnected and generating channel_updates.",
13704					log_pubkey!(counterparty_node_id)
13705				);
13706				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
13707					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
13708					let peer_state = &mut *peer_state_lock;
13709					let pending_msg_events = &mut peer_state.pending_msg_events;
13710					peer_state.channel_by_id.retain(|_, chan| {
13711						let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
13712						let DisconnectResult { is_resumable, splice_funding_failed } =
13713							chan.peer_disconnected_is_resumable(&&logger);
13714
13715						if let Some(splice_funding_failed) = splice_funding_failed {
13716							splice_failed_events.push(events::Event::SpliceFailed {
13717								channel_id: chan.context().channel_id(),
13718								counterparty_node_id,
13719								user_channel_id: chan.context().get_user_id(),
13720								abandoned_funding_txo: splice_funding_failed.funding_txo,
13721								channel_type: splice_funding_failed.channel_type,
13722								contributed_inputs: splice_funding_failed.contributed_inputs,
13723								contributed_outputs: splice_funding_failed.contributed_outputs,
13724							});
13725						}
13726
13727						if is_resumable {
13728							return true;
13729						}
13730
13731						// Clean up for removal.
13732						let reason = ClosureReason::DisconnectedPeer;
13733						let err = ChannelError::Close((reason.to_string(), reason));
13734						let (_, e) = convert_channel_err!(self, peer_state, err, chan);
13735						failed_channels.push((Err(e), counterparty_node_id));
13736						false
13737					});
13738					// Note that we don't bother generating any events for pre-accept channels -
13739					// they're not considered "channels" yet from the PoV of our events interface.
13740					peer_state.inbound_channel_request_by_id.clear();
13741					pending_msg_events.retain(|msg| {
13742						match msg {
13743							// V1 Channel Establishment
13744							&MessageSendEvent::SendAcceptChannel { .. } => false,
13745							&MessageSendEvent::SendOpenChannel { .. } => false,
13746							&MessageSendEvent::SendFundingCreated { .. } => false,
13747							&MessageSendEvent::SendFundingSigned { .. } => false,
13748							// V2 Channel Establishment
13749							&MessageSendEvent::SendAcceptChannelV2 { .. } => false,
13750							&MessageSendEvent::SendOpenChannelV2 { .. } => false,
13751							// Common Channel Establishment
13752							&MessageSendEvent::SendChannelReady { .. } => false,
13753							&MessageSendEvent::SendAnnouncementSignatures { .. } => false,
13754							// Quiescence
13755							&MessageSendEvent::SendStfu { .. } => false,
13756							// Splicing
13757							&MessageSendEvent::SendSpliceInit { .. } => false,
13758							&MessageSendEvent::SendSpliceAck { .. } => false,
13759							&MessageSendEvent::SendSpliceLocked { .. } => false,
13760							// Interactive Transaction Construction
13761							&MessageSendEvent::SendTxAddInput { .. } => false,
13762							&MessageSendEvent::SendTxAddOutput { .. } => false,
13763							&MessageSendEvent::SendTxRemoveInput { .. } => false,
13764							&MessageSendEvent::SendTxRemoveOutput { .. } => false,
13765							&MessageSendEvent::SendTxComplete { .. } => false,
13766							&MessageSendEvent::SendTxSignatures { .. } => false,
13767							&MessageSendEvent::SendTxInitRbf { .. } => false,
13768							&MessageSendEvent::SendTxAckRbf { .. } => false,
13769							&MessageSendEvent::SendTxAbort { .. } => false,
13770							// Channel Operations
13771							&MessageSendEvent::UpdateHTLCs { .. } => false,
13772							&MessageSendEvent::SendRevokeAndACK { .. } => false,
13773							&MessageSendEvent::SendClosingSigned { .. } => false,
13774							&MessageSendEvent::SendClosingComplete { .. } => false,
13775							&MessageSendEvent::SendClosingSig { .. } => false,
13776							&MessageSendEvent::SendShutdown { .. } => false,
13777							&MessageSendEvent::SendChannelReestablish { .. } => false,
13778							&MessageSendEvent::HandleError { .. } => false,
13779							// Gossip
13780							&MessageSendEvent::SendChannelAnnouncement { .. } => false,
13781							&MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
13782							// [`ChannelManager::pending_broadcast_events`] holds the [`BroadcastChannelUpdate`]s.
13783							// This arm only exists to ensure the match is exhaustive.
13784							&MessageSendEvent::BroadcastChannelUpdate { .. } => {
13785								debug_assert!(false, "This event shouldn't have been here");
13786								false
13787							},
13788							&MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
13789							&MessageSendEvent::SendChannelUpdate { .. } => false,
13790							&MessageSendEvent::SendChannelRangeQuery { .. } => false,
13791							&MessageSendEvent::SendShortIdsQuery { .. } => false,
13792							&MessageSendEvent::SendReplyChannelRange { .. } => false,
13793							&MessageSendEvent::SendGossipTimestampFilter { .. } => false,
13794
13795							// Peer Storage
13796							&MessageSendEvent::SendPeerStorage { .. } => false,
13797							&MessageSendEvent::SendPeerStorageRetrieval { .. } => false,
13798						}
13799					});
13800					debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
13801					peer_state.is_connected = false;
13802					peer_state.ok_to_remove(true)
13803				} else { debug_assert!(false, "Unconnected peer disconnected"); true }
13804			};
13805			if remove_peer {
13806				per_peer_state.remove(&counterparty_node_id);
13807			}
13808			mem::drop(per_peer_state);
13809
13810			let persist = if splice_failed_events.is_empty() {
13811				NotifyOption::SkipPersistHandleEvents
13812			} else {
13813				let mut pending_events = self.pending_events.lock().unwrap();
13814				for event in splice_failed_events {
13815					pending_events.push_back((event, None));
13816				}
13817				NotifyOption::DoPersist
13818			};
13819
13820			for (err, counterparty_node_id) in failed_channels.drain(..) {
13821				let _ = handle_error!(self, err, counterparty_node_id);
13822			}
13823
13824			persist
13825		});
13826	}
13827
13828	#[rustfmt::skip]
13829	fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
13830		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
13831		if !init_msg.features.supports_static_remote_key() {
13832			log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
13833			return Err(());
13834		}
13835
13836		let mut res = Ok(());
13837
13838		PersistenceNotifierGuard::optionally_notify(self, || {
13839			// If too many peers without funded channels are already connected, disconnect this new
13840			// peer immediately (as long as it doesn't itself have funded channels). If we have a bunch
13841			// of unfunded channels taking up memory for disconnected peers, we still let new peers
13842			// connect, but we'll reject new channels from them.
13843			let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
13844			let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
13845
13846			{
13847				let mut peer_state_lock = self.per_peer_state.write().unwrap();
13848				match peer_state_lock.entry(counterparty_node_id) {
13849					hash_map::Entry::Vacant(e) => {
13850						if inbound_peer_limited {
13851							res = Err(());
13852							return NotifyOption::SkipPersistNoEvents;
13853						}
13854						e.insert(Mutex::new(PeerState {
13855							channel_by_id: new_hash_map(),
13856							inbound_channel_request_by_id: new_hash_map(),
13857							latest_features: init_msg.features.clone(),
13858							pending_msg_events: Vec::new(),
13859							in_flight_monitor_updates: BTreeMap::new(),
13860							monitor_update_blocked_actions: BTreeMap::new(),
13861							actions_blocking_raa_monitor_updates: BTreeMap::new(),
13862							closed_channel_monitor_update_ids: BTreeMap::new(),
13863							is_connected: true,
13864							peer_storage: Vec::new(),
13865						}));
13866					},
13867					hash_map::Entry::Occupied(e) => {
13868						let mut peer_state = e.get().lock().unwrap();
13869						peer_state.latest_features = init_msg.features.clone();
13870
13871						let best_block_height = self.best_block.read().unwrap().height;
13872						if inbound_peer_limited &&
13873							Self::unfunded_channel_count(&*peer_state, best_block_height) ==
13874							peer_state.channel_by_id.len()
13875						{
13876							res = Err(());
13877							return NotifyOption::SkipPersistNoEvents;
13878						}
13879
13880						debug_assert!(peer_state.pending_msg_events.is_empty());
13881						peer_state.pending_msg_events.clear();
13882
13883						debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
13884						peer_state.is_connected = true;
13885					},
13886				}
13887			}
13888
13889			log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
13890
13891			let per_peer_state = self.per_peer_state.read().unwrap();
13892			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
13893				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
13894				let peer_state = &mut *peer_state_lock;
13895				let pending_msg_events = &mut peer_state.pending_msg_events;
13896
13897				if !peer_state.peer_storage.is_empty() {
13898					pending_msg_events.push(MessageSendEvent::SendPeerStorageRetrieval {
13899						node_id: counterparty_node_id.clone(),
13900						msg: msgs::PeerStorageRetrieval {
13901							data: peer_state.peer_storage.clone()
13902						},
13903					});
13904				}
13905
13906				for (_, chan) in peer_state.channel_by_id.iter_mut() {
13907					let logger = WithChannelContext::from(&self.logger, &chan.context(), None);
13908					match chan.peer_connected_get_handshake(self.chain_hash, &&logger) {
13909						ReconnectionMsg::Reestablish(msg) =>
13910							pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
13911								node_id: chan.context().get_counterparty_node_id(),
13912								msg,
13913							}),
13914						ReconnectionMsg::Open(OpenChannelMessage::V1(msg)) =>
13915							pending_msg_events.push(MessageSendEvent::SendOpenChannel {
13916								node_id: chan.context().get_counterparty_node_id(),
13917								msg,
13918							}),
13919						ReconnectionMsg::Open(OpenChannelMessage::V2(msg)) =>
13920							pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 {
13921								node_id: chan.context().get_counterparty_node_id(),
13922								msg,
13923							}),
13924						ReconnectionMsg::None => {},
13925					}
13926				}
13927			}
13928
13929			return NotifyOption::SkipPersistHandleEvents;
13930			//TODO: Also re-broadcast announcement_signatures
13931		});
13932
13933		// While we usually refresh the AsyncReceiveOfferCache on a timer, we also want to start
13934		// interactively building offers as soon as we can after startup. We can't start building offers
13935		// until we have some peer connection(s) to receive onion messages over, so as a minor optimization
13936		// refresh the cache when a peer connects.
13937		self.check_refresh_async_receive_offer_cache(false);
13938		res
13939	}
13940
13941	/// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
13942	/// The returned array will contain `MessageSendEvent`s for different peers if
13943	/// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
13944	/// are always placed next to each other.
13945	///
13946	/// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
13947	/// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
13948	/// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
13949	/// will randomly be placed first or last in the returned array.
13950	///
13951	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
13952	/// `MessageSendEvent`s are intended to be broadcasted to all peers, they will be placed among
13953	/// the `MessageSendEvent`s to the specific peer they were generated under.
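	///
	/// For example (a hypothetical illustration of the ordering described above), if events
	/// `[a1, a2]` were generated for `node_a` and `[b1]` for `node_b`, the returned array will be
	/// either `[a1, a2, b1]` or `[b1, a1, a2]`, but never `[a1, b1, a2]`.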
13954	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
13955		let events = RefCell::new(Vec::new());
13956		PersistenceNotifierGuard::optionally_notify(self, || {
13957			let mut result = NotifyOption::SkipPersistNoEvents;
13958
13959			// TODO: This behavior should be documented. It's unintuitive that we query
13960			// ChannelMonitors when clearing other events.
13961			if self.process_pending_monitor_events() {
13962				result = NotifyOption::DoPersist;
13963			}
13964
13965			if self.check_free_holding_cells() {
13966				result = NotifyOption::DoPersist;
13967			}
13968			if self.maybe_generate_initial_closing_signed() {
13969				result = NotifyOption::DoPersist;
13970			}
13971
13972			// Quiescence is an in-memory protocol, so we don't have to persist because of it.
13973			self.maybe_send_stfu();
13974
13975			let mut is_any_peer_connected = false;
13976			let mut pending_events = Vec::new();
13977			let per_peer_state = self.per_peer_state.read().unwrap();
13978			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
13979				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
13980				let peer_state = &mut *peer_state_lock;
13981				if peer_state.pending_msg_events.len() > 0 {
13982					pending_events.append(&mut peer_state.pending_msg_events);
13983				}
13984				if peer_state.is_connected {
13985					is_any_peer_connected = true
13986				}
13987			}
13988
13989			// Ensure that we are connected to some peers before getting broadcast messages.
13990			if is_any_peer_connected {
13991				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
13992				pending_events.append(&mut broadcast_msgs);
13993			}
13994
13995			if !pending_events.is_empty() {
13996				events.replace(pending_events);
13997			}
13998
13999			result
14000		});
14001		events.into_inner()
14002	}
14003}
14004
14005impl<
14006		M: Deref,
14007		T: Deref,
14008		ES: Deref,
14009		NS: Deref,
14010		SP: Deref,
14011		F: Deref,
14012		R: Deref,
14013		MR: Deref,
14014		L: Deref,
14015	> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
14016where
14017	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
14018	T::Target: BroadcasterInterface,
14019	ES::Target: EntropySource,
14020	NS::Target: NodeSigner,
14021	SP::Target: SignerProvider,
14022	F::Target: FeeEstimator,
14023	R::Target: Router,
14024	MR::Target: MessageRouter,
14025	L::Target: Logger,
14026{
14027	/// Processes events that must be periodically handled.
14028	///
14029	/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
14030	/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
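	///
	/// A minimal closure-based handler sketch (real deployments typically let
	/// `lightning-background-processor` drive this; the body shown is purely illustrative):
	///
	/// ```ignore
	/// channel_manager.process_pending_events(&|event| {
	///     // Inspect and handle the event here, but do not persist the manager from within
	///     // the handler.
	///     println!("handling event: {:?}", event);
	///     Ok(())
	/// });
	/// ```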
14031	fn process_pending_events<H: Deref>(&self, handler: H)
14032	where
14033		H::Target: EventHandler,
14034	{
14035		let mut ev;
14036		process_events_body!(self, ev, handler.handle_event(ev));
14037	}
14038}
14039
14040impl<
14041		M: Deref,
14042		T: Deref,
14043		ES: Deref,
14044		NS: Deref,
14045		SP: Deref,
14046		F: Deref,
14047		R: Deref,
14048		MR: Deref,
14049		L: Deref,
14050	> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
14051where
14052	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
14053	T::Target: BroadcasterInterface,
14054	ES::Target: EntropySource,
14055	NS::Target: NodeSigner,
14056	SP::Target: SignerProvider,
14057	F::Target: FeeEstimator,
14058	R::Target: Router,
14059	MR::Target: MessageRouter,
14060	L::Target: Logger,
14061{
14062	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
14063		{
14064			let best_block = self.best_block.read().unwrap();
14065			assert_eq!(best_block.block_hash, header.prev_blockhash,
14066				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
14067			assert_eq!(best_block.height, height - 1,
14068				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
14069		}
14070
14071		self.transactions_confirmed(header, txdata, height);
14072		self.best_block_updated(header, height);
14073	}
14074
14075	fn blocks_disconnected(&self, fork_point: BestBlock) {
14076		let _persistence_guard =
14077			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
14078				self,
14079				|| -> NotifyOption { NotifyOption::DoPersist },
14080			);
14081		{
14082			let mut best_block = self.best_block.write().unwrap();
14083			assert!(best_block.height > fork_point.height,
14084				"Blocks disconnected must indicate disconnection from the current best height, i.e. the new chain tip must be lower than the previous best height");
14085			*best_block = fork_point;
14086		}
14087
14088		self.do_chain_event(Some(fork_point.height), |channel| {
14089			channel.best_block_updated(
14090				fork_point.height,
14091				None,
14092				self.chain_hash,
14093				&self.node_signer,
14094				&self.config.read().unwrap(),
14095				&&WithChannelContext::from(&self.logger, &channel.context, None),
14096			)
14097		});
14098	}
14099}
14100
14101impl<
14102		M: Deref,
14103		T: Deref,
14104		ES: Deref,
14105		NS: Deref,
14106		SP: Deref,
14107		F: Deref,
14108		R: Deref,
14109		MR: Deref,
14110		L: Deref,
14111	> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
14112where
14113	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
14114	T::Target: BroadcasterInterface,
14115	ES::Target: EntropySource,
14116	NS::Target: NodeSigner,
14117	SP::Target: SignerProvider,
14118	F::Target: FeeEstimator,
14119	R::Target: Router,
14120	MR::Target: MessageRouter,
14121	L::Target: Logger,
14122{
14123	#[rustfmt::skip]
14124	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
14125		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
14126		// during initialization prior to the chain_monitor being fully configured in some cases.
14127		// See the docs for `ChannelManagerReadArgs` for more.
14128
14129		let block_hash = header.block_hash();
14130		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
14131
14132		let _persistence_guard =
14133			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
14134				self, || -> NotifyOption { NotifyOption::DoPersist });
14135		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.config.read().unwrap(), &&WithChannelContext::from(&self.logger, &channel.context, None))
14136			.map(|(a, b)| (a, Vec::new(), b)));
14137
14138		let last_best_block_height = self.best_block.read().unwrap().height;
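		// If these transactions confirmed in a block below our current best block (e.g. they were
		// provided out of order while catching up), immediately re-notify the affected channels of
		// the current best height so their confirmation tracking stays consistent.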
14139		if height < last_best_block_height {
14140			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
14141			let do_update = |channel: &mut FundedChannel<SP>| {
14142				channel.best_block_updated(
14143					last_best_block_height,
14144					Some(timestamp as u32),
14145					self.chain_hash,
14146					&self.node_signer,
14147					&self.config.read().unwrap(),
14148					&&WithChannelContext::from(&self.logger, &channel.context, None),
14149				)
14150			};
14151			self.do_chain_event(Some(last_best_block_height), do_update);
14152		}
14153	}
14154
14155	#[rustfmt::skip]
14156	fn best_block_updated(&self, header: &Header, height: u32) {
14157		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
14158		// during initialization prior to the chain_monitor being fully configured in some cases.
14159		// See the docs for `ChannelManagerReadArgs` for more.
14160
14161		let block_hash = header.block_hash();
14162		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
14163
14164		let _persistence_guard =
14165			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
14166				self, || -> NotifyOption { NotifyOption::DoPersist });
14167		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
14168
14169		let mut min_anchor_feerate = None;
14170		let mut min_non_anchor_feerate = None;
14171		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
14172			// If we're past the startup phase, update our feerate cache
14173			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
14174			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
14175				last_days_feerates.pop_front();
14176			}
14177			let anchor_feerate = self.fee_estimator
14178				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
14179			let non_anchor_feerate = self.fee_estimator
14180				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
14181			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
14182			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
14183				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
14184				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
14185			}
14186		}
14187
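		// The per-block minimums collected above feed `check_for_stale_feerate` below, which
		// returns an error (and thus closes) for channels whose feerate has stayed below our
		// minimum acceptable feerate for the full tracking window.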
14188		self.do_chain_event(Some(height), |channel| {
14189			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
14190			if channel.funding.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
14191				if let Some(feerate) = min_anchor_feerate {
14192					channel.check_for_stale_feerate(&logger, feerate)?;
14193				}
14194			} else {
14195				if let Some(feerate) = min_non_anchor_feerate {
14196					channel.check_for_stale_feerate(&logger, feerate)?;
14197				}
14198			}
14199
14200			// Remove any SCIDs used by older funding transactions
14201			{
14202				let legacy_scids = channel.remove_legacy_scids_before_block(height);
14203				if !legacy_scids.as_slice().is_empty() {
14204					let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
14205					for scid in legacy_scids {
14206						short_to_chan_info.remove(&scid);
14207					}
14208				}
14209			}
14210
14211			channel.best_block_updated(
14212				height,
14213				Some(header.time),
14214				self.chain_hash,
14215				&self.node_signer,
14216				&self.config.read().unwrap(),
14217				&&WithChannelContext::from(&self.logger, &channel.context, None),
14218			)
14219		});
14220
14221		macro_rules! max_time {
14222			($timestamp: expr) => {
14223				loop {
14224					// Update $timestamp to be the max of its current value and the block
14225					// timestamp. This should keep us close to the current time without relying on
14226					// having an explicit local time source.
14227					// Just in case we end up in a race, we loop until we either successfully
14228					// update $timestamp or decide we don't need to.
14229					let old_serial = $timestamp.load(Ordering::Acquire);
14230					if old_serial >= header.time as usize { break; }
14231					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
14232						break;
14233					}
14234				}
14235			}
14236		}
14237		max_time!(self.highest_seen_timestamp);
14238
14239		self.flow.best_block_updated(header, height);
14240	}
14241
14242	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
14243		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
14244		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
14245			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14246			let peer_state = &mut *peer_state_lock;
14247			for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
14248				for (funding_txid, conf_height, block_hash) in chan.get_relevant_txids() {
14249					res.push((funding_txid, conf_height, block_hash));
14250				}
14251			}
14252		}
14253		res
14254	}
14255
14256	fn transaction_unconfirmed(&self, txid: &Txid) {
14257		let _persistence_guard =
14258			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
14259				self,
14260				|| -> NotifyOption { NotifyOption::DoPersist },
14261			);
14262		self.do_chain_event(None, |channel| {
14263			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
14264			channel.transaction_unconfirmed(txid, &&logger).map(|()| (None, Vec::new(), None))
14265		});
14266	}
14267}
14268
14269pub(super) enum FundingConfirmedMessage {
14270	Establishment(msgs::ChannelReady),
14271	Splice(msgs::SpliceLocked, Option<OutPoint>, Option<ChannelMonitorUpdate>, Vec<FundingInfo>),
14272}
14273
14274impl<
14275		M: Deref,
14276		T: Deref,
14277		ES: Deref,
14278		NS: Deref,
14279		SP: Deref,
14280		F: Deref,
14281		R: Deref,
14282		MR: Deref,
14283		L: Deref,
14284	> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
14285where
14286	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
14287	T::Target: BroadcasterInterface,
14288	ES::Target: EntropySource,
14289	NS::Target: NodeSigner,
14290	SP::Target: SignerProvider,
14291	F::Target: FeeEstimator,
14292	R::Target: Router,
14293	MR::Target: MessageRouter,
14294	L::Target: Logger,
14295{
14296	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
14297	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
14298	/// the function.
14299	#[rustfmt::skip]
14300	fn do_chain_event<FN: Fn(&mut FundedChannel<SP>) -> Result<(Option<FundingConfirmedMessage>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
14301			(&self, height_opt: Option<u32>, f: FN) {
14302		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
14303		// during initialization prior to the chain_monitor being fully configured in some cases.
14304		// See the docs for `ChannelManagerReadArgs` for more.
14305
14306		let mut failed_channels: Vec<(Result<Infallible, _>, _)> = Vec::new();
14307		let mut timed_out_htlcs = Vec::new();
14308		let mut to_process_monitor_update_actions = Vec::new();
14309		{
14310			let per_peer_state = self.per_peer_state.read().unwrap();
14311			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
14312				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14313				let peer_state = &mut *peer_state_lock;
14314				let pending_msg_events = &mut peer_state.pending_msg_events;
14315
14316				peer_state.channel_by_id.retain(|channel_id, chan| {
14317					match chan.as_funded_mut() {
14318						// Retain unfunded channels.
14319						None => true,
14320						Some(funded_channel) => {
14321							let res = f(funded_channel);
14322							if let Ok((funding_confirmed_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
14323								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
14324									let reason = LocalHTLCFailureReason::CLTVExpiryTooSoon;
14325									let data = self.get_htlc_inbound_temp_fail_data(reason);
14326									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(reason, data),
14327										HTLCHandlingFailureType::Forward { node_id: Some(funded_channel.context.get_counterparty_node_id()), channel_id: *channel_id }));
14328								}
14329								let logger = WithChannelContext::from(&self.logger, &funded_channel.context, None);
14330								match funding_confirmed_opt {
14331									Some(FundingConfirmedMessage::Establishment(channel_ready)) => {
14332										send_channel_ready!(self, pending_msg_events, funded_channel, channel_ready);
14333										if funded_channel.context.is_usable() && peer_state.is_connected {
14334											log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel_id);
14335											if let Ok(msg) = self.get_channel_update_for_unicast(funded_channel) {
14336												pending_msg_events.push(MessageSendEvent::SendChannelUpdate {
14337													node_id: funded_channel.context.get_counterparty_node_id(),
14338													msg,
14339												});
14340											}
14341										} else {
14342											log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel_id);
14343										}
14344									},
14345									Some(FundingConfirmedMessage::Splice(splice_locked, funding_txo, monitor_update_opt, discarded_funding)) => {
14346										let counterparty_node_id = funded_channel.context.get_counterparty_node_id();
14347										let channel_id = funded_channel.context.channel_id();
14348
14349										if let Some(funding_txo) = funding_txo {
14350											let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
14351											insert_short_channel_id!(short_to_chan_info, funded_channel);
14352
14353											if let Some(monitor_update) = monitor_update_opt {
14354												handle_new_monitor_update!(
14355													self,
14356													funding_txo,
14357													monitor_update,
14358													peer_state,
14359													funded_channel.context,
14360													REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
14361												);
14362												to_process_monitor_update_actions.push((
14363													counterparty_node_id, channel_id
14364												));
14365											}
14366
14367											let mut pending_events = self.pending_events.lock().unwrap();
14368											pending_events.push_back((events::Event::ChannelReady {
14369												channel_id,
14370												user_channel_id: funded_channel.context.get_user_id(),
14371												counterparty_node_id,
14372												funding_txo: Some(funding_txo.into_bitcoin_outpoint()),
14373												channel_type: funded_channel.funding.get_channel_type().clone(),
14374											}, None));
14375											discarded_funding.into_iter().for_each(|funding_info| {
14376												let event = Event::DiscardFunding {
14377													channel_id: funded_channel.context.channel_id(),
14378													funding_info,
14379												};
14380												pending_events.push_back((event, None));
14381											});
14382										}
14383
14384										if funded_channel.context.is_connected() {
14385											pending_msg_events.push(MessageSendEvent::SendSpliceLocked {
14386												node_id: counterparty_node_id,
14387												msg: splice_locked,
14388											});
14389										}
14390									},
14391									None => {},
14392								}
14393
14394								{
14395									let mut pending_events = self.pending_events.lock().unwrap();
14396									emit_initial_channel_ready_event!(pending_events, funded_channel);
14397								}
14398
14399								if let Some(height) = height_opt {
14400									// (re-)broadcast signed `channel_announcement`s and
14401									// `channel_update`s for any channels less than a week old.
14402									let funding_conf_height =
14403										funded_channel.funding.get_funding_tx_confirmation_height().unwrap_or(height);
14404									// To avoid broadcast storms after each block, only
14405									// re-broadcast every hour (6 blocks) after the initial
14406									// broadcast, or if this is the first time we're ready to
14407									// broadcast this channel.
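									// (At Bitcoin's ~10-minute block target, 1008 blocks is roughly
									// one week and 6 blocks roughly one hour.)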
14408									let rebroadcast_announcement = funding_conf_height < height + 1008
14409										&& funding_conf_height % 6 == height % 6;
14410									#[allow(unused_mut, unused_assignments)]
14411									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
14412									// Most of our tests were written when we broadcast
14413									// `channel_announcement`s only once and never re-broadcast
14414									// them, so disable the re-broadcasting entirely in tests
14415									#[cfg(any(test, feature = "_test_utils"))]
14416									{
14417										should_announce = announcement_sigs.is_some();
14418									}
14419									if should_announce {
14420										if let Some(announcement) = funded_channel.get_signed_channel_announcement(
14421											&self.node_signer, self.chain_hash, height, &self.config.read().unwrap(),
14422										) {
14423											pending_msg_events.push(MessageSendEvent::BroadcastChannelAnnouncement {
14424												msg: announcement,
14425												// Note that get_signed_channel_announcement fails
14426												// if the channel cannot be announced, so
14427												// get_channel_update_for_broadcast will never fail
14428												// by the time we get here.
14429												update_msg: Some(self.get_channel_update_for_broadcast(funded_channel).unwrap()),
14430											});
14431										}
14432									}
14433								}
14434								if let Some(announcement_sigs) = announcement_sigs {
14435									if peer_state.is_connected {
14436										log_trace!(logger, "Sending announcement_signatures for channel {}", funded_channel.context.channel_id());
14437										pending_msg_events.push(MessageSendEvent::SendAnnouncementSignatures {
14438											node_id: funded_channel.context.get_counterparty_node_id(),
14439											msg: announcement_sigs,
14440										});
14441									}
14442								}
14443								if funded_channel.is_our_channel_ready() {
14444									if let Some(real_scid) = funded_channel.funding.get_short_channel_id() {
14445										// If we sent a 0conf channel_ready, and now have an SCID, we add it
14446										// to the short_to_chan_info map here. Note that we check whether we
14447										// can relay using the real SCID at relay-time (i.e.
14448										// enforce option_scid_alias then), and if the funding tx is ever
14449										// un-confirmed we force-close the channel, ensuring short_to_chan_info
14450										// is always consistent.
14451										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
14452										let scid_insert = short_to_chan_info.insert(real_scid, (funded_channel.context.get_counterparty_node_id(), *channel_id));
14453										assert!(scid_insert.is_none() || scid_insert.unwrap() == (funded_channel.context.get_counterparty_node_id(), *channel_id),
14454											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
14455											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
14456									}
14457								}
14458							} else if let Err(reason) = res {
14459								// It looks like our counterparty went on-chain or the funding transaction was
14460								// reorged out of the main chain. Close the channel.
14461								let err = ChannelError::Close((reason.to_string(), reason));
14462								let (_, e) = convert_channel_err!(
14463									self,
14464									peer_state,
14465									err,
14466									funded_channel,
14467									FUNDED_CHANNEL
14468								);
14469								failed_channels.push((Err(e), *counterparty_node_id));
14470								return false;
14471							}
14472							true
14473						}
14474					}
14475				});
14476			}
14477		}
14478
14479		for (counterparty_node_id, channel_id) in to_process_monitor_update_actions {
14480			self.channel_monitor_updated(&channel_id, None, &counterparty_node_id);
14481		}
14482
14483		if let Some(height) = height_opt {
14484			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
14485				payment.htlcs.retain(|htlc| {
14486					// If height is approaching the number of blocks we think it takes us to get
14487					// our commitment transaction confirmed before the HTLC expires, plus the
14488					// number of blocks we generally consider it to take to do a commitment update,
14489					// just give up on it and fail the HTLC.
14490					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
14491						let reason = LocalHTLCFailureReason::PaymentClaimBuffer;
14492						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
14493							HTLCFailReason::reason(reason, invalid_payment_err_data(htlc.value, height)),
14494							HTLCHandlingFailureType::Receive { payment_hash: payment_hash.clone() }));
14495						false
14496					} else { true }
14497				});
14498				!payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
14499			});
14500
14501			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
14502			intercepted_htlcs.retain(|_, htlc| {
14503				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
14504					let prev_hop_data = HTLCSource::PreviousHopData(htlc.htlc_previous_hop_data());
14505					let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
14506						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
14507						_ => unreachable!(),
14508					};
14509					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
14510							HTLCFailReason::from_failure_code(LocalHTLCFailureReason::ForwardExpiryBuffer),
14511							HTLCHandlingFailureType::InvalidForward { requested_forward_scid }));
14512					let logger = WithContext::from(
14513						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
14514					);
14515					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
14516					false
14517				} else { true }
14518			});
14519		}
14520
14521		for (failure, counterparty_node_id) in failed_channels {
14522			let _ = handle_error!(self, failure, counterparty_node_id);
14523		}
14524
14525		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
14526			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination, None);
14527		}
14528	}
14529
14530	/// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
14531	/// may have events that need processing.
14532	///
14533	/// In order to check if this [`ChannelManager`] needs persisting, call
14534	/// [`Self::get_and_clear_needs_persistence`].
14535	///
14536	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
14537	/// [`ChannelManager`] and should instead register actions to be taken later.
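	///
	/// A minimal sketch of a wait-then-process loop (normally `lightning-background-processor`
	/// drives this; `my_event_handler` and `my_persist` are hypothetical caller-provided items):
	///
	/// ```ignore
	/// loop {
	///     channel_manager.get_event_or_persistence_needed_future().await;
	///     channel_manager.process_pending_events(&my_event_handler);
	///     if channel_manager.get_and_clear_needs_persistence() {
	///         my_persist(&channel_manager);
	///     }
	/// }
	/// ```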
14538	pub fn get_event_or_persistence_needed_future(&self) -> Future {
14539		self.event_persist_notifier.get_future()
14540	}
14541
14542	/// Returns true if this [`ChannelManager`] needs to be persisted.
14543	///
14544	/// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
14545	/// indicates this should be checked.
14546	pub fn get_and_clear_needs_persistence(&self) -> bool {
14547		self.needs_persist_flag.swap(false, Ordering::AcqRel)
14548	}
14549
14550	#[cfg(any(test, feature = "_test_utils"))]
14551	pub fn get_event_or_persist_condvar_value(&self) -> bool {
14552		self.event_persist_notifier.notify_pending()
14553	}
14554
14555	/// Gets the latest best block which was connected either via the [`chain::Listen`] or
14556	/// [`chain::Confirm`] interfaces.
14557	pub fn current_best_block(&self) -> BestBlock {
14558		self.best_block.read().unwrap().clone()
14559	}
14560
14561	/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
14562	/// [`ChannelManager`].
14563	pub fn node_features(&self) -> NodeFeatures {
14564		provided_node_features(&self.config.read().unwrap())
14565	}
14566
14567	/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
14568	/// [`ChannelManager`].
14569	///
14570	/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
14571	/// or not. Thus, this method is not public.
14572	#[cfg(any(feature = "_test_utils", test))]
14573	pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
14574		provided_bolt11_invoice_features(&self.config.read().unwrap())
14575	}
14576
14577	/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
14578	/// [`ChannelManager`].
14579	fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
14580		provided_bolt12_invoice_features(&self.config.read().unwrap())
14581	}
14582
14583	/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
14584	/// [`ChannelManager`].
14585	pub fn channel_features(&self) -> ChannelFeatures {
14586		provided_channel_features(&self.config.read().unwrap())
14587	}
14588
14589	/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
14590	/// [`ChannelManager`].
14591	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
14592		provided_channel_type_features(&self.config.read().unwrap())
14593	}
14594
14595	/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
14596	/// [`ChannelManager`].
14597	pub fn init_features(&self) -> InitFeatures {
14598		provided_init_features(&self.config.read().unwrap())
14599	}
14600}
14601
14602impl<
14603		M: Deref,
14604		T: Deref,
14605		ES: Deref,
14606		NS: Deref,
14607		SP: Deref,
14608		F: Deref,
14609		R: Deref,
14610		MR: Deref,
14611		L: Deref,
14612	> ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
14613where
14614	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
14615	T::Target: BroadcasterInterface,
14616	ES::Target: EntropySource,
14617	NS::Target: NodeSigner,
14618	SP::Target: SignerProvider,
14619	F::Target: FeeEstimator,
14620	R::Target: Router,
14621	MR::Target: MessageRouter,
14622	L::Target: Logger,
14623{
14624	fn handle_open_channel(&self, counterparty_node_id: PublicKey, message: &msgs::OpenChannel) {
14625		// Note that we never need to persist the updated ChannelManager for an inbound
14626		// open_channel message - pre-funded channels are never written so there should be no
14627		// change to the contents.
14628		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14629			let msg = OpenChannelMessageRef::V1(message);
14630			let res = self.internal_open_channel(&counterparty_node_id, msg);
14631			let persist = match &res {
14632				Err(e) if e.closes_channel() => {
14633					debug_assert!(false, "We shouldn't close a new channel");
14634					NotifyOption::DoPersist
14635				},
14636				_ => NotifyOption::SkipPersistHandleEvents,
14637			};
14638			let _ = handle_error!(self, res, counterparty_node_id);
14639			persist
14640		});
14641	}
14642
14643	#[rustfmt::skip]
14644	fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
14645		if !self.init_features().supports_dual_fund() {
14646			let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
14647				"Dual-funded channels not supported".to_owned(),
14648				msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
14649			return;
14650		}
14651		// Note that we never need to persist the updated ChannelManager for an inbound
14652		// open_channel message - pre-funded channels are never written so there should be no
14653		// change to the contents.
14654		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14655			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
14656			let persist = match &res {
14657				Err(e) if e.closes_channel() => {
14658					debug_assert!(false, "We shouldn't close a new channel");
14659					NotifyOption::DoPersist
14660				},
14661				_ => NotifyOption::SkipPersistHandleEvents,
14662			};
14663			let _ = handle_error!(self, res, counterparty_node_id);
14664			persist
14665		});
14666	}
14667
14668	fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
14669		// Note that we never need to persist the updated ChannelManager for an inbound
14670		// accept_channel message - pre-funded channels are never written so there should be no
14671		// change to the contents.
14672		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14673			let res = self.internal_accept_channel(&counterparty_node_id, msg);
14674			let _ = handle_error!(self, res, counterparty_node_id);
14675			NotifyOption::SkipPersistHandleEvents
14676		});
14677	}
14678
14679	fn handle_accept_channel_v2(
14680		&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2,
14681	) {
14682		let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
14683			"Dual-funded channels not supported".to_owned(),
14684			msg.common_fields.temporary_channel_id.clone(),
14685		));
14686		let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
14687	}
14688
14689	fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
14690		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14691		let res = self.internal_funding_created(&counterparty_node_id, msg);
14692		let _ = handle_error!(self, res, counterparty_node_id);
14693	}
14694
14695	fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
14696		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14697		let res = self.internal_funding_signed(&counterparty_node_id, msg);
14698		let _ = handle_error!(self, res, counterparty_node_id);
14699	}
14700
14701	fn handle_peer_storage(&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorage) {
14702		let _persistence_guard =
14703			PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
14704		let res = self.internal_peer_storage(counterparty_node_id, msg);
14705		let _ = handle_error!(self, res, counterparty_node_id);
14706	}
14707
14708	fn handle_peer_storage_retrieval(
14709		&self, counterparty_node_id: PublicKey, msg: msgs::PeerStorageRetrieval,
14710	) {
14711		let _persistence_guard =
14712			PersistenceNotifierGuard::optionally_notify(self, || NotifyOption::SkipPersistNoEvents);
14713		let res = self.internal_peer_storage_retrieval(counterparty_node_id, msg);
14714		let _ = handle_error!(self, res, counterparty_node_id);
14715	}
14716
14717	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
14718		// Note that we never need to persist the updated ChannelManager for an inbound
14719		// channel_ready message - while the channel's state will change, any channel_ready message
14720		// will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated so we
14721		// will not force-close the channel on startup.
14722		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14723			let res = self.internal_channel_ready(&counterparty_node_id, msg);
14724			let persist = match &res {
14725				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14726				_ => NotifyOption::SkipPersistHandleEvents,
14727			};
14728			let _ = handle_error!(self, res, counterparty_node_id);
14729			persist
14730		});
14731	}
14732
14733	fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
14734		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14735			let res = self.internal_stfu(&counterparty_node_id, msg);
14736			let persist = match &res {
14737				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14738				Err(_) => NotifyOption::SkipPersistHandleEvents,
14739				Ok(responded) => {
14740					if *responded {
14741						NotifyOption::SkipPersistHandleEvents
14742					} else {
14743						NotifyOption::SkipPersistNoEvents
14744					}
14745				},
14746			};
14747			let _ = handle_error!(self, res, counterparty_node_id);
14748			persist
14749		});
14750	}
14751
14752	fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
14753		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14754			let res = self.internal_splice_init(&counterparty_node_id, msg);
14755			let persist = match &res {
14756				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14757				Err(_) => NotifyOption::SkipPersistHandleEvents,
14758				Ok(()) => NotifyOption::SkipPersistHandleEvents,
14759			};
14760			let _ = handle_error!(self, res, counterparty_node_id);
14761			persist
14762		});
14763	}
14764
14765	fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
14766		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14767			let res = self.internal_splice_ack(&counterparty_node_id, msg);
14768			let persist = match &res {
14769				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14770				Err(_) => NotifyOption::SkipPersistHandleEvents,
14771				Ok(()) => NotifyOption::SkipPersistHandleEvents,
14772			};
14773			let _ = handle_error!(self, res, counterparty_node_id);
14774			persist
14775		});
14776	}
14777
14778	#[rustfmt::skip]
14779	fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
14780		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14781			let res = self.internal_splice_locked(&counterparty_node_id, msg);
14782			let persist = match &res {
14783				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14784				Err(_) => NotifyOption::SkipPersistHandleEvents,
14785				Ok(()) => NotifyOption::DoPersist,
14786			};
14787			let _ = handle_error!(self, res, counterparty_node_id);
14788			persist
14789		});
14790	}
14791
14792	fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
14793		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14794		let res = self.internal_shutdown(&counterparty_node_id, msg);
14795		let _ = handle_error!(self, res, counterparty_node_id);
14796	}
14797
14798	fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
14799		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14800		let res = self.internal_closing_signed(&counterparty_node_id, msg);
14801		let _ = handle_error!(self, res, counterparty_node_id);
14802	}
14803
14804	#[cfg(simple_close)]
14805	fn handle_closing_complete(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingComplete) {
14806		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14807		let res = self.internal_closing_complete(counterparty_node_id, msg);
14808		let _ = handle_error!(self, res, counterparty_node_id);
14809	}
14810
14811	#[cfg(simple_close)]
14812	fn handle_closing_sig(&self, counterparty_node_id: PublicKey, msg: msgs::ClosingSig) {
14813		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14814		let res = self.internal_closing_sig(counterparty_node_id, msg);
14815		let _ = handle_error!(self, res, counterparty_node_id);
14816	}
14817
14818	fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
14819		// Note that we never need to persist the updated ChannelManager for an inbound
14820		// update_add_htlc message - the message itself doesn't change our channel state; only the
14821		// `commitment_signed` message afterwards will.
14822		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14823			let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
14824			let persist = match &res {
14825				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14826				Err(_) => NotifyOption::SkipPersistHandleEvents,
14827				Ok(()) => NotifyOption::SkipPersistNoEvents,
14828			};
14829			let _ = handle_error!(self, res, counterparty_node_id);
14830			persist
14831		});
14832	}
14833
14834	fn handle_update_fulfill_htlc(
14835		&self, counterparty_node_id: PublicKey, msg: msgs::UpdateFulfillHTLC,
14836	) {
14837		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14838		let res = self.internal_update_fulfill_htlc(&counterparty_node_id, msg);
14839		let _ = handle_error!(self, res, counterparty_node_id);
14840	}
14841
14842	fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
14843		// Note that we never need to persist the updated ChannelManager for an inbound
14844		// update_fail_htlc message - the message itself doesn't change our channel state; only the
14845		// `commitment_signed` message afterwards will.
14846		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14847			let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
14848			let persist = match &res {
14849				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14850				Err(_) => NotifyOption::SkipPersistHandleEvents,
14851				Ok(()) => NotifyOption::SkipPersistNoEvents,
14852			};
14853			let _ = handle_error!(self, res, counterparty_node_id);
14854			persist
14855		});
14856	}
14857
14858	fn handle_update_fail_malformed_htlc(
14859		&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC,
14860	) {
14861		// Note that we never need to persist the updated ChannelManager for an inbound
14862		// update_fail_malformed_htlc message - the message itself doesn't change our channel state;
14863		// only the `commitment_signed` message afterwards will.
14864		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14865			let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
14866			let persist = match &res {
14867				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14868				Err(_) => NotifyOption::SkipPersistHandleEvents,
14869				Ok(()) => NotifyOption::SkipPersistNoEvents,
14870			};
14871			let _ = handle_error!(self, res, counterparty_node_id);
14872			persist
14873		});
14874	}
14875
14876	fn handle_commitment_signed(
14877		&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned,
14878	) {
14879		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14880		let res = self.internal_commitment_signed(&counterparty_node_id, msg);
14881		let _ = handle_error!(self, res, counterparty_node_id);
14882	}
14883
14884	fn handle_commitment_signed_batch(
14885		&self, counterparty_node_id: PublicKey, channel_id: ChannelId,
14886		batch: Vec<msgs::CommitmentSigned>,
14887	) {
14888		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14889		let res = self.internal_commitment_signed_batch(&counterparty_node_id, channel_id, batch);
14890		let _ = handle_error!(self, res, counterparty_node_id);
14891	}
14892
14893	fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
14894		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14895		let res = self.internal_revoke_and_ack(&counterparty_node_id, msg);
14896		let _ = handle_error!(self, res, counterparty_node_id);
14897	}
14898
14899	fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
14900		// Note that we never need to persist the updated ChannelManager for an inbound
14901		// update_fee message - the message itself doesn't change our channel state; only the
14902		// `commitment_signed` message afterwards will.
14903		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
14904			let res = self.internal_update_fee(&counterparty_node_id, msg);
14905			let persist = match &res {
14906				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
14907				Err(_) => NotifyOption::SkipPersistHandleEvents,
14908				Ok(()) => NotifyOption::SkipPersistNoEvents,
14909			};
14910			let _ = handle_error!(self, res, counterparty_node_id);
14911			persist
14912		});
14913	}
14914
14915	fn handle_announcement_signatures(
14916		&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures,
14917	) {
14918		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14919		let res = self.internal_announcement_signatures(&counterparty_node_id, msg);
14920		let _ = handle_error!(self, res, counterparty_node_id);
14921	}
14922
14923	fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
14924		PersistenceNotifierGuard::optionally_notify(self, || {
14925			let res = self.internal_channel_update(&counterparty_node_id, msg);
14926			if let Ok(persist) = handle_error!(self, res, counterparty_node_id) {
14927				persist
14928			} else {
14929				NotifyOption::DoPersist
14930			}
14931		});
14932	}
14933
14934	fn handle_channel_reestablish(
14935		&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish,
14936	) {
14937		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14938		let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
14939		let _ = handle_error!(self, res, counterparty_node_id);
14940	}
14941
14942	#[rustfmt::skip]
14943	fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
14944		match &msg.data as &str {
14945			"cannot co-op close channel w/ active htlcs"|
14946			"link failed to shutdown" =>
14947			{
14948				// LND hasn't properly handled shutdown messages ever, and force-closes any time we
14949				// send one while HTLCs are still present. The issue is tracked at
14950				// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
14951				// to fix it but none so far have managed to land upstream. The issue appears to be
14952				// very low priority for the LND team despite being marked "P1".
14953				// We're not going to bother handling this in a sensible way, instead simply
14954				// repeating the Shutdown message on repeat until morale improves.
14955				if !msg.channel_id.is_zero() {
14956					PersistenceNotifierGuard::optionally_notify(
14957						self,
14958						|| -> NotifyOption {
14959							let per_peer_state = self.per_peer_state.read().unwrap();
14960							let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
14961							if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
14962							let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
14963							if let Some(chan) = peer_state.channel_by_id
14964								.get(&msg.channel_id)
14965								.and_then(Channel::as_funded)
14966							{
14967								if let Some(msg) = chan.get_outbound_shutdown() {
14968									peer_state.pending_msg_events.push(MessageSendEvent::SendShutdown {
14969										node_id: counterparty_node_id,
14970										msg,
14971									});
14972								}
14973								peer_state.pending_msg_events.push(MessageSendEvent::HandleError {
14974									node_id: counterparty_node_id,
14975									action: msgs::ErrorAction::SendWarningMessage {
14976										msg: msgs::WarningMessage {
14977											channel_id: msg.channel_id,
14978											data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
14979										},
14980										log_level: Level::Trace,
14981									}
14982								});
14983								// This can happen in a fairly tight loop, so we absolutely cannot trigger
14984								// a `ChannelManager` write here.
14985								return NotifyOption::SkipPersistHandleEvents;
14986							}
14987							NotifyOption::SkipPersistNoEvents
14988						}
14989					);
14990				}
14991				return;
14992			}
14993			_ => {}
14994		}
14995
14996		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
14997
14998		let peer_msg = UntrustedString(msg.data.clone());
14999		let reason = ClosureReason::CounterpartyForceClosed { peer_msg };
15000
15001		if msg.channel_id.is_zero() {
15002			let channel_ids: Vec<ChannelId> = {
15003				let per_peer_state = self.per_peer_state.read().unwrap();
15004				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
15005				if peer_state_mutex_opt.is_none() { return; }
15006				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
15007				let peer_state = &mut *peer_state_lock;
15008				// Note that we don't bother generating any events for pre-accept channels -
15009				// they're not considered "channels" yet from the PoV of our events interface.
15010				peer_state.inbound_channel_request_by_id.clear();
15011				peer_state.channel_by_id.keys().cloned().collect()
15012			};
15013			for channel_id in channel_ids {
15014				// Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
15015				let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, reason.clone());
15016			}
15017		} else {
15018			{
15019				// First check if we can advance the channel type and try again.
15020				let per_peer_state = self.per_peer_state.read().unwrap();
15021				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
15022				if peer_state_mutex_opt.is_none() { return; }
15023				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
15024				let peer_state = &mut *peer_state_lock;
15025				match peer_state.channel_by_id.get_mut(&msg.channel_id) {
15026					Some(chan) => match chan.maybe_handle_error_without_close(
15027						self.chain_hash, &self.fee_estimator, &self.logger,
15028						&self.config.read().unwrap(), &peer_state.latest_features,
15029					) {
15030						Ok(Some(OpenChannelMessage::V1(msg))) => {
15031							peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannel {
15032								node_id: counterparty_node_id,
15033								msg,
15034							});
15035							return;
15036						},
15037						Ok(Some(OpenChannelMessage::V2(msg))) => {
15038							peer_state.pending_msg_events.push(MessageSendEvent::SendOpenChannelV2 {
15039								node_id: counterparty_node_id,
15040								msg,
15041							});
15042							return;
15043						},
15044						Ok(None) | Err(()) => {},
15045					},
15046					None => {},
15047				}
15048			}
15049
15050			// Untrusted messages from the peer; we throw away the error if the id points to a non-existent channel
15051			let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, reason);
15052		}
15053	}
15054
15055	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
15056		Some(vec![self.chain_hash])
15057	}
15058
15059	fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
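		// Persist the `ChannelManager` only when the internal handler asks for it; any error forces
		// a persist.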
15060		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15061			let res = self.internal_tx_add_input(counterparty_node_id, msg);
15062			let persist = match &res {
15063				Err(_) => NotifyOption::DoPersist,
15064				Ok(persist) => *persist,
15065			};
15066			let _ = handle_error!(self, res, counterparty_node_id);
15067			persist
15068		});
15069	}
15070
15071	fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
15072		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15073			let res = self.internal_tx_add_output(counterparty_node_id, msg);
15074			let persist = match &res {
15075				Err(_) => NotifyOption::DoPersist,
15076				Ok(persist) => *persist,
15077			};
15078			let _ = handle_error!(self, res, counterparty_node_id);
15079			persist
15080		});
15081	}
15082
15083	fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
15084		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15085			let res = self.internal_tx_remove_input(counterparty_node_id, msg);
15086			let persist = match &res {
15087				Err(_) => NotifyOption::DoPersist,
15088				Ok(persist) => *persist,
15089			};
15090			let _ = handle_error!(self, res, counterparty_node_id);
15091			persist
15092		});
15093	}
15094
15095	fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
15096		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15097			let res = self.internal_tx_remove_output(counterparty_node_id, msg);
15098			let persist = match &res {
15099				Err(_) => NotifyOption::DoPersist,
15100				Ok(persist) => *persist,
15101			};
15102			let _ = handle_error!(self, res, counterparty_node_id);
15103			persist
15104		});
15105	}
15106
15107	fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
15108		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15109			let res = self.internal_tx_complete(counterparty_node_id, msg);
15110			let persist = match &res {
15111				Err(_) => NotifyOption::DoPersist,
15112				Ok(persist) => *persist,
15113			};
15114			let _ = handle_error!(self, res, counterparty_node_id);
15115			persist
15116		});
15117	}
15118
15119	fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
15120		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
15121		let res = self.internal_tx_signatures(&counterparty_node_id, msg);
15122		let _ = handle_error!(self, res, counterparty_node_id);
15123	}
15124
15125	fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
15126		let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
15127			"Dual-funded channels not supported".to_owned(),
15128			msg.channel_id.clone(),
15129		));
15130		let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
15131	}
15132
15133	fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
15134		let err = Err(MsgHandleErrInternal::send_err_msg_no_close(
15135			"Dual-funded channels not supported".to_owned(),
15136			msg.channel_id.clone(),
15137		));
15138		let _: Result<(), _> = handle_error!(self, err, counterparty_node_id);
15139	}
15140
15141	fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
15142		// Note that we never need to persist the updated ChannelManager for an inbound
15143		// tx_abort message - interactive transaction construction does not need to
15144		// be persisted before any signatures are exchanged.
15145		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
15146			let res = self.internal_tx_abort(&counterparty_node_id, msg);
15147			let persist = match &res {
15148				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
15149				Err(_) => NotifyOption::SkipPersistHandleEvents,
15150				Ok(persist) => *persist,
15151			};
15152			let _ = handle_error!(self, res, counterparty_node_id);
15153			persist
15154		});
15155	}
15156
15157	fn message_received(&self) {
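		// Re-enqueue any invoice requests for payments that are still waiting on a BOLT 12 invoice,
		// logging (but otherwise ignoring) any that fail to be enqueued.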
15158		for (payment_id, retryable_invoice_request) in
15159			self.pending_outbound_payments.release_invoice_requests_awaiting_invoice()
15160		{
15161			let RetryableInvoiceRequest { invoice_request, nonce, .. } = retryable_invoice_request;
15162
15163			let peers = self.get_peers_for_blinded_path();
15164			let enqueue_invreq_res =
15165				self.flow.enqueue_invoice_request(invoice_request, payment_id, nonce, peers);
15166			if enqueue_invreq_res.is_err() {
15167				log_warn!(
15168					self.logger,
15169					"Retry failed for invoice request with payment_id {}",
15170					payment_id
15171				);
15172			}
15173		}
15174	}
15175}
15176
15177impl<
15178		M: Deref,
15179		T: Deref,
15180		ES: Deref,
15181		NS: Deref,
15182		SP: Deref,
15183		F: Deref,
15184		R: Deref,
15185		MR: Deref,
15186		L: Deref,
15187	> OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
15188where
15189	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
15190	T::Target: BroadcasterInterface,
15191	ES::Target: EntropySource,
15192	NS::Target: NodeSigner,
15193	SP::Target: SignerProvider,
15194	F::Target: FeeEstimator,
15195	R::Target: Router,
15196	MR::Target: MessageRouter,
15197	L::Target: Logger,
15198{
15199	#[rustfmt::skip]
15200	fn handle_message(
15201		&self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
15202	) -> Option<(OffersMessage, ResponseInstruction)> {
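		// Local helper mapping a BOLT 12 invoice payment attempt into an `InvoiceError` reply (when
		// we have a responder to send it to). It returns early with no reply on success and on
		// duplicate or unexpected invoices.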
15203		macro_rules! handle_pay_invoice_res {
15204			($res: expr, $invoice: expr, $logger: expr) => {{
15205				let error = match $res {
15206					Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
15207						log_trace!(
15208							$logger, "Invoice requires unknown features: {:?}",
15209							$invoice.invoice_features()
15210						);
15211						InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
15212					},
15213					Err(Bolt12PaymentError::SendingFailed(e)) => {
15214						log_trace!($logger, "Failed paying invoice: {:?}", e);
15215						InvoiceError::from_string(format!("{:?}", e))
15216					},
15217					Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
15218						let err_msg = "Failed to create a blinded path back to ourselves";
15219						log_trace!($logger, "{}", err_msg);
15220						InvoiceError::from_string(err_msg.to_string())
15221					},
15222					Err(Bolt12PaymentError::UnexpectedInvoice)
15223						| Err(Bolt12PaymentError::DuplicateInvoice)
15224						| Ok(()) => return None,
15225				};
15226
15227				match responder {
15228					Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
15229					None => {
15230						log_trace!($logger, "No reply path to send error: {:?}", error);
15231						return None
15232					},
15233				}
15234			}}
15235		}
15236
15237		match message {
15238			OffersMessage::InvoiceRequest(invoice_request) => {
15239				let responder = match responder {
15240					Some(responder) => responder,
15241					None => return None,
15242				};
15243
15244				let invoice_request = match self.flow.verify_invoice_request(invoice_request, context) {
15245					Ok(InvreqResponseInstructions::SendInvoice(invoice_request)) => invoice_request,
15246					Ok(InvreqResponseInstructions::SendStaticInvoice { recipient_id, invoice_slot, invoice_request }) => {
15247						self.pending_events.lock().unwrap().push_back((Event::StaticInvoiceRequested {
15248							recipient_id, invoice_slot, reply_path: responder, invoice_request,
15249						}, None));
15250
15251						return None
15252					},
15253					Err(_) => return None,
15254				};
15255
15256				let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
15257					&invoice_request.inner
15258				) {
15259					Ok(amount_msats) => amount_msats,
15260					Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
15261				};
15262
15263				let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
15264				let (payment_hash, payment_secret) = match self.create_inbound_payment(
15265					Some(amount_msats), relative_expiry, None
15266				) {
15267					Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
15268					Err(()) => {
15269						let error = Bolt12SemanticError::InvalidAmount;
15270						return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
15271					},
15272				};
15273
15274				let entropy = &*self.entropy_source;
15275				let (response, context) = self.flow.create_response_for_invoice_request(
15276					&self.node_signer, &self.router, entropy, invoice_request, amount_msats,
15277					payment_hash, payment_secret, self.list_usable_channels()
15278				);
15279
15280				match context {
15281					Some(context) => Some((response, responder.respond_with_reply_path(context))),
15282					None => Some((response, responder.respond()))
15283				}
15284			},
15285			OffersMessage::Invoice(invoice) => {
15286				let payment_id = match self.flow.verify_bolt12_invoice(&invoice, context.as_ref()) {
15287					Ok(payment_id) => payment_id,
15288					Err(()) => return None,
15289				};
15290
15291				let logger = WithContext::from(
15292					&self.logger, None, None, Some(invoice.payment_hash()),
15293				);
15294
15295				if self.config.read().unwrap().manually_handle_bolt12_invoices {
15296					// Update the corresponding entry in `PendingOutboundPayment` for this invoice.
15297					// This ensures that event generation remains idempotent in case we receive
15298					// the same invoice multiple times.
15299					self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?;
15300
15301					let event = Event::InvoiceReceived {
15302						payment_id, invoice, context, responder,
15303					};
15304					self.pending_events.lock().unwrap().push_back((event, None));
15305					return None;
15306				}
15307
15308				let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
15309				handle_pay_invoice_res!(res, invoice, logger);
15310			},
15311			OffersMessage::StaticInvoice(invoice) => {
15312				let payment_id = match context {
15313					Some(OffersContext::OutboundPayment { payment_id, .. }) => payment_id,
15314					_ => return None
15315				};
15316				let res = self.initiate_async_payment(&invoice, payment_id);
15317				handle_pay_invoice_res!(res, invoice, self.logger);
15318			},
15319			OffersMessage::InvoiceError(invoice_error) => {
15320				let payment_hash = match context {
15321					Some(OffersContext::InboundPayment { payment_hash }) => Some(payment_hash),
15322					_ => None,
15323				};
15324
15325				let logger = WithContext::from(&self.logger, None, None, payment_hash);
15326				log_trace!(logger, "Received invoice_error: {}", invoice_error);
15327
15328				match context {
15329					Some(OffersContext::OutboundPayment { payment_id, .. }) => {
15330						self.abandon_payment_with_reason(
15331							payment_id, PaymentFailureReason::InvoiceRequestRejected,
15332						);
15333					},
15334					_ => {},
15335				}
15336
15337				None
15338			},
15339		}
15340	}
15341
15342	fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
15343		self.flow.release_pending_offers_messages()
15344	}
15345}
15346
15347impl<
15348		M: Deref,
15349		T: Deref,
15350		ES: Deref,
15351		NS: Deref,
15352		SP: Deref,
15353		F: Deref,
15354		R: Deref,
15355		MR: Deref,
15356		L: Deref,
15357	> AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
15358where
15359	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
15360	T::Target: BroadcasterInterface,
15361	ES::Target: EntropySource,
15362	NS::Target: NodeSigner,
15363	SP::Target: SignerProvider,
15364	F::Target: FeeEstimator,
15365	R::Target: Router,
15366	MR::Target: MessageRouter,
15367	L::Target: Logger,
15368{
15369	fn handle_offer_paths_request(
15370		&self, message: OfferPathsRequest, context: AsyncPaymentsContext,
15371		responder: Option<Responder>,
15372	) -> Option<(OfferPaths, ResponseInstruction)> {
15373		let peers = self.get_peers_for_blinded_path();
15374		let (message, reply_path_context) =
15375			match self.flow.handle_offer_paths_request(&message, context, peers) {
15376				Some(msg) => msg,
15377				None => return None,
15378			};
15379		responder.map(|resp| (message, resp.respond_with_reply_path(reply_path_context)))
15380	}
15381
15382	fn handle_offer_paths(
15383		&self, message: OfferPaths, context: AsyncPaymentsContext, responder: Option<Responder>,
15384	) -> Option<(ServeStaticInvoice, ResponseInstruction)> {
15385		let responder = match responder {
15386			Some(responder) => responder,
15387			None => return None,
15388		};
15389		let (serve_static_invoice, reply_context) = match self.flow.handle_offer_paths(
15390			message,
15391			context,
15392			responder.clone(),
15393			self.get_peers_for_blinded_path(),
15394			self.list_usable_channels(),
15395			&*self.entropy_source,
15396			&*self.router,
15397		) {
15398			Some((msg, ctx)) => (msg, ctx),
15399			None => return None,
15400		};
15401
15402		// We cached a new pending offer, so persist the cache.
15403		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
15404
15405		let response_instructions = responder.respond_with_reply_path(reply_context);
15406		return Some((serve_static_invoice, response_instructions));
15407	}
15408
15409	fn handle_serve_static_invoice(
15410		&self, message: ServeStaticInvoice, context: AsyncPaymentsContext,
15411		responder: Option<Responder>,
15412	) {
15413		let responder = match responder {
15414			Some(resp) => resp,
15415			None => return,
15416		};
15417
15418		let (recipient_id, invoice_slot) =
15419			match self.flow.verify_serve_static_invoice_message(&message, context) {
15420				Ok((recipient_id, inv_slot)) => (recipient_id, inv_slot),
15421				Err(()) => return,
15422			};
15423
15424		let mut pending_events = self.pending_events.lock().unwrap();
15425		pending_events.push_back((
15426			Event::PersistStaticInvoice {
15427				invoice: message.invoice,
15428				invoice_request_path: message.forward_invoice_request_path,
15429				invoice_slot,
15430				recipient_id,
15431				invoice_persisted_path: responder,
15432			},
15433			None,
15434		));
15435	}
15436
15437	fn handle_static_invoice_persisted(
15438		&self, _message: StaticInvoicePersisted, context: AsyncPaymentsContext,
15439	) {
15440		let should_persist = self.flow.handle_static_invoice_persisted(context);
15441		if should_persist {
15442			let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
15443		}
15444	}
15445
15446	fn handle_held_htlc_available(
15447		&self, _message: HeldHtlcAvailable, context: AsyncPaymentsContext,
15448		responder: Option<Responder>,
15449	) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
15450		self.flow.verify_inbound_async_payment_context(context).ok()?;
15451		return responder.map(|responder| (ReleaseHeldHtlc {}, responder.respond()));
15452	}
15453
15454	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, context: AsyncPaymentsContext) {
15455		match context {
15456			AsyncPaymentsContext::OutboundPayment { payment_id } => {
15457				if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
15458					log_trace!(
15459						self.logger,
15460						"Failed to release held HTLC with payment id {}: {:?}",
15461						payment_id,
15462						e
15463					);
15464				}
15465			},
15466			AsyncPaymentsContext::ReleaseHeldHtlc {
15467				intercept_id,
15468				prev_outbound_scid_alias,
15469				htlc_id,
15470			} => {
15471				// It's possible the release_held_htlc message raced ahead of us transitioning the pending
15472				// update_add to `Self::pending_intercepted_htlcs`. If that's the case, update the pending
15473				// update_add to indicate that the HTLC should be released immediately.
15474				//
15475				// Check for the HTLC here before checking `pending_intercepted_htlcs` to avoid a different
15476				// race where the HTLC gets transitioned to `pending_intercepted_htlcs` after we drop that
15477				// map's lock but before acquiring the `decode_update_add_htlcs` lock.
15478				let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
15479				if let Some(htlcs) = decode_update_add_htlcs.get_mut(&prev_outbound_scid_alias) {
15480					for update_add in htlcs.iter_mut() {
15481						if update_add.htlc_id == htlc_id {
15482							log_trace!(
15483								self.logger,
15484								"Marking held htlc with intercept_id {} as ready to release",
15485								intercept_id
15486							);
15487							update_add.hold_htlc.take();
15488							return;
15489						}
15490					}
15491				}
15492				core::mem::drop(decode_update_add_htlcs);
15493
15494				let mut htlc = {
15495					let mut pending_intercept_htlcs =
15496						self.pending_intercepted_htlcs.lock().unwrap();
15497					match pending_intercept_htlcs.remove(&intercept_id) {
15498						Some(htlc) => htlc,
15499						None => {
15500							log_trace!(
15501								self.logger,
15502								"Failed to release HTLC with intercept_id {}: HTLC not found",
15503								intercept_id
15504							);
15505							return;
15506						},
15507					}
15508				};
15509				match htlc.forward_info.routing {
15510					PendingHTLCRouting::Forward { ref mut hold_htlc, .. } => {
15511						debug_assert!(hold_htlc.is_some());
15512						*hold_htlc = None;
15513					},
15514					_ => {
15515						debug_assert!(false, "HTLC intercepts can only be forwards");
15516						return;
15517					},
15518				}
15519
15520				let logger = WithContext::from(
15521					&self.logger,
15522					Some(htlc.prev_counterparty_node_id),
15523					Some(htlc.prev_channel_id),
15524					Some(htlc.forward_info.payment_hash),
15525				);
15526				log_trace!(logger, "Releasing held htlc with intercept_id {}", intercept_id);
15527
15528				let mut per_source_pending_forward = [(
15529					htlc.prev_outbound_scid_alias,
15530					htlc.prev_counterparty_node_id,
15531					htlc.prev_funding_outpoint,
15532					htlc.prev_channel_id,
15533					htlc.prev_user_channel_id,
15534					vec![(htlc.forward_info, htlc.prev_htlc_id)],
15535				)];
15536				self.forward_htlcs(&mut per_source_pending_forward);
15537				PersistenceNotifierGuard::notify_on_drop(self);
15538			},
15539			_ => return,
15540		}
15541	}
15542
15543	fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
15544		self.flow.release_pending_async_messages()
15545	}
15546}
15547
15548#[cfg(feature = "dnssec")]
15549impl<
15550		M: Deref,
15551		T: Deref,
15552		ES: Deref,
15553		NS: Deref,
15554		SP: Deref,
15555		F: Deref,
15556		R: Deref,
15557		MR: Deref,
15558		L: Deref,
15559	> DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
15560where
15561	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
15562	T::Target: BroadcasterInterface,
15563	ES::Target: EntropySource,
15564	NS::Target: NodeSigner,
15565	SP::Target: SignerProvider,
15566	F::Target: FeeEstimator,
15567	R::Target: Router,
15568	MR::Target: MessageRouter,
15569	L::Target: Logger,
15570{
15571	fn handle_dnssec_query(
15572		&self, _message: DNSSECQuery, _responder: Option<Responder>,
15573	) -> Option<(DNSResolverMessage, ResponseInstruction)> {
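		// We only act as a DNSSEC client (resolving names in order to pay offers), never as a
		// resolver, so we have nothing to respond with here.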
15574		None
15575	}
15576
15577	#[rustfmt::skip]
15578	fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
15579		let offer_opt = self.flow.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
15580		#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
15581		if let Some((completed_requests, mut offer)) = offer_opt {
15582			for (name, payment_id) in completed_requests {
15583				#[cfg(feature = "_test_utils")]
15584				if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
15585					// If we have multiple pending requests we may end up over-using the override
15586					// offer, but tests can deal with that.
15587					offer = replacement_offer;
15588				}
15589				if let Ok((amt_msats, payer_note)) = self.pending_outbound_payments.params_for_payment_awaiting_offer(payment_id) {
15590					let offer_pay_res =
15591						self.pay_for_offer_intern(&offer, None, Some(amt_msats), payer_note, payment_id, Some(name),
15592							|retryable_invoice_request| {
15593								self.pending_outbound_payments
15594									.received_offer(payment_id, Some(retryable_invoice_request))
15595									.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
15596						});
15597					if offer_pay_res.is_err() {
15598						// The offer we tried to pay is the canonical current offer for the name we
15599						// wanted to pay. If we can't pay it, there's no way to recover so fail the
15600						// payment.
15601						// Note that the PaymentFailureReason should be ignored for an
15602						// AwaitingInvoice payment.
15603						self.pending_outbound_payments.abandon_payment(
15604							payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
15605						);
15606					}
15607				}
15608			}
15609		}
15610	}
15611
15612	fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
15613		self.flow.release_pending_dns_messages()
15614	}
15615}
15616
15617impl<
15618		M: Deref,
15619		T: Deref,
15620		ES: Deref,
15621		NS: Deref,
15622		SP: Deref,
15623		F: Deref,
15624		R: Deref,
15625		MR: Deref,
15626		L: Deref,
15627	> NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
15628where
15629	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
15630	T::Target: BroadcasterInterface,
15631	ES::Target: EntropySource,
15632	NS::Target: NodeSigner,
15633	SP::Target: SignerProvider,
15634	F::Target: FeeEstimator,
15635	R::Target: Router,
15636	MR::Target: MessageRouter,
15637	L::Target: Logger,
15638{
15639	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
15640		self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
15641	}
15642}
15643
15644/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
15645/// [`ChannelManager`].
15646pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
15647	let mut node_features = provided_init_features(config).to_context();
15648	node_features.set_keysend_optional();
15649	node_features
15650}
15651
15652/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
15653/// [`ChannelManager`].
15654///
15655/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
15656/// or not. Thus, this method is not public.
15657#[cfg(any(feature = "_test_utils", test))]
15658pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
15659	provided_init_features(config).to_context()
15660}
15661
15662/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
15663/// [`ChannelManager`].
15664pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
15665	provided_init_features(config).to_context()
15666}
15667
15668/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
15669/// [`ChannelManager`].
15670pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
15671	provided_init_features(config).to_context()
15672}
15673
15674/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
15675/// [`ChannelManager`].
15676pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
15677	ChannelTypeFeatures::from_init(&provided_init_features(config))
15678}
15679
15680/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
15681/// [`ChannelManager`].
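///
/// Most bits are static, while a few follow the config. A minimal sketch (the `supports_*`
/// accessors are assumed from the features API rather than taken from this file):
///
/// ```ignore
/// let mut config = UserConfig::default();
/// config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
/// let features = provided_init_features(&config);
/// // Anchor support is only advertised because the handshake config above opted in.
/// assert!(features.supports_anchors_zero_fee_htlc_tx());
/// ```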
15682pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
15683	// Note that if new features are added here which other peers may (eventually) require, we
15684	// should also add the corresponding (optional) bit to the [`BaseMessageHandler`] impl for
15685	// [`ErroringMessageHandler`].
15686	let mut features = InitFeatures::empty();
15687	features.set_data_loss_protect_required();
15688	features.set_upfront_shutdown_script_optional();
15689	features.set_variable_length_onion_required();
15690	features.set_static_remote_key_required();
15691	features.set_payment_secret_required();
15692	features.set_basic_mpp_optional();
15693	features.set_wumbo_optional();
15694	features.set_shutdown_any_segwit_optional();
15695	features.set_channel_type_required();
15696	features.set_scid_privacy_optional();
15697	features.set_zero_conf_optional();
15698	features.set_route_blinding_optional();
15699	features.set_provide_storage_optional();
15700	#[cfg(simple_close)]
15701	features.set_simple_close_optional();
15702	features.set_quiescence_optional();
15703	features.set_splicing_optional();
15704
15705	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
15706		features.set_anchors_zero_fee_htlc_tx_optional();
15707	}
15708	#[cfg(dual_funding)]
15709	if config.enable_dual_funded_channels {
15710		features.set_dual_fund_optional();
15711	}
15712
15713	if config.channel_handshake_config.negotiate_anchor_zero_fee_commitments {
15714		features.set_anchor_zero_fee_commitments_optional();
15715	}
15716
15717	if config.enable_htlc_hold {
15718		features.set_htlc_hold_optional();
15719	}
15720
15721	features
15722}
15723
15724const SERIALIZATION_VERSION: u8 = 1;
15725const MIN_SERIALIZATION_VERSION: u8 = 1;
15726
15727impl_writeable_tlv_based!(PhantomRouteHints, {
15728	(2, channels, required_vec),
15729	(4, phantom_scid, required),
15730	(6, real_node_pubkey, required),
15731});
15732
15733impl_writeable_tlv_based!(BlindedForward, {
15734	(0, inbound_blinding_point, required),
15735	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
15736	(3, next_blinding_override, option),
15737});
15738
15739impl_writeable_tlv_based_enum!(PendingHTLCRouting,
15740	(0, Forward) => {
15741		(0, onion_packet, required),
15742		(1, blinded, option),
15743		(2, short_channel_id, required),
15744		(3, incoming_cltv_expiry, option),
15745		(4, hold_htlc, option),
15746	},
15747	(1, Receive) => {
15748		(0, payment_data, required),
15749		(1, phantom_shared_secret, option),
15750		(2, incoming_cltv_expiry, required),
15751		(3, payment_metadata, option),
15752		(5, custom_tlvs, optional_vec),
15753		(7, requires_blinded_error, (default_value, false)),
15754		(9, payment_context, option),
15755	},
15756	(2, ReceiveKeysend) => {
15757		(0, payment_preimage, required),
15758		(1, requires_blinded_error, (default_value, false)),
15759		(2, incoming_cltv_expiry, required),
15760		(3, payment_metadata, option),
15761		(4, payment_data, option), // Added in 0.0.116
15762		(5, custom_tlvs, optional_vec),
15763		(7, has_recipient_created_payment_secret, (default_value, false)),
15764		(9, payment_context, option),
15765		(11, invoice_request, option),
15766	},
15767	(3, TrampolineForward) => {
15768		(0, incoming_shared_secret, required),
15769		(2, onion_packet, required),
15770		(4, blinded, option),
15771		(6, node_id, required),
15772		(8, incoming_cltv_expiry, required),
15773	}
15774);
15775
15776impl_writeable_tlv_based!(PendingHTLCInfo, {
15777	(0, routing, required),
15778	(2, incoming_shared_secret, required),
15779	(4, payment_hash, required),
15780	(6, outgoing_amt_msat, required),
15781	(8, outgoing_cltv_value, required),
15782	(9, incoming_amt_msat, option),
15783	(10, skimmed_fee_msat, option),
15784});
15785
15786impl Writeable for HTLCFailureMsg {
15787	#[rustfmt::skip]
15788	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
15789		match self {
15790			HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason, attribution_data }) => {
15791				0u8.write(writer)?;
15792				channel_id.write(writer)?;
15793				htlc_id.write(writer)?;
15794				reason.write(writer)?;
15795
15796				// This code will only ever be hit for legacy data that is re-serialized. It isn't necessary to try
15797				// writing out attribution data, because it can never be present.
15798				debug_assert!(attribution_data.is_none());
15799			},
15800			HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
15801				channel_id, htlc_id, sha256_of_onion, failure_code
15802			}) => {
15803				1u8.write(writer)?;
15804				channel_id.write(writer)?;
15805				htlc_id.write(writer)?;
15806				sha256_of_onion.write(writer)?;
15807				failure_code.write(writer)?;
15808			},
15809		}
15810		Ok(())
15811	}
15812}
15813
15814impl Readable for HTLCFailureMsg {
15815	#[rustfmt::skip]
15816	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
15817		let id: u8 = Readable::read(reader)?;
15818		match id {
15819			0 => {
15820				Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
15821					channel_id: Readable::read(reader)?,
15822					htlc_id: Readable::read(reader)?,
15823					reason: Readable::read(reader)?,
15824					attribution_data: None,
15825				}))
15826			},
15827			1 => {
15828				Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
15829					channel_id: Readable::read(reader)?,
15830					htlc_id: Readable::read(reader)?,
15831					sha256_of_onion: Readable::read(reader)?,
15832					failure_code: Readable::read(reader)?,
15833				}))
15834			},
15835			// In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
15836			// weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
15837			// messages contained in the variants.
15838			// In version 0.0.101, support for reading the variants with these types was added, and
15839			// we should migrate to writing these variants when UpdateFailHTLC or
15840			// UpdateFailMalformedHTLC get TLV fields.
15841			2 => {
15842				let length: BigSize = Readable::read(reader)?;
15843				let mut s = FixedLengthReader::new(reader, length.0);
15844				let res = LengthReadable::read_from_fixed_length_buffer(&mut s)?;
15845				s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
15846				Ok(HTLCFailureMsg::Relay(res))
15847			},
15848			3 => {
15849				let length: BigSize = Readable::read(reader)?;
15850				let mut s = FixedLengthReader::new(reader, length.0);
15851				let res = LengthReadable::read_from_fixed_length_buffer(&mut s)?;
15852				s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
15853				Ok(HTLCFailureMsg::Malformed(res))
15854			},
15855			_ => Err(DecodeError::UnknownRequiredFeature),
15856		}
15857	}
15858}
15859
15860impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
15861	(0, Forward),
15862	(1, Fail),
15863);
15864
15865impl_writeable_tlv_based_enum!(BlindedFailure,
15866	(0, FromIntroductionNode) => {},
15867	(2, FromBlindedNode) => {},
15868);
15869
15870impl_writeable_tlv_based!(HTLCPreviousHopData, {
15871	(0, prev_outbound_scid_alias, required),
15872	(1, phantom_shared_secret, option),
15873	(2, outpoint, required),
15874	(3, blinded_failure, option),
15875	(4, htlc_id, required),
15876	(5, cltv_expiry, option),
15877	(6, incoming_packet_shared_secret, required),
15878	(7, user_channel_id, option),
15879	// Note that by the time we get past the required read for type 2 above, outpoint will be
15880	// filled in, so we can safely unwrap it here.
15881	(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
15882	(11, counterparty_node_id, option),
15883});
15884
15885impl Writeable for ClaimableHTLC {
15886	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
15887		let (payment_data, keysend_preimage) = match &self.onion_payload {
15888			OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
15889			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
15890		};
15891		write_tlv_fields!(writer, {
15892			(0, self.prev_hop, required),
15893			(1, self.total_msat, required),
15894			(2, self.value, required),
15895			(3, self.sender_intended_value, required),
15896			(4, payment_data, option),
15897			(5, self.total_value_received, option),
15898			(6, self.cltv_expiry, required),
15899			(8, keysend_preimage, option),
15900			(10, self.counterparty_skimmed_fee_msat, option),
15901		});
15902		Ok(())
15903	}
15904}
15905
15906impl Readable for ClaimableHTLC {
15907	#[rustfmt::skip]
15908	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
15909		_init_and_read_len_prefixed_tlv_fields!(reader, {
15910			(0, prev_hop, required),
15911			(1, total_msat, option),
15912			(2, value_ser, required),
15913			(3, sender_intended_value, option),
15914			(4, payment_data_opt, option),
15915			(5, total_value_received, option),
15916			(6, cltv_expiry, required),
15917			(8, keysend_preimage, option),
15918			(10, counterparty_skimmed_fee_msat, option),
15919		});
15920		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
15921		let value = value_ser.0.unwrap();
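		// A keysend preimage implies a spontaneous payment, which must not also carry payment data;
		// otherwise this is an invoice payment and `total_msat` can fall back to the legacy hop data
		// if it wasn't written explicitly.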
15922		let onion_payload = match keysend_preimage {
15923			Some(p) => {
15924				if payment_data.is_some() {
15925					return Err(DecodeError::InvalidValue)
15926				}
15927				if total_msat.is_none() {
15928					total_msat = Some(value);
15929				}
15930				OnionPayload::Spontaneous(p)
15931			},
15932			None => {
15933				if total_msat.is_none() {
15934					if payment_data.is_none() {
15935						return Err(DecodeError::InvalidValue)
15936					}
15937					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
15938				}
15939				OnionPayload::Invoice { _legacy_hop_data: payment_data }
15940			},
15941		};
15942		Ok(Self {
15943			prev_hop: prev_hop.0.unwrap(),
15944			timer_ticks: 0,
15945			value,
15946			sender_intended_value: sender_intended_value.unwrap_or(value),
15947			total_value_received,
15948			total_msat: total_msat.unwrap(),
15949			onion_payload,
15950			cltv_expiry: cltv_expiry.0.unwrap(),
15951			counterparty_skimmed_fee_msat,
15952		})
15953	}
15954}
15955
15956impl Readable for HTLCSource {
15957	#[rustfmt::skip]
15958	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
15959		let id: u8 = Readable::read(reader)?;
15960		match id {
15961			0 => {
15962				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
15963				let mut first_hop_htlc_msat: u64 = 0;
15964				let mut path_hops = Vec::new();
15965				let mut payment_id = None;
15966				let mut payment_params: Option<PaymentParameters> = None;
15967				let mut blinded_tail: Option<BlindedTail> = None;
15968				let mut bolt12_invoice: Option<PaidBolt12Invoice> = None;
15969				read_tlv_fields!(reader, {
15970					(0, session_priv, required),
15971					(1, payment_id, option),
15972					(2, first_hop_htlc_msat, required),
15973					(4, path_hops, required_vec),
15974					(5, payment_params, (option: ReadableArgs, 0)),
15975					(6, blinded_tail, option),
15976					(7, bolt12_invoice, option),
15977				});
15978				if payment_id.is_none() {
15979					// For backwards compat, if there was no payment_id written, use the session_priv bytes
15980					// instead.
15981					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
15982				}
15983				let path = Path { hops: path_hops, blinded_tail };
15984				if path.hops.len() == 0 {
15985					return Err(DecodeError::InvalidValue);
15986				}
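				// If the payment params were serialized with a zero final CLTV expiry delta, recover it
				// from the path itself.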
15987				if let Some(params) = payment_params.as_mut() {
15988					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
15989						if final_cltv_expiry_delta == &0 {
15990							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
15991						}
15992					}
15993				}
15994				Ok(HTLCSource::OutboundRoute {
15995					session_priv: session_priv.0.unwrap(),
15996					first_hop_htlc_msat,
15997					path,
15998					payment_id: payment_id.unwrap(),
15999					bolt12_invoice,
16000				})
16001			}
16002			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
16003			_ => Err(DecodeError::UnknownRequiredFeature),
16004		}
16005	}
16006}
16007
16008impl Writeable for HTLCSource {
16009	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
16010		match self {
16011			HTLCSource::OutboundRoute {
16012				ref session_priv,
16013				ref first_hop_htlc_msat,
16014				ref path,
16015				payment_id,
16016				bolt12_invoice,
16017			} => {
16018				0u8.write(writer)?;
16019				let payment_id_opt = Some(payment_id);
16020				write_tlv_fields!(writer, {
16021					(0, session_priv, required),
16022					(1, payment_id_opt, option),
16023					(2, first_hop_htlc_msat, required),
16024					// 3 was previously used to write a PaymentSecret for the payment.
16025					(4, path.hops, required_vec),
16026					(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
16027					(6, path.blinded_tail, option),
16028					(7, bolt12_invoice, option),
16029				});
16030			},
16031			HTLCSource::PreviousHopData(ref field) => {
16032				1u8.write(writer)?;
16033				field.write(writer)?;
16034			},
16035		}
16036		Ok(())
16037	}
16038}
16039
16040impl_writeable_tlv_based!(PendingAddHTLCInfo, {
16041	(0, forward_info, required),
16042	(1, prev_user_channel_id, (default_value, 0)),
16043	(2, prev_outbound_scid_alias, required),
16044	(4, prev_htlc_id, required),
16045	(6, prev_funding_outpoint, required),
16046	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
16047	// filled in, so we can safely unwrap it here.
16048	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
16049	(9, prev_counterparty_node_id, required),
16050});
16051
16052impl Writeable for HTLCForwardInfo {
16053	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
16054		const FAIL_HTLC_VARIANT_ID: u8 = 1;
16055		match self {
16056			Self::AddHTLC(info) => {
16057				0u8.write(w)?;
16058				info.write(w)?;
16059			},
16060			Self::FailHTLC { htlc_id, err_packet } => {
16061				FAIL_HTLC_VARIANT_ID.write(w)?;
16062				write_tlv_fields!(w, {
16063					(0, htlc_id, required),
16064					(2, err_packet.data, required),
16065					(5, err_packet.attribution_data, option),
16066				});
16067			},
16068			Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
16069				// Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
16070				// packet so older versions have something to fail back with, but serialize the real data as
16071				// optional TLVs for the benefit of newer versions.
16072				FAIL_HTLC_VARIANT_ID.write(w)?;
16073				write_tlv_fields!(w, {
16074					(0, htlc_id, required),
16075					(1, failure_code, required),
16076					(2, Vec::<u8>::new(), required),
16077					(3, sha256_of_onion, required),
16078				});
16079			},
16080		}
16081		Ok(())
16082	}
16083}
16084
16085impl Readable for HTLCForwardInfo {
16086	#[rustfmt::skip]
16087	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
16088		let id: u8 = Readable::read(r)?;
16089		Ok(match id {
16090			0 => Self::AddHTLC(Readable::read(r)?),
16091			1 => {
16092				_init_and_read_len_prefixed_tlv_fields!(r, {
16093					(0, htlc_id, required),
16094					(1, malformed_htlc_failure_code, option),
16095					(2, err_packet, required),
16096					(3, sha256_of_onion, option),
16097					(5, attribution_data, option),
16098				});
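				// The presence of a failure code means this was really a `FailMalformedHTLC`, written
				// disguised as a `FailHTLC` for the benefit of older readers (see the `Writeable` impl
				// above), so reconstruct the malformed variant here.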
16099				if let Some(failure_code) = malformed_htlc_failure_code {
16100					if attribution_data.is_some() {
16101						return Err(DecodeError::InvalidValue);
16102					}
16103					Self::FailMalformedHTLC {
16104						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
16105						failure_code,
16106						sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
16107					}
16108				} else {
16109					Self::FailHTLC {
16110						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
16111						err_packet: crate::ln::msgs::OnionErrorPacket {
16112							data: _init_tlv_based_struct_field!(err_packet, required),
16113							attribution_data: _init_tlv_based_struct_field!(attribution_data, option),
16114						},
16115					}
16116				}
16117			},
16118			_ => return Err(DecodeError::InvalidValue),
16119		})
16120	}
16121}
16122
16123impl_writeable_tlv_based!(PendingInboundPayment, {
16124	(0, payment_secret, required),
16125	(2, expiry_time, required),
16126	(4, user_payment_id, required),
16127	(6, payment_preimage, required),
16128	(8, min_value_msat, required),
16129});
16130
16131impl<
16132		M: Deref,
16133		T: Deref,
16134		ES: Deref,
16135		NS: Deref,
16136		SP: Deref,
16137		F: Deref,
16138		R: Deref,
16139		MR: Deref,
16140		L: Deref,
16141	> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
16142where
16143	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
16144	T::Target: BroadcasterInterface,
16145	ES::Target: EntropySource,
16146	NS::Target: NodeSigner,
16147	SP::Target: SignerProvider,
16148	F::Target: FeeEstimator,
16149	R::Target: Router,
16150	MR::Target: MessageRouter,
16151	L::Target: Logger,
16152{
16153	#[rustfmt::skip]
16154	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
16155		let _consistency_lock = self.total_consistency_lock.write().unwrap();
16156
16157		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
16158
16159		self.chain_hash.write(writer)?;
16160		{
16161			let best_block = self.best_block.read().unwrap();
16162			best_block.height.write(writer)?;
16163			best_block.block_hash.write(writer)?;
16164		}
16165
16166		let per_peer_state = self.per_peer_state.write().unwrap();
16167
16168		let mut serializable_peer_count: u64 = 0;
16169		{
16170			let mut number_of_funded_channels = 0;
16171			for (_, peer_state_mutex) in per_peer_state.iter() {
16172				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
16173				let peer_state = &mut *peer_state_lock;
16174				if !peer_state.ok_to_remove(false) {
16175					serializable_peer_count += 1;
16176				}
16177
16178				number_of_funded_channels += peer_state.channel_by_id
16179					.values()
16180					.filter_map(Channel::as_funded)
16181					.filter(|chan| chan.context.can_resume_on_restart())
16182					.count();
16183			}
16184
16185			(number_of_funded_channels as u64).write(writer)?;
16186
16187			for (_, peer_state_mutex) in per_peer_state.iter() {
16188				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
16189				let peer_state = &mut *peer_state_lock;
16190				for channel in peer_state.channel_by_id
16191					.values()
16192					.filter_map(Channel::as_funded)
16193					.filter(|channel| channel.context.can_resume_on_restart())
16194				{
16195					channel.write(writer)?;
16196				}
16197			}
16198		}
16199
16200		{
16201			let forward_htlcs = self.forward_htlcs.lock().unwrap();
16202			(forward_htlcs.len() as u64).write(writer)?;
16203			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
16204				short_channel_id.write(writer)?;
16205				(pending_forwards.len() as u64).write(writer)?;
16206				for forward in pending_forwards {
16207					forward.write(writer)?;
16208				}
16209			}
16210		}
16211
16212		let mut decode_update_add_htlcs_opt = None;
16213		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
16214		if !decode_update_add_htlcs.is_empty() {
16215			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
16216		}
16217
16218		let claimable_payments = self.claimable_payments.lock().unwrap();
16219		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
16220
16221		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
16222		let mut htlc_onion_fields: Vec<&_> = Vec::new();
16223		(claimable_payments.claimable_payments.len() as u64).write(writer)?;
16224		for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
16225			payment_hash.write(writer)?;
16226			(payment.htlcs.len() as u64).write(writer)?;
16227			for htlc in payment.htlcs.iter() {
16228				htlc.write(writer)?;
16229			}
16230			htlc_purposes.push(&payment.purpose);
16231			htlc_onion_fields.push(&payment.onion_fields);
16232		}
16233
16234		let mut monitor_update_blocked_actions_per_peer = None;
16235		let mut peer_states = Vec::new();
16236		for (_, peer_state_mutex) in per_peer_state.iter() {
16237			// Because we're holding the owning `per_peer_state` write lock here, there's no chance
16238			// of a lockorder violation deadlock - no other thread can be holding any
16239			// per_peer_state lock at all.
16240			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
16241		}
16242
16243		let mut peer_storage_dir: Vec<(&PublicKey, &Vec<u8>)> = Vec::new();
16244
16245		(serializable_peer_count).write(writer)?;
16246		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
16247			// Peers which we have no channels to should be dropped once disconnected. As we
16248			// disconnect all peers when shutting down and serializing the ChannelManager, we
16249			// consider all peers as disconnected here. There's therefore no need to write peers with
16250			// no channels.
16251			if !peer_state.ok_to_remove(false) {
16252				peer_pubkey.write(writer)?;
16253				peer_state.latest_features.write(writer)?;
16254				peer_storage_dir.push((peer_pubkey, &peer_state.peer_storage));
16255
16256				if !peer_state.monitor_update_blocked_actions.is_empty() {
16257					monitor_update_blocked_actions_per_peer
16258						.get_or_insert_with(Vec::new)
16259						.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
16260				}
16261			}
16262		}
16263
16264
16265		// Since some FundingNegotiation variants are not persisted, any splice in such state must
16266		// be failed upon reload. However, as the necessary information for the SpliceFailed event
16267		// is not persisted, the event itself needs to be persisted even though it hasn't been
16268		// emitted yet. These are removed after the events are written.
16269		let mut events = self.pending_events.lock().unwrap();
16270		let event_count = events.len();
16271		for peer_state in peer_states.iter() {
16272			for chan in peer_state.channel_by_id.values().filter_map(Channel::as_funded) {
16273				if let Some(splice_funding_failed) = chan.maybe_splice_funding_failed() {
16274					events.push_back((
16275						events::Event::SpliceFailed {
16276							channel_id: chan.context.channel_id(),
16277							counterparty_node_id: chan.context.get_counterparty_node_id(),
16278							user_channel_id: chan.context.get_user_id(),
16279							abandoned_funding_txo: splice_funding_failed.funding_txo,
16280							channel_type: splice_funding_failed.channel_type,
16281							contributed_inputs: splice_funding_failed.contributed_inputs,
16282							contributed_outputs: splice_funding_failed.contributed_outputs,
16283						},
16284						None,
16285					));
16286				}
16287			}
16288		}
16289
16290		// LDK versions prior to 0.0.115 don't support post-event actions, thus if there are no
16291		// actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will
16292		// refuse to read the new ChannelManager.
16293		let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
16294		if events_not_backwards_compatible {
16295			// If we're going to write an even TLV that will overwrite our events anyway, we might as
16296			// well save the space and not write any events here.
16297			0u64.write(writer)?;
16298		} else {
16299			(events.len() as u64).write(writer)?;
16300			for (event, _) in events.iter() {
16301				event.write(writer)?;
16302			}
16303		}
16304
16305		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
16306		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
16307		// the closing monitor updates were always effectively replayed on startup (either directly
16308		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
16309		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
16310		0u64.write(writer)?;
16311
16312		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
16313		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
16314		// likely to be identical.
16315		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
16316		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
16317
16318		// LDK versions prior to 0.0.104 wrote `pending_inbound_payments` here, with deprecated support
16319		// for stateful inbound payments maintained until 0.0.116, after which no further inbound
16320		// payments could have been written here.
16321		(0 as u64).write(writer)?;
16322
16323		// For backwards compat, write the session privs and their total length.
16324		let mut num_pending_outbounds_compat: u64 = 0;
16325		for (_, outbound) in pending_outbound_payments.iter() {
16326			if !outbound.is_fulfilled() && !outbound.abandoned() {
16327				num_pending_outbounds_compat += outbound.remaining_parts() as u64;
16328			}
16329		}
16330		num_pending_outbounds_compat.write(writer)?;
16331		for (_, outbound) in pending_outbound_payments.iter() {
16332			match outbound {
16333				PendingOutboundPayment::Legacy { session_privs } |
16334				PendingOutboundPayment::Retryable { session_privs, .. } => {
16335					for session_priv in session_privs.iter() {
16336						session_priv.write(writer)?;
16337					}
16338				}
16339				PendingOutboundPayment::AwaitingInvoice { .. } => {},
16340				PendingOutboundPayment::AwaitingOffer { .. } => {},
16341				PendingOutboundPayment::InvoiceReceived { .. } => {},
16342				PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
16343				PendingOutboundPayment::Fulfilled { .. } => {},
16344				PendingOutboundPayment::Abandoned { .. } => {},
16345			}
16346		}
16347
16348		// Encode without retry info for 0.0.101 compatibility.
16349		let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
16350		for (id, outbound) in pending_outbound_payments.iter() {
16351			match outbound {
16352				PendingOutboundPayment::Legacy { session_privs } |
16353				PendingOutboundPayment::Retryable { session_privs, .. } => {
16354					pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
16355				},
16356				_ => {},
16357			}
16358		}
16359
16360		let mut pending_intercepted_htlcs = None;
16361		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
16362		if our_pending_intercepts.len() != 0 {
16363			pending_intercepted_htlcs = Some(our_pending_intercepts);
16364		}
16365
16366		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
16367		if pending_claiming_payments.as_ref().unwrap().is_empty() {
16368			// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
16369			// map. Thus, if there are no entries we skip writing a TLV for it.
16370			pending_claiming_payments = None;
16371		}
16372
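		// In-flight monitor updates are written twice below: keyed by funding outpoint (TLV type 10)
		// for older readers, and keyed by channel id (TLV type 17) for current ones.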
16373		let mut legacy_in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
16374		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &ChannelId), &Vec<ChannelMonitorUpdate>>> = None;
16375		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
16376			for (channel_id, (funding_txo, updates)) in peer_state.in_flight_monitor_updates.iter() {
16377				if !updates.is_empty() {
16378					legacy_in_flight_monitor_updates.get_or_insert_with(|| new_hash_map())
16379						.insert((counterparty_id, funding_txo), updates);
16380					in_flight_monitor_updates.get_or_insert_with(|| new_hash_map())
16381						.insert((counterparty_id, channel_id), updates);
16382				}
16383			}
16384		}
16385
16386		write_tlv_fields!(writer, {
16387			(1, pending_outbound_payments_no_retry, required),
16388			(2, pending_intercepted_htlcs, option),
16389			(3, pending_outbound_payments, required),
16390			(4, pending_claiming_payments, option),
16391			(5, self.our_network_pubkey, required),
16392			(6, monitor_update_blocked_actions_per_peer, option),
16393			(7, self.fake_scid_rand_bytes, required),
16394			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
16395			(9, htlc_purposes, required_vec),
16396			(10, legacy_in_flight_monitor_updates, option),
16397			(11, self.probing_cookie_secret, required),
16398			(13, htlc_onion_fields, optional_vec),
16399			(14, decode_update_add_htlcs_opt, option),
16400			(15, self.inbound_payment_id_secret, required),
16401			(17, in_flight_monitor_updates, option),
16402			(19, peer_storage_dir, optional_vec),
16403			(21, WithoutLength(&self.flow.writeable_async_receive_offer_cache()), required),
16404		});
16405
16406		// Remove the SpliceFailed events added earlier.
16407		events.truncate(event_count);
16408
16409		Ok(())
16410	}
16411}
16412
16413impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
16414	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
16415		(self.len() as u64).write(w)?;
16416		for (event, action) in self.iter() {
16417			event.write(w)?;
16418			action.write(w)?;
16419			#[cfg(debug_assertions)]
16420			{
16421				// Events are MaybeReadable, in some cases indicating that they shouldn't actually
16422				// be persisted and are regenerated on restart. However, if such an event has a
16423				// post-event-handling action we'll write nothing for the event and would have to
16424				// either forget the action or fail on deserialization (which we do below). Thus,
16425				// check that the event is sane here.
16426				let event_encoded = event.encode();
16427				let event_read: Option<Event> =
16428					MaybeReadable::read(&mut &event_encoded[..]).unwrap();
16429				if action.is_some() {
16430					assert!(event_read.is_some());
16431				}
16432			}
16433		}
16434		Ok(())
16435	}
16436}
16437impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
16438	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
16439		let len: u64 = Readable::read(reader)?;
16440		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
16441		let event_size = mem::size_of::<(events::Event, Option<EventCompletionAction>)>();
16442		let mut events: Self =
16443			VecDeque::with_capacity(cmp::min(MAX_ALLOC_SIZE / event_size as u64, len) as usize);
16444		for _ in 0..len {
16445			let ev_opt = MaybeReadable::read(reader)?;
16446			let action = Readable::read(reader)?;
16447			if let Some(ev) = ev_opt {
16448				events.push_back((ev, action));
16449			} else if action.is_some() {
16450				return Err(DecodeError::InvalidValue);
16451			}
16452		}
16453		Ok(events)
16454	}
16455}
16456
16457/// Arguments for the creation of a ChannelManager that are not deserialized.
16458///
16459/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
16460/// is:
16461/// 1) Deserialize all stored [`ChannelMonitor`]s.
16462/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
16463///    `<(BlockHash, ChannelManager)>::read(reader, args)`
16464///    This may result in closing some channels, to ensure no loss of funds, if the [`ChannelMonitor`]
16465///    is newer than the stored [`ChannelManager`] state. Thus, transactions may be broadcast.
16466/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
16467///    same way you would handle a [`chain::Filter`] call using
16468///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
16469/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
16470/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
16471/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
16472///    This is important if you have replayed a nontrivial number of blocks in step (4), allowing
16473///    you to avoid having to replay the same blocks if you shut down quickly after startup. It is
16474///    otherwise not required.
16475///
16476///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
16477///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
16478///    the next step.
16479///
16480///    If you wish to avoid this for performance reasons, use
16481///    [`ChainMonitor::load_existing_monitor`].
16482/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
16483///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
16484///
16485/// Note that the ordering of steps #4-7 is not important; however, all four must occur before you
16486/// call any other methods on the newly-deserialized [`ChannelManager`]. See the example below.
16487///
16488/// Note that because some channels may be closed during deserialization, it is critical that you
16489/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
16490/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
16491/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
16492/// not force-close the same channels but consider them live), you may end up revoking a state for
16493/// which you've already broadcasted the transaction.
16494///
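/// # Example
///
/// A minimal, illustrative sketch of the flow above. The monitor-loading helper, the reader, and
/// the handler objects (`entropy_source`, `node_signer`, `chain_monitor`, etc.) are assumed to be
/// constructed elsewhere; the helper names here are placeholders, not real LDK APIs:
///
/// ```ignore
/// // (1) Deserialize all stored ChannelMonitors (persistence-layer specific, placeholder helper).
/// let monitors: Vec<&ChannelMonitor<_>> = read_all_monitors(&store);
///
/// // (2) Fill in the read args and deserialize the ChannelManager itself.
/// let read_args = ChannelManagerReadArgs::new(
/// 	entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
/// 	tx_broadcaster, router, message_router, logger, user_config, monitors.clone(),
/// );
/// let (best_block_hash, channel_manager) =
/// 	<(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)?;
///
/// // (3)-(5) Register outpoints with your chain source if needed, then replay any blocks the
/// //         monitors and the manager missed while offline (not shown).
///
/// // (7) Hand each ChannelMonitor to your chain::Watch implementation (for example via
/// //     chain::Watch::watch_channel) so it is watched going forward.
/// ```
///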
16495/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
16496/// [`ChainMonitor::load_existing_monitor`]: crate::chain::chainmonitor::ChainMonitor::load_existing_monitor
16497pub struct ChannelManagerReadArgs<
16498	'a,
16499	M: Deref,
16500	T: Deref,
16501	ES: Deref,
16502	NS: Deref,
16503	SP: Deref,
16504	F: Deref,
16505	R: Deref,
16506	MR: Deref,
16507	L: Deref + Clone,
16508> where
16509	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
16510	T::Target: BroadcasterInterface,
16511	ES::Target: EntropySource,
16512	NS::Target: NodeSigner,
16513	SP::Target: SignerProvider,
16514	F::Target: FeeEstimator,
16515	R::Target: Router,
16516	MR::Target: MessageRouter,
16517	L::Target: Logger,
16518{
16519	/// A cryptographically secure source of entropy.
16520	pub entropy_source: ES,
16521
16522	/// A signer that is able to perform node-scoped cryptographic operations.
16523	pub node_signer: NS,
16524
16525	/// The keys provider which will give us relevant keys. Some keys will be loaded during
16526	/// deserialization and [`SignerProvider::derive_channel_signer`] will be used to derive
16527	/// per-Channel signing data.
16528	pub signer_provider: SP,
16529
16530	/// The fee_estimator for use in the ChannelManager in the future.
16531	///
16532	/// No calls to the FeeEstimator will be made during deserialization.
16533	pub fee_estimator: F,
16534	/// The chain::Watch for use in the ChannelManager in the future.
16535	///
16536	/// No calls to the chain::Watch will be made during deserialization. It is assumed that
16537	/// you have deserialized ChannelMonitors separately and will add them to your
16538	/// chain::Watch after deserializing this ChannelManager.
16539	pub chain_monitor: M,
16540
16541	/// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
16542	/// used to broadcast the latest local commitment transactions of channels which must be
16543	/// force-closed during deserialization.
16544	pub tx_broadcaster: T,
16545	/// The router which will be used in the ChannelManager in the future for finding routes
16546	/// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding.
16547	///
16548	/// No calls to the router will be made during deserialization.
16549	pub router: R,
16550	/// The [`MessageRouter`] used for constructing [`BlindedMessagePath`]s for [`Offer`]s,
16551	/// [`Refund`]s, and any reply paths.
16552	///
16553	/// [`BlindedMessagePath`]: crate::blinded_path::message::BlindedMessagePath
16554	pub message_router: MR,
16555	/// The Logger for use in the ChannelManager and which may be used to log information during
16556	/// deserialization.
16557	pub logger: L,
16558	/// Default settings used for new channels. Any existing channels will continue to use the
16559	/// runtime settings which were stored when the ChannelManager was serialized.
16560	pub config: UserConfig,
16561
16562	/// A map from channel IDs to ChannelMonitors for those channels.
16563	///
16564	/// If a monitor is inconsistent with the channel state during deserialization the channel will
16565	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
16566/// is true for missing channels as well. If we find channel data for which there is no
16567/// corresponding monitor, Err(DecodeError::InvalidValue) will be returned.
16568	///
16569	/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
16570	/// this struct.
16571	///
16572	/// This is not exported to bindings users because we have no HashMap bindings
16573	pub channel_monitors:
16574		HashMap<ChannelId, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
16575}
16576
16577impl<
16578		'a,
16579		M: Deref,
16580		T: Deref,
16581		ES: Deref,
16582		NS: Deref,
16583		SP: Deref,
16584		F: Deref,
16585		R: Deref,
16586		MR: Deref,
16587		L: Deref + Clone,
16588	> ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
16589where
16590	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
16591	T::Target: BroadcasterInterface,
16592	ES::Target: EntropySource,
16593	NS::Target: NodeSigner,
16594	SP::Target: SignerProvider,
16595	F::Target: FeeEstimator,
16596	R::Target: Router,
16597	MR::Target: MessageRouter,
16598	L::Target: Logger,
16599{
16600	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
16601	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
16602	/// populate a HashMap directly from C.
16603	pub fn new(
16604		entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
16605		chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
16606		config: UserConfig,
16607		mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
16608	) -> Self {
16609		Self {
16610			entropy_source,
16611			node_signer,
16612			signer_provider,
16613			fee_estimator,
16614			chain_monitor,
16615			tx_broadcaster,
16616			router,
16617			message_router,
16618			logger,
16619			config,
16620			channel_monitors: hash_map_from_iter(
16621				channel_monitors.drain(..).map(|monitor| (monitor.channel_id(), monitor)),
16622			),
16623		}
16624	}
16625}
16626
16627// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
16628// SimpleArcChannelManager type:
16629impl<
16630		'a,
16631		M: Deref,
16632		T: Deref,
16633		ES: Deref,
16634		NS: Deref,
16635		SP: Deref,
16636		F: Deref,
16637		R: Deref,
16638		MR: Deref,
16639		L: Deref + Clone,
16640	> ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>>
16641	for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
16642where
16643	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
16644	T::Target: BroadcasterInterface,
16645	ES::Target: EntropySource,
16646	NS::Target: NodeSigner,
16647	SP::Target: SignerProvider,
16648	F::Target: FeeEstimator,
16649	R::Target: Router,
16650	MR::Target: MessageRouter,
16651	L::Target: Logger,
16652{
16653	fn read<Reader: io::Read>(
16654		reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>,
16655	) -> Result<Self, DecodeError> {
16656		let (blockhash, chan_manager) =
16657			<(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
16658		Ok((blockhash, Arc::new(chan_manager)))
16659	}
16660}
16661
16662impl<
16663		'a,
16664		M: Deref,
16665		T: Deref,
16666		ES: Deref,
16667		NS: Deref,
16668		SP: Deref,
16669		F: Deref,
16670		R: Deref,
16671		MR: Deref,
16672		L: Deref + Clone,
16673	> ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>>
16674	for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
16675where
16676	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
16677	T::Target: BroadcasterInterface,
16678	ES::Target: EntropySource,
16679	NS::Target: NodeSigner,
16680	SP::Target: SignerProvider,
16681	F::Target: FeeEstimator,
16682	R::Target: Router,
16683	MR::Target: MessageRouter,
16684	L::Target: Logger,
16685{
16686	fn read<Reader: io::Read>(
16687		reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>,
16688	) -> Result<Self, DecodeError> {
16689		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
16690
16691		let chain_hash: ChainHash = Readable::read(reader)?;
16692		let best_block_height: u32 = Readable::read(reader)?;
16693		let best_block_hash: BlockHash = Readable::read(reader)?;
16694
16695		let empty_peer_state = || PeerState {
16696			channel_by_id: new_hash_map(),
16697			inbound_channel_request_by_id: new_hash_map(),
16698			latest_features: InitFeatures::empty(),
16699			pending_msg_events: Vec::new(),
16700			in_flight_monitor_updates: BTreeMap::new(),
16701			monitor_update_blocked_actions: BTreeMap::new(),
16702			actions_blocking_raa_monitor_updates: BTreeMap::new(),
16703			closed_channel_monitor_update_ids: BTreeMap::new(),
16704			peer_storage: Vec::new(),
16705			is_connected: false,
16706		};
16707
16708		let mut failed_htlcs = Vec::new();
16709		let channel_count: u64 = Readable::read(reader)?;
16710		let mut channel_id_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
16711		let mut per_peer_state = hash_map_with_capacity(cmp::min(
16712			channel_count as usize,
16713			MAX_ALLOC_SIZE / mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>(),
16714		));
16715		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
16716		let mut channel_closures = VecDeque::new();
16717		let mut close_background_events = Vec::new();
16718		for _ in 0..channel_count {
16719			let mut channel: FundedChannel<SP> = FundedChannel::read(
16720				reader,
16721				(
16722					&args.entropy_source,
16723					&args.signer_provider,
16724					&provided_channel_type_features(&args.config),
16725				),
16726			)?;
16727			let logger = WithChannelContext::from(&args.logger, &channel.context, None);
16728			let channel_id = channel.context.channel_id();
16729			channel_id_set.insert(channel_id);
16730			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&channel_id) {
16731				if channel.get_cur_holder_commitment_transaction_number()
16732					> monitor.get_cur_holder_commitment_number()
16733					|| channel.get_revoked_counterparty_commitment_transaction_number()
16734						> monitor.get_min_seen_secret()
16735					|| channel.get_cur_counterparty_commitment_transaction_number()
16736						> monitor.get_cur_counterparty_commitment_number()
16737					|| channel.context.get_latest_monitor_update_id()
16738						< monitor.get_latest_update_id()
16739				{
16740					// If the channel is behind the monitor, close the channel:
16741					log_error!(
16742						logger,
16743						"A ChannelManager is stale compared to the current ChannelMonitor!"
16744					);
16745					log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
16746					if channel.context.get_latest_monitor_update_id()
16747						< monitor.get_latest_update_id()
16748					{
16749						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
16750							&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
16751					}
16752					if channel.get_cur_holder_commitment_transaction_number()
16753						> monitor.get_cur_holder_commitment_number()
16754					{
16755						log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
16756							&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
16757					}
16758					if channel.get_revoked_counterparty_commitment_transaction_number()
16759						> monitor.get_min_seen_secret()
16760					{
16761						log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
16762							&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
16763					}
16764					if channel.get_cur_counterparty_commitment_transaction_number()
16765						> monitor.get_cur_counterparty_commitment_number()
16766					{
16767						log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
16768							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
16769					}
16770					let shutdown_result =
16771						channel.force_shutdown(ClosureReason::OutdatedChannelManager);
16772					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
16773						return Err(DecodeError::InvalidValue);
16774					}
16775					if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) =
16776						shutdown_result.monitor_update
16777					{
16778						// Our channel information is out of sync with the `ChannelMonitor`, so
16779						// force the update to use the `ChannelMonitor`'s update_id for the close
16780						// update.
16781						let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
16782						update.update_id = latest_update_id;
16783						per_peer_state
16784							.entry(counterparty_node_id)
16785							.or_insert_with(|| Mutex::new(empty_peer_state()))
16786							.lock()
16787							.unwrap()
16788							.closed_channel_monitor_update_ids
16789							.entry(channel_id)
16790							.and_modify(|v| *v = cmp::max(latest_update_id, *v))
16791							.or_insert(latest_update_id);
16792
16793						close_background_events.push(
16794							BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
16795								counterparty_node_id,
16796								funding_txo,
16797								channel_id,
16798								update,
16799							},
16800						);
16801					}
16802					for (source, hash, cp_id, chan_id) in shutdown_result.dropped_outbound_htlcs {
16803						let reason = LocalHTLCFailureReason::ChannelClosed;
16804						failed_htlcs.push((source, hash, cp_id, chan_id, reason, None));
16805					}
16806					channel_closures.push_back((
16807						events::Event::ChannelClosed {
16808							channel_id: channel.context.channel_id(),
16809							user_channel_id: channel.context.get_user_id(),
16810							reason: ClosureReason::OutdatedChannelManager,
16811							counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
16812							channel_capacity_sats: Some(channel.funding.get_value_satoshis()),
16813							channel_funding_txo: channel.funding.get_funding_txo(),
16814							last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()),
16815						},
16816						None,
16817					));
16818					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
16819						let mut found_htlc = false;
16820						for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
16821							if *channel_htlc_source == monitor_htlc_source {
16822								found_htlc = true;
16823								break;
16824							}
16825						}
16826						if !found_htlc {
16827							// If we have some HTLCs in the channel which are not present in the newer
16828							// ChannelMonitor, they have been removed and should be failed back to
16829							// ensure we don't forget them entirely. Note that if the missing HTLC(s)
16830							// were actually claimed we'd have generated and ensured the previous-hop
16831							// claim update ChannelMonitor updates were persisted prior to persisting
16832							// the ChannelMonitor update for the forward leg, so attempting to fail the
16833							// backwards leg of the HTLC will simply be rejected.
16834							let logger = WithChannelContext::from(
16835								&args.logger,
16836								&channel.context,
16837								Some(*payment_hash),
16838							);
16839							log_info!(logger,
16840								"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
16841								&payment_hash, &channel.context.channel_id());
16842							failed_htlcs.push((
16843								channel_htlc_source.clone(),
16844								*payment_hash,
16845								channel.context.get_counterparty_node_id(),
16846								channel.context.channel_id(),
16847								LocalHTLCFailureReason::ChannelClosed,
16848								None,
16849							));
16850						}
16851					}
16852				} else {
16853					channel.on_startup_drop_completed_blocked_mon_updates_through(
16854						&logger,
16855						monitor.get_latest_update_id(),
16856					);
16857					log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
16858						&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
16859						monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
16860					if let Some(short_channel_id) = channel.funding.get_short_channel_id() {
16861						short_to_chan_info.insert(
16862							short_channel_id,
16863							(
16864								channel.context.get_counterparty_node_id(),
16865								channel.context.channel_id(),
16866							),
16867						);
16868					}
16869
16870					for short_channel_id in channel.context.historical_scids() {
16871						let cp_id = channel.context.get_counterparty_node_id();
16872						let chan_id = channel.context.channel_id();
16873						short_to_chan_info.insert(*short_channel_id, (cp_id, chan_id));
16874					}
16875
16876					per_peer_state
16877						.entry(channel.context.get_counterparty_node_id())
16878						.or_insert_with(|| Mutex::new(empty_peer_state()))
16879						.get_mut()
16880						.unwrap()
16881						.channel_by_id
16882						.insert(channel.context.channel_id(), Channel::from(channel));
16883				}
16884			} else if channel.is_awaiting_initial_mon_persist() {
16885				// If we were persisted and shut down while the initial ChannelMonitor persistence
16886				// was in-progress, we never broadcasted the funding transaction and can still
16887				// safely discard the channel.
16888				channel_closures.push_back((
16889					events::Event::ChannelClosed {
16890						channel_id: channel.context.channel_id(),
16891						user_channel_id: channel.context.get_user_id(),
16892						reason: ClosureReason::DisconnectedPeer,
16893						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
16894						channel_capacity_sats: Some(channel.funding.get_value_satoshis()),
16895						channel_funding_txo: channel.funding.get_funding_txo(),
16896						last_local_balance_msat: Some(channel.funding.get_value_to_self_msat()),
16897					},
16898					None,
16899				));
16900			} else {
16901				log_error!(
16902					logger,
16903					"Missing ChannelMonitor for channel {} needed by ChannelManager.",
16904					&channel.context.channel_id()
16905				);
16906				log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
16907				log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
16908				log_error!(
16909					logger,
16910					" Without the ChannelMonitor we cannot continue without risking funds."
16911				);
16912				log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
16913				return Err(DecodeError::InvalidValue);
16914			}
16915		}
16916
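		// Now walk all the monitors we were handed for channels which were not loaded above. These
		// correspond to channels the manager no longer tracks (typically already-closed channels)
		// for which we may still need to queue a force-close monitor update.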
16917		for (channel_id, monitor) in args.channel_monitors.iter() {
16918			if !channel_id_set.contains(channel_id) {
16919				let mut should_queue_fc_update = false;
16920				let counterparty_node_id = monitor.get_counterparty_node_id();
16921
16922				// If the ChannelMonitor had any updates, we may need to update it further and
16923				// thus track it in `closed_channel_monitor_update_ids`. If the channel never
16924				// had any updates at all, there can't be any HTLCs pending which we need to
16925				// claim.
16926				// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
16927				// provide it with a closure update its `update_id` will be at 1.
16928				if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
16929					should_queue_fc_update = !monitor.no_further_updates_allowed();
16930					let mut latest_update_id = monitor.get_latest_update_id();
16931					if should_queue_fc_update {
16932						// Note that for channels closed pre-0.1, the latest update_id is
16933						// `u64::MAX`.
16934						latest_update_id = latest_update_id.saturating_add(1);
16935					}
16936					per_peer_state
16937						.entry(counterparty_node_id)
16938						.or_insert_with(|| Mutex::new(empty_peer_state()))
16939						.lock()
16940						.unwrap()
16941						.closed_channel_monitor_update_ids
16942						.entry(monitor.channel_id())
16943						.and_modify(|v| *v = cmp::max(latest_update_id, *v))
16944						.or_insert(latest_update_id);
16945				}
16946
16947				if !should_queue_fc_update {
16948					continue;
16949				}
16950
16951				let logger = WithChannelMonitor::from(&args.logger, monitor, None);
16952				let channel_id = monitor.channel_id();
16953				log_info!(
16954					logger,
16955					"Queueing monitor update to ensure missing channel {} is force closed",
16956					&channel_id
16957				);
16958				let monitor_update = ChannelMonitorUpdate {
16959					update_id: monitor.get_latest_update_id().saturating_add(1),
16960					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed {
16961						should_broadcast: true,
16962					}],
16963					channel_id: Some(monitor.channel_id()),
16964				};
16965				let funding_txo = monitor.get_funding_txo();
16966				let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
16967					counterparty_node_id,
16968					funding_txo,
16969					channel_id,
16970					update: monitor_update,
16971				};
16972				close_background_events.push(update);
16973			}
16974		}
16975
16976		const MAX_ALLOC_SIZE: usize = 1024 * 64;
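		// Read the per-SCID queues of HTLCs which were awaiting forwarding when the manager was
		// last serialized.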
16977		let forward_htlcs_count: u64 = Readable::read(reader)?;
16978		let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
16979		for _ in 0..forward_htlcs_count {
16980			let short_channel_id = Readable::read(reader)?;
16981			let pending_forwards_count: u64 = Readable::read(reader)?;
16982			let mut pending_forwards = Vec::with_capacity(cmp::min(
16983				pending_forwards_count as usize,
16984				MAX_ALLOC_SIZE / mem::size_of::<HTLCForwardInfo>(),
16985			));
16986			for _ in 0..pending_forwards_count {
16987				pending_forwards.push(Readable::read(reader)?);
16988			}
16989			forward_htlcs.insert(short_channel_id, pending_forwards);
16990		}
16991
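		// Read the HTLCs which we hold and which are claimable by us, keyed by payment hash. Their
		// payment purposes and onion fields are read from the TLV fields further below.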
16992		let claimable_htlcs_count: u64 = Readable::read(reader)?;
16993		let mut claimable_htlcs_list =
16994			Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
16995		for _ in 0..claimable_htlcs_count {
16996			let payment_hash = Readable::read(reader)?;
16997			let previous_hops_len: u64 = Readable::read(reader)?;
16998			let mut previous_hops = Vec::with_capacity(cmp::min(
16999				previous_hops_len as usize,
17000				MAX_ALLOC_SIZE / mem::size_of::<ClaimableHTLC>(),
17001			));
17002			for _ in 0..previous_hops_len {
17003				previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
17004			}
17005			claimable_htlcs_list.push((payment_hash, previous_hops));
17006		}
17007
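		// Read each known peer's most recently advertised feature set, attaching it to any peer
		// state created while loading that peer's channels above.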
17008		let peer_count: u64 = Readable::read(reader)?;
17009		for _ in 0..peer_count {
17010			let peer_pubkey: PublicKey = Readable::read(reader)?;
17011			let latest_features = Readable::read(reader)?;
17012			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
17013				peer_state.get_mut().unwrap().latest_features = latest_features;
17014			}
17015		}
17016
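		// Read the queue of pending events. Events which read back as `None` (those which are not
		// meant to be persisted and are regenerated on restart) are simply skipped.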
17017		let event_count: u64 = Readable::read(reader)?;
17018		let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
17019			VecDeque::with_capacity(cmp::min(
17020				event_count as usize,
17021				MAX_ALLOC_SIZE / mem::size_of::<(events::Event, Option<EventCompletionAction>)>(),
17022			));
17023		for _ in 0..event_count {
17024			match MaybeReadable::read(reader)? {
17025				Some(event) => pending_events_read.push_back((event, None)),
17026				None => continue,
17027			}
17028		}
17029
17030		let background_event_count: u64 = Readable::read(reader)?;
17031		for _ in 0..background_event_count {
17032			match <u8 as Readable>::read(reader)? {
17033				0 => {
17034					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here;
17035					// however, we really don't (and never did) need them - we regenerate all
17036					// on-startup monitor updates.
17037					let _: OutPoint = Readable::read(reader)?;
17038					let _: ChannelMonitorUpdate = Readable::read(reader)?;
17039				},
17040				_ => return Err(DecodeError::InvalidValue),
17041			}
17042		}
17043
17044		let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
17045		let highest_seen_timestamp: u32 = Readable::read(reader)?;
17046
17047		// The last version where a pending inbound payment may have been added was 0.0.116.
17048		let pending_inbound_payment_count: u64 = Readable::read(reader)?;
17049		for _ in 0..pending_inbound_payment_count {
17050			let payment_hash: PaymentHash = Readable::read(reader)?;
17051			let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
17052			let inbound: PendingInboundPayment = Readable::read(reader)?;
17053			log_warn!(
17054				logger,
17055				"Ignoring deprecated pending inbound payment with payment hash {}: {:?}",
17056				payment_hash,
17057				inbound
17058			);
17059		}
17060
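		// Read the legacy, pre-TLV map of pending outbound payments. It is only used if neither of
		// the newer TLV-based outbound payment maps (read below) is present.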
17061		let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
17062		let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
17063			hash_map_with_capacity(cmp::min(
17064				pending_outbound_payments_count_compat as usize,
17065				MAX_ALLOC_SIZE / 32,
17066			));
17067		for _ in 0..pending_outbound_payments_count_compat {
17068			let session_priv = Readable::read(reader)?;
17069			let payment = PendingOutboundPayment::Legacy {
17070				session_privs: hash_set_from_iter([session_priv]),
17071			};
17072			if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
17073				return Err(DecodeError::InvalidValue);
17074			};
17075		}
17076
17077		// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
17078		let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> =
17079			None;
17080		let mut pending_outbound_payments = None;
17081		let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> =
17082			Some(new_hash_map());
17083		let mut received_network_pubkey: Option<PublicKey> = None;
17084		let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
17085		let mut probing_cookie_secret: Option<[u8; 32]> = None;
17086		let mut claimable_htlc_purposes = None;
17087		let mut claimable_htlc_onion_fields = None;
17088		let mut pending_claiming_payments = Some(new_hash_map());
17089		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> =
17090			Some(Vec::new());
17091		let mut events_override = None;
17092		let mut legacy_in_flight_monitor_updates: Option<
17093			HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>,
17094		> = None;
17095		// We use this one over the legacy since they represent the same data, just with a different
17096		// key. We still need to read the legacy one as it's an even TLV.
17097		let mut in_flight_monitor_updates: Option<
17098			HashMap<(PublicKey, ChannelId), Vec<ChannelMonitorUpdate>>,
17099		> = None;
17100		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
17101		let mut inbound_payment_id_secret = None;
17102		let mut peer_storage_dir: Option<Vec<(PublicKey, Vec<u8>)>> = None;
17103		let mut async_receive_offer_cache: AsyncReceiveOfferCache = AsyncReceiveOfferCache::new();
17104		read_tlv_fields!(reader, {
17105			(1, pending_outbound_payments_no_retry, option),
17106			(2, pending_intercepted_htlcs, option),
17107			(3, pending_outbound_payments, option),
17108			(4, pending_claiming_payments, option),
17109			(5, received_network_pubkey, option),
17110			(6, monitor_update_blocked_actions_per_peer, option),
17111			(7, fake_scid_rand_bytes, option),
17112			(8, events_override, option),
17113			(9, claimable_htlc_purposes, optional_vec),
17114			(10, legacy_in_flight_monitor_updates, option),
17115			(11, probing_cookie_secret, option),
17116			(13, claimable_htlc_onion_fields, optional_vec),
17117			(14, decode_update_add_htlcs, option),
17118			(15, inbound_payment_id_secret, option),
17119			(17, in_flight_monitor_updates, option),
17120			(19, peer_storage_dir, optional_vec),
17121			(21, async_receive_offer_cache, (default_value, async_receive_offer_cache)),
17122		});
17123		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
17124		let peer_storage_dir: Vec<(PublicKey, Vec<u8>)> = peer_storage_dir.unwrap_or_else(Vec::new);
17125		if fake_scid_rand_bytes.is_none() {
17126			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
17127		}
17128
17129		if probing_cookie_secret.is_none() {
17130			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
17131		}
17132
17133		if inbound_payment_id_secret.is_none() {
17134			inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
17135		}
17136
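		// Prefer the event queue written in the TLV fields (which can include completion actions)
		// over the legacy queue read above, then surface any closure events generated while
		// loading channels.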
17137		if let Some(events) = events_override {
17138			pending_events_read = events;
17139		}
17140
17141		if !channel_closures.is_empty() {
17142			pending_events_read.append(&mut channel_closures);
17143		}
17144
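		// Reconcile the generations of outbound payment serialization: prefer the newer TLV map,
		// fall back to the no-retry TLV map, and finally to the legacy non-TLV map read above.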
17145		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
17146			pending_outbound_payments = Some(pending_outbound_payments_compat);
17147		} else if pending_outbound_payments.is_none() {
17148			let mut outbounds = new_hash_map();
17149			for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
17150				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
17151			}
17152			pending_outbound_payments = Some(outbounds);
17153		}
17154		let pending_outbounds =
17155			OutboundPayments::new(pending_outbound_payments.unwrap(), args.logger.clone());
17156
17157		for (peer_pubkey, peer_storage) in peer_storage_dir {
17158			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
17159				peer_state.get_mut().unwrap().peer_storage = peer_storage;
17160			}
17161		}
17162
17163		// Handle transitioning from the legacy TLV to the new one on upgrades.
17164		if let Some(legacy_in_flight_upds) = legacy_in_flight_monitor_updates {
17165			// We should never serialize an empty map.
17166			if legacy_in_flight_upds.is_empty() {
17167				return Err(DecodeError::InvalidValue);
17168			}
17169			if in_flight_monitor_updates.is_none() {
17170				let in_flight_upds =
17171					in_flight_monitor_updates.get_or_insert_with(|| new_hash_map());
17172				for ((counterparty_node_id, funding_txo), updates) in legacy_in_flight_upds {
17173					// All channels with legacy in flight monitor updates are v1 channels.
17174					let channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
17175					in_flight_upds.insert((counterparty_node_id, channel_id), updates);
17176				}
17177			} else {
17178				// We should never serialize an empty map.
17179				if in_flight_monitor_updates.as_ref().unwrap().is_empty() {
17180					return Err(DecodeError::InvalidValue);
17181				}
17182			}
17183		}
17184
17185		// We have to replay (or skip, if they were completed after we wrote the `ChannelManager`)
17186		// each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to
17187		// check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we
17188		// replayed, and for each monitor update we have to replay we have to ensure there's a
17189		// `ChannelMonitor` for it.
17190		//
17191		// In order to do so we first walk all of our live channels (so that we can check their
17192		// state immediately after doing the update replays, when we have the `update_id`s
17193		// available) and then walk any remaining in-flight updates.
17194		//
17195		// Because the actual handling of the in-flight updates is the same, it's macro'ized here:
17196		let mut pending_background_events = Vec::new();
17197		macro_rules! handle_in_flight_updates {
17198			($counterparty_node_id: expr, $chan_in_flight_upds: expr, $monitor: expr,
17199			 $peer_state: expr, $logger: expr, $channel_info_log: expr
17200			) => { {
17201				let mut max_in_flight_update_id = 0;
17202				let starting_len = $chan_in_flight_upds.len();
17203				$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
17204				if $chan_in_flight_upds.len() < starting_len {
17205					log_debug!(
17206						$logger,
17207						"{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
17208						starting_len - $chan_in_flight_upds.len()
17209					);
17210				}
17211				let funding_txo = $monitor.get_funding_txo();
17212				for update in $chan_in_flight_upds.iter() {
17213					log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
17214						update.update_id, $channel_info_log, &$monitor.channel_id());
17215					max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
17216					pending_background_events.push(
17217						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
17218							counterparty_node_id: $counterparty_node_id,
17219							funding_txo: funding_txo,
17220							channel_id: $monitor.channel_id(),
17221							update: update.clone(),
17222						});
17223				}
17224				if $chan_in_flight_upds.is_empty() {
17225					// We had some updates to apply, but it turns out they had completed before we
17226					// were serialized; we just weren't notified of that. Thus, we may have to run
17227					// the completion actions for any monitor updates, but otherwise are done.
17228					pending_background_events.push(
17229						BackgroundEvent::MonitorUpdatesComplete {
17230							counterparty_node_id: $counterparty_node_id,
17231							channel_id: $monitor.channel_id(),
17232						});
17233				} else {
17234					$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
17235						.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
17236						.or_insert(max_in_flight_update_id);
17237				}
17238				if $peer_state.in_flight_monitor_updates.insert($monitor.channel_id(), (funding_txo, $chan_in_flight_upds)).is_some() {
17239					log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
17240					return Err(DecodeError::InvalidValue);
17241				}
17242				max_in_flight_update_id
17243			} }
17244		}
17245
17246		for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
17247			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
17248			let peer_state = &mut *peer_state_lock;
17249			for (chan_id, chan) in peer_state.channel_by_id.iter() {
17250				if let Some(funded_chan) = chan.as_funded() {
17251					let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None);
17252
17253					// Channels that were persisted have to be funded, otherwise they should have been
17254					// discarded.
17255					let monitor = args
17256						.channel_monitors
17257						.get(chan_id)
17258						.expect("We already checked for monitor presence when loading channels");
17259					let mut max_in_flight_update_id = monitor.get_latest_update_id();
17260					if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
17261						if let Some(mut chan_in_flight_upds) =
17262							in_flight_upds.remove(&(*counterparty_id, *chan_id))
17263						{
17264							max_in_flight_update_id = cmp::max(
17265								max_in_flight_update_id,
17266								handle_in_flight_updates!(
17267									*counterparty_id,
17268									chan_in_flight_upds,
17269									monitor,
17270									peer_state,
17271									logger,
17272									""
17273								),
17274							);
17275						}
17276					}
17277					if funded_chan.get_latest_unblocked_monitor_update_id()
17278						> max_in_flight_update_id
17279					{
17280						// If the channel is ahead of the monitor, return DangerousValue:
17281						log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
17282						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
17283							chan_id, monitor.get_latest_update_id(), max_in_flight_update_id);
17284						log_error!(
17285							logger,
17286							" but the ChannelManager is at update_id {}.",
17287							funded_chan.get_latest_unblocked_monitor_update_id()
17288						);
17289						log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
17290						log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
17291						log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
17292						log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
17293						return Err(DecodeError::DangerousValue);
17294					}
17295				} else {
17296					// We shouldn't have persisted (or read) any unfunded channel types so none should have been
17297					// created in this `channel_by_id` map.
17298					debug_assert!(false);
17299					return Err(DecodeError::InvalidValue);
17300				}
17301			}
17302		}
17303
17304		if let Some(in_flight_upds) = in_flight_monitor_updates {
17305			for ((counterparty_id, channel_id), mut chan_in_flight_updates) in in_flight_upds {
17306				let logger =
17307					WithContext::from(&args.logger, Some(counterparty_id), Some(channel_id), None);
17308				if let Some(monitor) = args.channel_monitors.get(&channel_id) {
17309					// Now that we've removed all the in-flight monitor updates for channels that are
17310					// still open, we need to replay any monitor updates that are for closed channels,
17311					// creating the necessary peer_state entries as we go.
17312					let peer_state_mutex = per_peer_state
17313						.entry(counterparty_id)
17314						.or_insert_with(|| Mutex::new(empty_peer_state()));
17315					let mut peer_state = peer_state_mutex.lock().unwrap();
17316					handle_in_flight_updates!(
17317						counterparty_id,
17318						chan_in_flight_updates,
17319						monitor,
17320						peer_state,
17321						logger,
17322						"closed "
17323					);
17324				} else {
17325					log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
17326					log_error!(
17327						logger,
17328						" The ChannelMonitor for channel {} is missing.",
17329						channel_id
17330					);
17331					log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
17332					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
17333					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
17334					log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
17335					log_error!(
17336						logger,
17337						" Pending in-flight updates are: {:?}",
17338						chan_in_flight_updates
17339					);
17340					return Err(DecodeError::InvalidValue);
17341				}
17342			}
17343		}
17344
17345		// The newly generated `close_background_events` have to be added after any updates that
17346		// were already in-flight on shutdown, so we append them here.
17347		pending_background_events.reserve(close_background_events.len());
17348		'each_bg_event: for mut new_event in close_background_events {
17349			if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
17350				counterparty_node_id,
17351				funding_txo,
17352				channel_id,
17353				update,
17354			} = &mut new_event
17355			{
17356				debug_assert_eq!(update.updates.len(), 1);
17357				debug_assert!(matches!(
17358					update.updates[0],
17359					ChannelMonitorUpdateStep::ChannelForceClosed { .. }
17360				));
17361				let mut updated_id = false;
17362				for pending_event in pending_background_events.iter() {
17363					if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
17364						counterparty_node_id: pending_cp,
17365						funding_txo: pending_funding,
17366						channel_id: pending_chan_id,
17367						update: pending_update,
17368					} = pending_event
17369					{
17370						let for_same_channel = counterparty_node_id == pending_cp
17371							&& funding_txo == pending_funding
17372							&& channel_id == pending_chan_id;
17373						if for_same_channel {
17374							debug_assert!(update.update_id >= pending_update.update_id);
17375							if pending_update.updates.iter().any(|upd| {
17376								matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })
17377							}) {
17378								// If the background event we're looking at is just
17379								// force-closing the channel which already has a pending
17380								// force-close update, no need to duplicate it.
17381								continue 'each_bg_event;
17382							}
17383							update.update_id = pending_update.update_id.saturating_add(1);
17384							updated_id = true;
17385						}
17386					}
17387				}
17388				let mut per_peer_state = per_peer_state
17389					.get(counterparty_node_id)
17390					.expect("If we have pending updates for a channel it must have an entry")
17391					.lock()
17392					.unwrap();
17393				if updated_id {
17394					per_peer_state
17395						.closed_channel_monitor_update_ids
17396						.entry(*channel_id)
17397						.and_modify(|v| *v = cmp::max(update.update_id, *v))
17398						.or_insert(update.update_id);
17399				}
17400				let in_flight_updates = &mut per_peer_state
17401					.in_flight_monitor_updates
17402					.entry(*channel_id)
17403					.or_insert_with(|| (*funding_txo, Vec::new()))
17404					.1;
17405				debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
17406				in_flight_updates.push(update.clone());
17407			}
17408			pending_background_events.push(new_event);
17409		}
17410
17411		// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
17412		// should ensure we try them again on the inbound edge. We put them here and do so after we
17413		// have a fully-constructed `ChannelManager` at the end.
17414		let mut pending_claims_to_replay = Vec::new();
17415
17416		{
17417			// If we're tracking pending payments, ensure we haven't lost any by looking at the
17418			// ChannelMonitor data for any channels for which we do not have authoritative state
17419			// (i.e. those for which we just force-closed above or we otherwise don't have a
17420			// corresponding `Channel` at all).
17421			// This avoids several edge-cases where we would otherwise "forget" about pending
17422			// payments which are still in-flight via their on-chain state.
17423			// We only rebuild the pending payments map if we were most recently serialized by
17424			// 0.0.102+
17425			//
17426			// First we rebuild all pending payments, then separately re-claim and re-fail pending
17427			// payments. This avoids edge-cases around MPP payments resulting in redundant actions.
17428			for (channel_id, monitor) in args.channel_monitors.iter() {
17429				let mut is_channel_closed = true;
17430				let counterparty_node_id = monitor.get_counterparty_node_id();
17431				if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
17432					let mut peer_state_lock = peer_state_mtx.lock().unwrap();
17433					let peer_state = &mut *peer_state_lock;
17434					is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
17435				}
17436
17437				if is_channel_closed {
17438					for (htlc_source, (htlc, _)) in monitor.get_all_current_outbound_htlcs() {
17439						let logger = WithChannelMonitor::from(
17440							&args.logger,
17441							monitor,
17442							Some(htlc.payment_hash),
17443						);
17444						if let HTLCSource::OutboundRoute {
17445							payment_id, session_priv, path, ..
17446						} = htlc_source
17447						{
17448							if path.hops.is_empty() {
17449								log_error!(logger, "Got an empty path for a pending payment");
17450								return Err(DecodeError::InvalidValue);
17451							}
17452
17453							let mut session_priv_bytes = [0; 32];
17454							session_priv_bytes[..].copy_from_slice(&session_priv[..]);
17455							pending_outbounds.insert_from_monitor_on_startup(
17456								payment_id,
17457								htlc.payment_hash,
17458								session_priv_bytes,
17459								&path,
17460								best_block_height,
17461							);
17462						}
17463					}
17464				}
17465			}
17466			for (channel_id, monitor) in args.channel_monitors.iter() {
17467				let mut is_channel_closed = true;
17468				let counterparty_node_id = monitor.get_counterparty_node_id();
17469				if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
17470					let mut peer_state_lock = peer_state_mtx.lock().unwrap();
17471					let peer_state = &mut *peer_state_lock;
17472					is_channel_closed = !peer_state.channel_by_id.contains_key(channel_id);
17473				}
17474
17475				if is_channel_closed {
17476					for (htlc_source, (htlc, preimage_opt)) in
17477						monitor.get_all_current_outbound_htlcs()
17478					{
17479						let logger = WithChannelMonitor::from(
17480							&args.logger,
17481							monitor,
17482							Some(htlc.payment_hash),
17483						);
17484						let htlc_id = SentHTLCId::from_source(&htlc_source);
17485						match htlc_source {
17486							HTLCSource::PreviousHopData(prev_hop_data) => {
17487								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
17488									info.prev_funding_outpoint == prev_hop_data.outpoint
17489										&& info.prev_htlc_id == prev_hop_data.htlc_id
17490								};
17491								// The ChannelMonitor is now responsible for this HTLC's
17492								// failure/success and will let us know what its outcome is. If we
17493								// still have an entry for this HTLC in `forward_htlcs` or
17494								// `pending_intercepted_htlcs`, this `ChannelManager` was apparently not
17495								// persisted after the monitor was persisted when forwarding the payment.
17496								decode_update_add_htlcs.retain(|src_outb_alias, update_add_htlcs| {
17497									update_add_htlcs.retain(|update_add_htlc| {
17498										let matches = *src_outb_alias == prev_hop_data.prev_outbound_scid_alias &&
17499											update_add_htlc.htlc_id == prev_hop_data.htlc_id;
17500										if matches {
17501											log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
17502												&htlc.payment_hash, &monitor.channel_id());
17503										}
17504										!matches
17505									});
17506									!update_add_htlcs.is_empty()
17507								});
17508								forward_htlcs.retain(|_, forwards| {
17509									forwards.retain(|forward| {
17510										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
17511											if pending_forward_matches_htlc(&htlc_info) {
17512												log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
17513													&htlc.payment_hash, &monitor.channel_id());
17514												false
17515											} else { true }
17516										} else { true }
17517									});
17518									!forwards.is_empty()
17519								});
17520								pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
17521									if pending_forward_matches_htlc(&htlc_info) {
17522										log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
17523											&htlc.payment_hash, &monitor.channel_id());
17524										pending_events_read.retain(|(event, _)| {
17525											if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
17526												intercepted_id != ev_id
17527											} else { true }
17528										});
17529										false
17530									} else { true }
17531								});
17532							},
17533							HTLCSource::OutboundRoute {
17534								payment_id,
17535								session_priv,
17536								path,
17537								bolt12_invoice,
17538								..
17539							} => {
17540								if let Some(preimage) = preimage_opt {
17541									let pending_events = Mutex::new(pending_events_read);
17542									let update = PaymentCompleteUpdate {
17543										counterparty_node_id: monitor.get_counterparty_node_id(),
17544										channel_funding_outpoint: monitor.get_funding_txo(),
17545										channel_id: monitor.channel_id(),
17546										htlc_id,
17547									};
17548									let mut compl_action = Some(
17549										EventCompletionAction::ReleasePaymentCompleteChannelMonitorUpdate(update)
17550									);
17551									pending_outbounds.claim_htlc(
17552										payment_id,
17553										preimage,
17554										bolt12_invoice,
17555										session_priv,
17556										path,
17557										true,
17558										&mut compl_action,
17559										&pending_events,
17560									);
17561									// If the completion action was not consumed, then there was no
17562									// payment to claim, and we need to tell the `ChannelMonitor`
17563									// we don't need to hear about the HTLC again, at least as long
17564									// as the PaymentSent event isn't still sitting around in our
17565									// event queue.
17566									let have_action = if compl_action.is_some() {
17567										let pending_events = pending_events.lock().unwrap();
17568										pending_events.iter().any(|(_, act)| *act == compl_action)
17569									} else {
17570										false
17571									};
17572									if !have_action && compl_action.is_some() {
17573										let mut peer_state = per_peer_state
17574											.get(&counterparty_node_id)
17575											.map(|state| state.lock().unwrap())
17576											.expect("Channels originating a preimage must have peer state");
17577										let update_id = peer_state
17578											.closed_channel_monitor_update_ids
17579											.get_mut(channel_id)
17580											.expect("Channels originating a preimage must have a monitor");
17581										// Note that for channels closed pre-0.1, the latest
17582										// update_id is `u64::MAX`.
17583										*update_id = update_id.saturating_add(1);
17584
17585										pending_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
17586											counterparty_node_id: monitor.get_counterparty_node_id(),
17587											funding_txo: monitor.get_funding_txo(),
17588											channel_id: monitor.channel_id(),
17589											update: ChannelMonitorUpdate {
17590												update_id: *update_id,
17591												channel_id: Some(monitor.channel_id()),
17592												updates: vec![ChannelMonitorUpdateStep::ReleasePaymentComplete {
17593													htlc: htlc_id,
17594												}],
17595											},
17596										});
17597									}
17598									pending_events_read = pending_events.into_inner().unwrap();
17599								}
17600							},
17601						}
17602					}
17603					for (htlc_source, payment_hash) in monitor.get_onchain_failed_outbound_htlcs() {
17604						log_info!(
17605							args.logger,
17606							"Failing HTLC with payment hash {} as it was resolved on-chain.",
17607							payment_hash
17608						);
17609						let completion_action = Some(PaymentCompleteUpdate {
17610							counterparty_node_id: monitor.get_counterparty_node_id(),
17611							channel_funding_outpoint: monitor.get_funding_txo(),
17612							channel_id: monitor.channel_id(),
17613							htlc_id: SentHTLCId::from_source(&htlc_source),
17614						});
17615
17616						failed_htlcs.push((
17617							htlc_source,
17618							payment_hash,
17619							monitor.get_counterparty_node_id(),
17620							monitor.channel_id(),
17621							LocalHTLCFailureReason::OnChainTimeout,
17622							completion_action,
17623						));
17624					}
17625				}
17626
17627				// Whether the downstream channel was closed or not, try to re-apply any payment
17628				// preimages from it which may be needed in upstream channels for forwarded
17629				// payments.
17630				let mut fail_read = false;
17631				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
17632					.into_iter()
17633					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
17634						if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
17635							if let Some(payment_preimage) = preimage_opt {
17636								let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.channel_id);
17637								// Note that for channels which have gone to chain,
17638								// `get_all_current_outbound_htlcs` is never pruned and always returns
17639								// a constant set until the monitor is removed/archived. Thus, we
17640								// want to skip replaying claims that have definitely been resolved
17641								// on-chain.
17642
17643								// If the inbound monitor is not present, we assume it was fully
17644								// resolved and properly archived, implying this payment had plenty
17645								// of time to get claimed and we can safely skip any further
17646								// attempts to claim it (they wouldn't succeed anyway as we don't
17647								// have a monitor against which to do so).
17648								let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
17649									monitor
17650								} else {
17651									return None;
17652								};
17653								// Second, if the inbound edge of the payment's monitor has been
17654								// fully claimed we've had at least `ANTI_REORG_DELAY` blocks to
17655								// get any PaymentForwarded event(s) to the user and assume that
17656								// there's no need to try to replay the claim just for that.
17657								let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
17658								if inbound_edge_balances.is_empty() {
17659									return None;
17660								}
17661
17662								if prev_hop.counterparty_node_id.is_none() {
17663									// We no longer support claiming an HTLC where we don't have
17664									// the counterparty_node_id available if the claim has to go to
17665									// a closed channel. It's possible we can get away with it if
17666									// the channel is not yet closed, but it's by no means a
17667									// guarantee.
17668
17669									// Thus, in this case we are a bit more aggressive with our
17670									// pruning - if we have no use for the claim (because the
17671									// inbound edge of the payment's monitor has already claimed
17672									// the HTLC) we skip trying to replay the claim.
17673									let htlc_payment_hash: PaymentHash = payment_preimage.into();
17674									let balance_could_incl_htlc = |bal| match bal {
17675										&Balance::ClaimableOnChannelClose { .. } => {
17676											// The channel is still open, assume we can still
17677											// claim against it
17678											true
17679										},
17680										&Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
17681											payment_hash == htlc_payment_hash
17682										},
17683										_ => false,
17684									};
17685									let htlc_may_be_in_balances =
17686										inbound_edge_balances.iter().any(balance_could_incl_htlc);
17687									if !htlc_may_be_in_balances {
17688										return None;
17689									}
17690
17691									// First check if we're absolutely going to fail - if we need
17692									// to replay this claim to get the preimage into the inbound
17693									// edge monitor but the channel is closed (and thus we'll
17694									// immediately panic if we call claim_funds_from_hop).
17695									if short_to_chan_info.get(&prev_hop.prev_outbound_scid_alias).is_none() {
17696										log_error!(args.logger,
17697											"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124. \
17698											All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1.",
17699											htlc_payment_hash,
17700											payment_preimage,
17701										);
17702										fail_read = true;
17703									}
17704
17705									// At this point we're confident we need the claim, but the
17706									// inbound edge channel is still live. As long as this remains
17707									// the case, we can conceivably proceed, but we run some risk
17708									// of panicking at runtime. The user ideally should have read
17709									// the release notes and we wouldn't be here, but we go ahead
17710									// and let things run in the hope that it'll all just work out.
17711									log_error!(args.logger,
17712										"We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably. \
17713										As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \
17714										All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \
17715										Continuing anyway, though panics may occur!",
17716										htlc_payment_hash,
17717										payment_preimage,
17718									);
17719								}
17720
17721								Some((htlc_source, payment_preimage, htlc.amount_msat,
17722									is_channel_closed, monitor.get_counterparty_node_id(),
17723									monitor.get_funding_txo(), monitor.channel_id()))
17724							} else { None }
17725						} else {
17726						// If it was an outbound payment, we've handled it above - if a preimage
17727						// came in and we persisted the `ChannelManager`, we either handled it and
17728						// are good to go or the channel force-closed - either way we don't have to
17729						// handle the still-live-channel case here.
17730							None
17731						}
17732					});
17733				for tuple in outbound_claimed_htlcs_iter {
17734					pending_claims_to_replay.push(tuple);
17735				}
17736				if fail_read {
17737					return Err(DecodeError::InvalidValue);
17738				}
17739			}
17740		}
17741
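		// Next, reconstruct the set of payments we'd received but had not yet claimed, mapping
		// the serialized HTLC list back into `ClaimablePayment`s keyed by payment hash.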
17742		let expanded_inbound_key = args.node_signer.get_expanded_key();
17743
17744		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
17745		if let Some(purposes) = claimable_htlc_purposes {
17746			if purposes.len() != claimable_htlcs_list.len() {
17747				return Err(DecodeError::InvalidValue);
17748			}
17749			if let Some(onion_fields) = claimable_htlc_onion_fields {
17750				if onion_fields.len() != claimable_htlcs_list.len() {
17751					return Err(DecodeError::InvalidValue);
17752				}
17753				for (purpose, (onion, (payment_hash, htlcs))) in purposes
17754					.into_iter()
17755					.zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
17756				{
17757					let claimable = ClaimablePayment { purpose, htlcs, onion_fields: onion };
17758					let existing_payment = claimable_payments.insert(payment_hash, claimable);
17759					if existing_payment.is_some() {
17760						return Err(DecodeError::InvalidValue);
17761					}
17762				}
17763			} else {
17764				for (purpose, (payment_hash, htlcs)) in
17765					purposes.into_iter().zip(claimable_htlcs_list.into_iter())
17766				{
17767					let claimable = ClaimablePayment { purpose, htlcs, onion_fields: None };
17768					let existing_payment = claimable_payments.insert(payment_hash, claimable);
17769					if existing_payment.is_some() {
17770						return Err(DecodeError::InvalidValue);
17771					}
17772				}
17773			}
17774		} else {
17775				// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but did
17776				// include a `_legacy_hop_data` in the `OnionPayload`.
17777			for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
17778				if htlcs.is_empty() {
17779					return Err(DecodeError::InvalidValue);
17780				}
17781				let purpose = match &htlcs[0].onion_payload {
17782					OnionPayload::Invoice { _legacy_hop_data } => {
17783						if let Some(hop_data) = _legacy_hop_data {
17784							events::PaymentPurpose::Bolt11InvoicePayment {
17785								payment_preimage: match inbound_payment::verify(
17786									payment_hash,
17787									&hop_data,
17788									0,
17789									&expanded_inbound_key,
17790									&args.logger,
17791								) {
17792									Ok((payment_preimage, _)) => payment_preimage,
17793									Err(()) => {
17794										log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
17795										return Err(DecodeError::InvalidValue);
17796									},
17797								},
17798								payment_secret: hop_data.payment_secret,
17799							}
17800						} else {
17801							return Err(DecodeError::InvalidValue);
17802						}
17803					},
17804					OnionPayload::Spontaneous(payment_preimage) => {
17805						events::PaymentPurpose::SpontaneousPayment(*payment_preimage)
17806					},
17807				};
17808				claimable_payments
17809					.insert(payment_hash, ClaimablePayment { purpose, htlcs, onion_fields: None });
17810			}
17811		}
17812
17813		// Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs
17814		// which haven't yet been claimed, we may be missing counterparty_node_id info and would
17815		// panic if we attempted to claim them at this point.
17816		for (payment_hash, payment) in claimable_payments.iter() {
17817			for htlc in payment.htlcs.iter() {
17818				if htlc.prev_hop.counterparty_node_id.is_some() {
17819					continue;
17820				}
17821				if short_to_chan_info.get(&htlc.prev_hop.prev_outbound_scid_alias).is_some() {
17822					log_error!(args.logger,
17823						"We do not have the required information to claim a pending payment with payment hash {} reliably. \
17824						As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \
17825						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \
17826						Continuing anyway, though panics may occur!",
17827						payment_hash,
17828					);
17829				} else {
17830					log_error!(args.logger,
17831						"We do not have the required information to claim a pending payment with payment hash {}. \
17832						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1.",
17833						payment_hash,
17834					);
17835					return Err(DecodeError::InvalidValue);
17836				}
17837			}
17838		}
17839
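		// Re-randomize our secp256k1 context with fresh entropy (which helps protect against
		// side-channel attacks) and sanity-check that the node ID our signer produces matches
		// the one this `ChannelManager` was serialized with, if any.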
17840		let mut secp_ctx = Secp256k1::new();
17841		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
17842
17843		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
17844			Ok(key) => key,
17845			Err(()) => return Err(DecodeError::InvalidValue),
17846		};
17847		if let Some(network_pubkey) = received_network_pubkey {
17848			if network_pubkey != our_network_pubkey {
17849				log_error!(args.logger, "Node ID generated by our signer does not match the node ID this ChannelManager was serialized with.");
17850				return Err(DecodeError::InvalidValue);
17851			}
17852		}
17853
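		// Rebuild the set of outbound SCID aliases, assigning a fresh fake alias to any funded
		// channel which was serialized without one. Duplicate aliases indicate corrupt data (or,
		// rarely, an unlucky collision with an alias we just generated) and cause us to bail.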
17854		let mut outbound_scid_aliases = new_hash_set();
17855		for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
17856			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
17857			let peer_state = &mut *peer_state_lock;
17858			for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
17859				if let Some(funded_chan) = chan.as_funded_mut() {
17860					let logger = WithChannelContext::from(&args.logger, &funded_chan.context, None);
17861					if funded_chan.context.outbound_scid_alias() == 0 {
17862						let mut outbound_scid_alias;
17863						loop {
17864							outbound_scid_alias = fake_scid::Namespace::OutboundAlias
17865								.get_fake_scid(
17866									best_block_height,
17867									&chain_hash,
17868									fake_scid_rand_bytes.as_ref().unwrap(),
17869									&args.entropy_source,
17870								);
17871							if outbound_scid_aliases.insert(outbound_scid_alias) {
17872								break;
17873							}
17874						}
17875						funded_chan.context.set_outbound_scid_alias(outbound_scid_alias);
17876					} else if !outbound_scid_aliases
17877						.insert(funded_chan.context.outbound_scid_alias())
17878					{
17879						// Note that in rare cases it's possible to hit this while reading an older
17880						// channel if we just happened to pick a colliding outbound alias above.
17881						log_error!(
17882							logger,
17883							"Got duplicate outbound SCID alias; {}",
17884							funded_chan.context.outbound_scid_alias()
17885						);
17886						return Err(DecodeError::InvalidValue);
17887					}
17888					if funded_chan.context.is_usable() {
17889						let alias = funded_chan.context.outbound_scid_alias();
17890						let cp_id = funded_chan.context.get_counterparty_node_id();
17891						if short_to_chan_info.insert(alias, (cp_id, *chan_id)).is_some() {
17892							// Note that in rare cases it's possible to hit this while reading an older
17893							// channel if we just happened to pick a colliding outbound alias above.
17894							log_error!(
17895								logger,
17896								"Got duplicate outbound SCID alias; {}",
17897								funded_chan.context.outbound_scid_alias()
17898							);
17899							return Err(DecodeError::InvalidValue);
17900						}
17901					}
17902				} else {
17903					// We shouldn't have persisted (or read) any unfunded channel types, so none should be
17904					// present in this `channel_by_id` map.
17905					debug_assert!(false);
17906					return Err(DecodeError::InvalidValue);
17907				}
17908			}
17909		}
17910
17911		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
17912
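		// Re-apply any serialized post-`ChannelMonitorUpdate` actions, restoring the inter-channel
		// dependencies which hold a channel's `revoke_and_ack` monitor updates until the relevant
		// preimage is durably persisted in the inbound edge's `ChannelMonitor`.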
17913		for (node_id, monitor_update_blocked_actions) in
17914			monitor_update_blocked_actions_per_peer.unwrap()
17915		{
17916			if let Some(peer_state) = per_peer_state.get(&node_id) {
17917				for (channel_id, actions) in monitor_update_blocked_actions.iter() {
17918					let logger =
17919						WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
17920					for action in actions.iter() {
17921						if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
17922							downstream_counterparty_and_funding_outpoint:
17923								Some(EventUnblockedChannel {
17924									counterparty_node_id: blocked_node_id,
17925									funding_txo: _,
17926									channel_id: blocked_channel_id,
17927									blocking_action,
17928								}),
17929							..
17930						} = action
17931						{
17932							if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
17933								log_trace!(logger,
17934									"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
17935									blocked_channel_id);
17936								blocked_peer_state
17937									.lock()
17938									.unwrap()
17939									.actions_blocking_raa_monitor_updates
17940									.entry(*blocked_channel_id)
17941									.or_insert_with(Vec::new)
17942									.push(blocking_action.clone());
17943							} else {
17944								// If the channel we were blocking has closed, we don't need to
17945								// worry about it - the blocked monitor update should never have
17946								// been released from the `Channel` object so it can't have
17947								// completed, and if the channel closed there's no reason to bother
17948								// anymore.
17949							}
17950						}
17951						if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
17952							..
17953						} = action
17954						{
17955							debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
17956						}
17957					}
17958					// Note that we may have a post-update action for a channel that has no pending
17959					// `ChannelMonitorUpdate`s, but unlike the no-peer-state case, it may simply be
17960					// because we had a `ChannelMonitorUpdate` complete after the last time this
17961					// `ChannelManager` was serialized. In that case, we'll run the post-update
17962					// actions as soon as we get going.
17963				}
17964				peer_state.lock().unwrap().monitor_update_blocked_actions =
17965					monitor_update_blocked_actions;
17966			} else {
17967				for actions in monitor_update_blocked_actions.values() {
17968					for action in actions.iter() {
17969						if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
17970							// If there is no state for this channel but we have pending
17971							// post-update actions, it's possible that one was left over from pre-0.1
17972							// payment claims where MPP claims led to a channel blocked on itself
17973							// and later `ChannelMonitorUpdate`s didn't get their post-update
17974							// actions run.
17975							// This should only have happened for `PaymentClaimed` post-update actions,
17976							// which we ignore here.
17977						} else {
17978							let logger = WithContext::from(&args.logger, Some(node_id), None, None);
17979							log_error!(
17980								logger,
17981								"Got blocked actions {:?} without a per-peer-state for {}",
17982								monitor_update_blocked_actions,
17983								node_id
17984							);
17985							return Err(DecodeError::InvalidValue);
17986						}
17987					}
17988				}
17989			}
17990		}
17991
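		// With all the per-channel and per-payment state rebuilt, construct the offers message
		// flow and the `ChannelManager` itself.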
17992		let best_block = BestBlock::new(best_block_hash, best_block_height);
17993		let flow = OffersMessageFlow::new(
17994			chain_hash,
17995			best_block,
17996			our_network_pubkey,
17997			highest_seen_timestamp,
17998			expanded_inbound_key,
17999			args.node_signer.get_receive_auth_key(),
18000			secp_ctx.clone(),
18001			args.message_router,
18002			args.logger.clone(),
18003		)
18004		.with_async_payments_offers_cache(async_receive_offer_cache);
18005
18006		let channel_manager = ChannelManager {
18007			chain_hash,
18008			fee_estimator: bounded_fee_estimator,
18009			chain_monitor: args.chain_monitor,
18010			tx_broadcaster: args.tx_broadcaster,
18011			router: args.router,
18012			flow,
18013
18014			best_block: RwLock::new(best_block),
18015
18016			inbound_payment_key: expanded_inbound_key,
18017			pending_outbound_payments: pending_outbounds,
18018			pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
18019
18020			forward_htlcs: Mutex::new(forward_htlcs),
18021			decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
18022			claimable_payments: Mutex::new(ClaimablePayments {
18023				claimable_payments,
18024				pending_claiming_payments: pending_claiming_payments.unwrap(),
18025			}),
18026			outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
18027			short_to_chan_info: FairRwLock::new(short_to_chan_info),
18028			fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
18029
18030			probing_cookie_secret: probing_cookie_secret.unwrap(),
18031			inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
18032
18033			our_network_pubkey,
18034			secp_ctx,
18035
18036			highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
18037
18038			per_peer_state: FairRwLock::new(per_peer_state),
18039
18040			#[cfg(not(any(test, feature = "_externalize_tests")))]
18041			monitor_update_type: AtomicUsize::new(0),
18042
18043			pending_events: Mutex::new(pending_events_read),
18044			pending_events_processor: AtomicBool::new(false),
18045			pending_htlc_forwards_processor: AtomicBool::new(false),
18046			pending_background_events: Mutex::new(pending_background_events),
18047			total_consistency_lock: RwLock::new(()),
18048			background_events_processed_since_startup: AtomicBool::new(false),
18049
18050			event_persist_notifier: Notifier::new(),
18051			needs_persist_flag: AtomicBool::new(false),
18052
18053			funding_batch_states: Mutex::new(BTreeMap::new()),
18054
18055			pending_broadcast_messages: Mutex::new(Vec::new()),
18056
18057			entropy_source: args.entropy_source,
18058			node_signer: args.node_signer,
18059			signer_provider: args.signer_provider,
18060
18061			last_days_feerates: Mutex::new(VecDeque::new()),
18062
18063			logger: args.logger,
18064			config: RwLock::new(args.config),
18065
18066			#[cfg(feature = "_test_utils")]
18067			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
18068		};
18069
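		// Finally, walk the preimages stored in each `ChannelMonitor` and replay any payment
		// claims which may not have fully completed (i.e. reached every relevant monitor) before
		// we last shut down, deduplicating MPP claims seen via multiple channels.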
18070		let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
18071		for (_, monitor) in args.channel_monitors.iter() {
18072			for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages()
18073			{
18074				if !payment_claims.is_empty() {
18075					for payment_claim in payment_claims {
18076						if processed_claims.contains(&payment_claim.mpp_parts) {
18077							// We might see the same payment multiple times, once for each channel
18078							// over which the MPP payment was received. There's no point in trying
18079							// to claim the same payment again and again, so we check whether the
18080							// HTLCs are the same and skip the payment here.
18081							continue;
18082						}
18083						if payment_claim.mpp_parts.is_empty() {
18084							return Err(DecodeError::InvalidValue);
18085						}
18086						{
18087							let payments = channel_manager.claimable_payments.lock().unwrap();
18088							if !payments.claimable_payments.contains_key(&payment_hash) {
18089								if let Some(payment) =
18090									payments.pending_claiming_payments.get(&payment_hash)
18091								{
18092									if payment.payment_id
18093										== payment_claim.claiming_payment.payment_id
18094									{
18095										// If this payment already exists and was marked as
18096										// being-claimed then the serialized state must contain all
18097										// of the pending `ChannelMonitorUpdate`s required to get
18098										// the preimage on disk in all MPP parts. Thus we can skip
18099										// the replay below.
18100										continue;
18101									}
18102								}
18103							}
18104						}
18105
18106						let mut channels_without_preimage = payment_claim
18107							.mpp_parts
18108							.iter()
18109							.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.channel_id))
18110							.collect::<Vec<_>>();
18111						// If we have multiple MPP parts which were received over the same channel,
18112						// we only track that channel once - once a preimage is durably in the
18113						// `ChannelMonitor` it will be used for all HTLCs with a matching hash.
18114						channels_without_preimage.sort_unstable();
18115						channels_without_preimage.dedup();
18116						let pending_claims = PendingMPPClaim {
18117							channels_without_preimage,
18118							channels_with_preimage: Vec::new(),
18119						};
18120						let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
18121
18122						// While it may be duplicative to generate a PaymentClaimed here, trying to
18123						// figure out if the user definitely saw it before shutdown would require some
18124						// nontrivial logic and may break as we move away from regularly persisting
18125						// ChannelManager. Instead, we rely on the user's event handler being
18126						// idempotent and just blindly generate one no matter what, relying on the
18127						// preimages eventually timing out of the ChannelMonitors to prevent us from
18128						// doing so forever.
18129
18130						let claim_found = channel_manager
18131							.claimable_payments
18132							.lock()
18133							.unwrap()
18134							.begin_claiming_payment(
18135								payment_hash,
18136								&channel_manager.node_signer,
18137								&channel_manager.logger,
18138								&channel_manager.inbound_payment_id_secret,
18139								true,
18140							);
18141						if claim_found.is_err() {
18142							let mut claimable_payments =
18143								channel_manager.claimable_payments.lock().unwrap();
18144							match claimable_payments.pending_claiming_payments.entry(payment_hash) {
18145								hash_map::Entry::Occupied(_) => {
18146									debug_assert!(
18147										false,
18148										"Entry was added in begin_claiming_payment"
18149									);
18150									return Err(DecodeError::InvalidValue);
18151								},
18152								hash_map::Entry::Vacant(entry) => {
18153									entry.insert(payment_claim.claiming_payment);
18154								},
18155							}
18156						}
18157
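						// Replay the claim for each MPP part, tying the parts together via the
						// shared `PendingMPPClaim` pointer and blocking RAA monitor updates on each
						// channel while the claim is in flight.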
18158						for part in payment_claim.mpp_parts.iter() {
18159							let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| {
18160								(
18161									part.counterparty_node_id,
18162									part.channel_id,
18163									PendingMPPClaimPointer(Arc::clone(&ptr)),
18164								)
18165							});
18166							let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr| {
18167								RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
18168									pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
18169								}
18170							});
18171							// Note that we don't need to pass the `payment_info` here - it's
18172							// already (clearly) durably on disk in the `ChannelMonitor` so there's
18173							// no need to worry about getting it into others.
18174							//
18175							// We don't encode any attribution data, because the required onion shared secret isn't
18176							// available here.
18177							channel_manager.claim_mpp_part(
18178								part.into(),
18179								payment_preimage,
18180								None,
18181								None,
18182								|_, _| {
18183									(
18184										Some(MonitorUpdateCompletionAction::PaymentClaimed {
18185											payment_hash,
18186											pending_mpp_claim,
18187										}),
18188										pending_claim_ptr,
18189									)
18190								},
18191							);
18192						}
18193						processed_claims.insert(payment_claim.mpp_parts);
18194					}
18195				} else {
18196					let per_peer_state = channel_manager.per_peer_state.read().unwrap();
18197					let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
18198					let payment = claimable_payments.claimable_payments.remove(&payment_hash);
18199					mem::drop(claimable_payments);
18200					if let Some(payment) = payment {
18201						log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
18202						let mut claimable_amt_msat = 0;
18203						let mut receiver_node_id = Some(our_network_pubkey);
18204						let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
18205						if phantom_shared_secret.is_some() {
18206							let phantom_pubkey = channel_manager
18207								.node_signer
18208								.get_node_id(Recipient::PhantomNode)
18209								.expect("Failed to get node_id for phantom node recipient");
18210							receiver_node_id = Some(phantom_pubkey)
18211						}
18212						for claimable_htlc in &payment.htlcs {
18213							claimable_amt_msat += claimable_htlc.value;
18214
18215							// Add a holding-cell claim of the payment to the Channel, which should be
18216							// applied ~immediately on peer reconnection. Because it won't generate a
18217							// new commitment transaction we can just provide the payment preimage to
18218							// the corresponding ChannelMonitor and nothing else.
18219							//
18220							// We do so directly instead of via the normal ChannelMonitor update
18221							// procedure as the ChainMonitor hasn't yet been initialized, implying
18222							// we're not allowed to call it directly yet. Further, we do the update
18223							// without incrementing the ChannelMonitor update ID as there isn't any
18224							// reason to.
18225							// If we were to generate a new ChannelMonitor update ID here and then
18226							// crash before the user finishes block connect we'd end up force-closing
18227							// this channel as well. On the flip side, there's no harm in restarting
18228							// without the new monitor persisted - we'll end up right back here on
18229							// restart.
18230							let previous_channel_id = claimable_htlc.prev_hop.channel_id;
18231							let peer_node_id = monitor.get_counterparty_node_id();
18232							{
18233								let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
18234								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
18235								let peer_state = &mut *peer_state_lock;
18236								if let Some(channel) = peer_state
18237									.channel_by_id
18238									.get_mut(&previous_channel_id)
18239									.and_then(Channel::as_funded_mut)
18240								{
18241									let logger = WithChannelContext::from(
18242										&channel_manager.logger,
18243										&channel.context,
18244										Some(payment_hash),
18245									);
18246									channel
18247										.claim_htlc_while_disconnected_dropping_mon_update_legacy(
18248											claimable_htlc.prev_hop.htlc_id,
18249											payment_preimage,
18250											&&logger,
18251										);
18252								}
18253							}
18254							if let Some(previous_hop_monitor) =
18255								args.channel_monitors.get(&claimable_htlc.prev_hop.channel_id)
18256							{
18257								// Note that this is unsafe as we no longer require the
18258								// `ChannelMonitor`s to be re-persisted prior to this
18259								// `ChannelManager` being persisted after we get started running.
18260								// If this `ChannelManager` gets persisted first and we then crash, we
18261								// won't have the `claimable_payments` entry we need to re-enter
18262								// this code block, causing us to not re-apply the preimage to this
18263								// `ChannelMonitor`.
18264								//
18265								// We should never be here with modern payment claims, however, as
18266								// they should always include the HTLC list. Instead, this is only
18267								// for nodes during upgrade, and we explicitly require the old
18268								// persistence semantics on upgrade in the release notes.
18269								previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
18270									&payment_hash,
18271									&payment_preimage,
18272									&channel_manager.tx_broadcaster,
18273									&channel_manager.fee_estimator,
18274									&channel_manager.logger,
18275								);
18276							}
18277						}
18278						let mut pending_events = channel_manager.pending_events.lock().unwrap();
18279						let payment_id =
18280							payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
18281						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
18282						let sender_intended_total_msat =
18283							payment.htlcs.first().map(|htlc| htlc.total_msat);
18284						pending_events.push_back((
18285							events::Event::PaymentClaimed {
18286								receiver_node_id,
18287								payment_hash,
18288								purpose: payment.purpose,
18289								amount_msat: claimable_amt_msat,
18290								htlcs,
18291								sender_intended_total_msat,
18292								onion_fields: payment.onion_fields,
18293								payment_id: Some(payment_id),
18294							},
18295							// Note that we don't bother adding an EventCompletionAction here to
18296							// ensure the `PaymentClaimed` event is durably processed, as this
18297							// should only be hit for particularly old channels and we don't have
18298							// enough information to generate such an action.
18299							None,
18300						));
18301					}
18302				}
18303			}
18304		}
18305
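		// Now that the `ChannelManager` exists, fail backwards the HTLCs we collected above as
		// having been resolved on-chain by our counterparty.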
18306		for htlc_source in failed_htlcs {
18307			let (source, hash, counterparty_id, channel_id, failure_reason, ev_action) =
18308				htlc_source;
18309			let receiver =
18310				HTLCHandlingFailureType::Forward { node_id: Some(counterparty_id), channel_id };
18311			let reason = HTLCFailReason::from_failure_code(failure_reason);
18312			channel_manager
18313				.fail_htlc_backwards_internal(&source, &hash, &reason, receiver, ev_action);
18314		}
18315
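		// Similarly, replay any forwarded-payment preimage claims collected above so the preimages
		// reach the upstream (inbound edge) channels' `ChannelMonitor`s.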
18316		for (
18317			source,
18318			preimage,
18319			downstream_value,
18320			downstream_closed,
18321			downstream_node_id,
18322			downstream_funding,
18323			downstream_channel_id,
18324		) in pending_claims_to_replay
18325		{
18326			// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
18327			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
18328			// channel is closed we just assume that it probably came from an on-chain claim.
18329			// The same holds for attribution data. We don't have any, so we pass an empty one.
18330			channel_manager.claim_funds_internal(
18331				source,
18332				preimage,
18333				Some(downstream_value),
18334				None,
18335				downstream_closed,
18336				downstream_node_id,
18337				downstream_funding,
18338				downstream_channel_id,
18339				None,
18340				None,
18341				None,
18342			);
18343		}
18344
18345		//TODO: Broadcast channel update for closed channels, but only after we've made a
18346		//connection or two.
18347
18348		Ok((best_block_hash.clone(), channel_manager))
18349	}
18350}
18351
18352#[cfg(test)]
18353mod tests {
18354	use crate::events::{ClosureReason, Event, HTLCHandlingFailureType};
18355	use crate::ln::channelmanager::{
18356		create_recv_pending_htlc_info, inbound_payment, HTLCForwardInfo, InterceptId, PaymentId,
18357		RecipientOnionFields,
18358	};
18359	use crate::ln::functional_test_utils::*;
18360	use crate::ln::msgs::{self, BaseMessageHandler, ChannelMessageHandler, MessageSendEvent};
18361	use crate::ln::onion_utils::AttributionData;
18362	use crate::ln::onion_utils::{self, LocalHTLCFailureReason};
18363	use crate::ln::outbound_payment::Retry;
18364	use crate::ln::types::ChannelId;
18365	use crate::prelude::*;
18366	use crate::routing::router::{find_route, PaymentParameters, RouteParameters};
18367	use crate::sign::EntropySource;
18368	use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
18369	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
18370	use crate::util::errors::APIError;
18371	use crate::util::ser::Writeable;
18372	use crate::util::test_utils;
18373	use bitcoin::secp256k1::ecdh::SharedSecret;
18374	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
18375	use core::sync::atomic::Ordering;
18376
18377	#[test]
18378	#[rustfmt::skip]
18379	fn test_notify_limits() {
18380		// Check that a few cases which don't require the persistence of a new ChannelManager,
18381		// indeed, do not cause the persistence of a new ChannelManager.
18382		let chanmon_cfgs = create_chanmon_cfgs(3);
18383		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
18384		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
18385		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
18386
18387		// All nodes start with a persistable update pending as `create_network` connects each node
18388		// with all other nodes to make most tests simpler.
18389		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18390		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18391		assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
18392
18393		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
18394
18395		// We check that the channel info the nodes have doesn't change too early, even though we
18396		// try to deliver messages with new values.
18397		chan.0.contents.fee_base_msat *= 2;
18398		chan.1.contents.fee_base_msat *= 2;
18399		let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
18400			&nodes[1].node.get_our_node_id()).pop().unwrap();
18401		let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
18402			&nodes[0].node.get_our_node_id()).pop().unwrap();
18403
18404		// The first two nodes (which opened a channel) should now require fresh persistence
18405		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18406		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18407		// ... but the last node should not.
18408		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
18409		// After persisting the first two nodes they should no longer need fresh persistence.
18410		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18411		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18412
18413		// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
18414		// about the channel.
18415		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
18416		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
18417		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
18418
18419		// The nodes which are a party to the channel should also ignore messages from unrelated
18420		// parties.
18421		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
18422		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
18423		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
18424		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
18425		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18426		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18427
18428		// At this point the channel info given by peers should still be the same.
18429		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
18430		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
18431
18432		// An earlier version of handle_channel_update didn't check the directionality of the
18433		// update message and would always update the local fee info, even if our peer was
18434		// (spuriously) forwarding us our own channel_update.
18435		let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
18436		let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
18437		let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
18438
18439		// First deliver each peer's own message, checking that the node doesn't need to be
18440		// persisted and that its channel info remains the same.
18441		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
18442		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
18443		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18444		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18445		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
18446		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
18447
18448		// Finally, deliver the other peers' message, ensuring each node needs to be persisted and
18449		// the channel info has updated.
18450		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
18451		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
18452		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
18453		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
18454		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
18455		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
18456	}
18457
18458	#[test]
18459	#[rustfmt::skip]
18460	fn test_keysend_dup_hash_partial_mpp() {
18461		// Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
18462		// expected.
18463		let chanmon_cfgs = create_chanmon_cfgs(2);
18464		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18465		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18466		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18467		create_announced_chan_between_nodes(&nodes, 0, 1);
18468
18469		// First, send a partial MPP payment.
18470		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
18471		let mut mpp_route = route.clone();
18472		mpp_route.paths.push(mpp_route.paths[0].clone());
18473
18474		let payment_id = PaymentId([42; 32]);
18475		// Use the utility function send_payment_along_path to send the payment with MPP data which
18476		// indicates there are more HTLCs coming.
18477		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
18478		let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
18479			RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
18480		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
18481			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
18482		check_added_monitors!(nodes[0], 1);
18483		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18484		assert_eq!(events.len(), 1);
18485		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
18486
18487		// Next, send a keysend payment with the same payment_hash and make sure it fails.
18488		nodes[0].node.send_spontaneous_payment(
18489			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
18490			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
18491		).unwrap();
18492		check_added_monitors!(nodes[0], 1);
18493		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18494		assert_eq!(events.len(), 1);
18495		let ev = events.drain(..).next().unwrap();
18496		let payment_event = SendEvent::from_event(ev);
18497		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
18498		check_added_monitors!(nodes[1], 0);
18499		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
18500		expect_and_process_pending_htlcs(&nodes[1], true);
18501		let events = nodes[1].node.get_and_clear_pending_events();
18502		let fail = HTLCHandlingFailureType::Receive { payment_hash: our_payment_hash };
18503		expect_htlc_failure_conditions(events, &[fail]);
18504		check_added_monitors!(nodes[1], 1);
18505		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18506		assert!(updates.update_add_htlcs.is_empty());
18507		assert!(updates.update_fulfill_htlcs.is_empty());
18508		assert_eq!(updates.update_fail_htlcs.len(), 1);
18509		assert!(updates.update_fail_malformed_htlcs.is_empty());
18510		assert!(updates.update_fee.is_none());
18511		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
18512		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
18513		expect_payment_failed!(nodes[0], our_payment_hash, true);
18514
18515		// Send the second half of the original MPP payment.
18516		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
18517			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
18518		check_added_monitors!(nodes[0], 1);
18519		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18520		assert_eq!(events.len(), 1);
18521		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
18522
18523		// Claim the full MPP payment. Note that we can't use a test utility like
18524		// claim_funds_along_route because the ordering of the messages causes the second half of the
18525		// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
18526		// lightning messages manually.
18527		nodes[1].node.claim_funds(payment_preimage);
18528		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
18529		check_added_monitors!(nodes[1], 2);
18530
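		// The claim produces one update_fulfill_htlc per MPP part; deliver each and complete the
		// corresponding commitment_signed/revoke_and_ack exchange by hand.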
18531		let mut bs_1st_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18532		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_1st_updates.update_fulfill_htlcs.remove(0));
18533		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
18534		nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_1st_updates.commitment_signed);
18535		check_added_monitors!(nodes[0], 1);
18536		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
18537		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
18538		check_added_monitors!(nodes[1], 1);
18539		let mut bs_2nd_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18540		nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_first_cs);
18541		check_added_monitors!(nodes[1], 1);
18542		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
18543		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), bs_2nd_updates.update_fulfill_htlcs.remove(0));
18544		nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_2nd_updates.commitment_signed);
18545		check_added_monitors!(nodes[0], 1);
18546		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
18547		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
18548		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
18549		check_added_monitors!(nodes[0], 1);
18550		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
18551		check_added_monitors!(nodes[1], 1);
18552		nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
18553		check_added_monitors!(nodes[1], 1);
18554		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
18555		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
18556		check_added_monitors!(nodes[0], 1);
18557
18558		// Note that successful MPP payments will generate a single PaymentSent event upon the first
18559		// path's success and a PaymentPathSuccessful event for each path's success.
18560		let events = nodes[0].node.get_and_clear_pending_events();
18561		assert_eq!(events.len(), 2);
18562		match events[0] {
18563			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, .. } => {
18564				assert_eq!(payment_id, *actual_payment_id);
18565				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
18566				assert_eq!(route.paths[0], *path);
18567			},
18568			_ => panic!("Unexpected event"),
18569		}
18570		match events[1] {
18571			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path, ..} => {
18572				assert_eq!(payment_id, *actual_payment_id);
18573				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
18574				assert_eq!(route.paths[0], *path);
18575			},
18576			_ => panic!("Unexpected event"),
18577		}
18578	}
18579
18580	#[test]
18581	#[rustfmt::skip]
18582	fn test_keysend_dup_payment_hash() {
18583		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
18584		//      outbound regular payment fails as expected.
18585		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
18586		//      fails as expected.
18587		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
18588		//      payment fails as expected. We only accept MPP keysends with payment secrets and reject
18589		//      otherwise.
18590		let chanmon_cfgs = create_chanmon_cfgs(2);
18591		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18592		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18593		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18594		create_announced_chan_between_nodes(&nodes, 0, 1);
18595		let scorer = test_utils::TestScorer::new();
18596		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
18597
18598		// To start (1), send a regular payment but don't claim it.
18599		let expected_route = [&nodes[1]];
18600		let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
18601
18602		// Next, attempt a keysend payment and make sure it fails.
18603		let route_params = RouteParameters::from_payment_params_and_value(
18604			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
18605			TEST_FINAL_CLTV, false), 100_000);
18606		nodes[0].node.send_spontaneous_payment(
18607			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
18608			PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
18609		).unwrap();
18610		check_added_monitors!(nodes[0], 1);
18611		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18612		assert_eq!(events.len(), 1);
18613		let ev = events.drain(..).next().unwrap();
18614		let payment_event = SendEvent::from_event(ev);
18615		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
18616		check_added_monitors!(nodes[1], 0);
18617		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
18618		// We have to process pending HTLCs twice - the first pass tries to forward the payment (and
18619		// fails), the second processes the resulting failure and fails the HTLC backward.
18620		expect_and_process_pending_htlcs(&nodes[1], true);
18621		let events = nodes[1].node.get_and_clear_pending_events();
18622		let fail = HTLCHandlingFailureType::Receive { payment_hash };
18623		expect_htlc_failure_conditions(events, &[fail]);
18624		check_added_monitors!(nodes[1], 1);
18625		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18626		assert!(updates.update_add_htlcs.is_empty());
18627		assert!(updates.update_fulfill_htlcs.is_empty());
18628		assert_eq!(updates.update_fail_htlcs.len(), 1);
18629		assert!(updates.update_fail_malformed_htlcs.is_empty());
18630		assert!(updates.update_fee.is_none());
18631		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
18632		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
18633		expect_payment_failed!(nodes[0], payment_hash, true);
18634
18635		// Finally, claim the original payment.
18636		claim_payment(&nodes[0], &expected_route, payment_preimage);
18637
18638		// To start (2), send a keysend payment but don't claim it.
18639		let payment_preimage = PaymentPreimage([42; 32]);
18640		let route = find_route(
18641			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
18642			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
18643		).unwrap();
18644		let payment_hash = nodes[0].node.send_spontaneous_payment(
18645			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
18646			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
18647		).unwrap();
18648		check_added_monitors!(nodes[0], 1);
18649		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18650		assert_eq!(events.len(), 1);
18651		let event = events.pop().unwrap();
18652		let path = vec![&nodes[1]];
18653		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
18654
18655		// Next, attempt a regular payment and make sure it fails.
18656		let payment_secret = PaymentSecret([43; 32]);
18657		nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
18658			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
18659		check_added_monitors!(nodes[0], 1);
18660		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18661		assert_eq!(events.len(), 1);
18662		let ev = events.drain(..).next().unwrap();
18663		let payment_event = SendEvent::from_event(ev);
18664		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
18665		check_added_monitors!(nodes[1], 0);
18666		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
18667		expect_and_process_pending_htlcs(&nodes[1], true);
18668		let events = nodes[1].node.get_and_clear_pending_events();
18669		let fail = HTLCHandlingFailureType::Receive { payment_hash };
18670		expect_htlc_failure_conditions(events, &[fail]);
18671		check_added_monitors!(nodes[1], 1);
18672		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18673		assert!(updates.update_add_htlcs.is_empty());
18674		assert!(updates.update_fulfill_htlcs.is_empty());
18675		assert_eq!(updates.update_fail_htlcs.len(), 1);
18676		assert!(updates.update_fail_malformed_htlcs.is_empty());
18677		assert!(updates.update_fee.is_none());
18678		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
18679		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
18680		expect_payment_failed!(nodes[0], payment_hash, true);
18681
18682		// Finally, succeed the keysend payment.
18683		claim_payment(&nodes[0], &expected_route, payment_preimage);
18684
18685		// To start (3), send a keysend payment but don't claim it.
18686		let payment_id_1 = PaymentId([44; 32]);
18687		let payment_hash = nodes[0].node.send_spontaneous_payment(
18688			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
18689			route.route_params.clone().unwrap(), Retry::Attempts(0)
18690		).unwrap();
18691		check_added_monitors!(nodes[0], 1);
18692		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18693		assert_eq!(events.len(), 1);
18694		let event = events.pop().unwrap();
18695		let path = vec![&nodes[1]];
18696		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
18697
18698		// Next, attempt a keysend payment and make sure it fails.
18699		let route_params = RouteParameters::from_payment_params_and_value(
18700			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
18701			100_000
18702		);
18703		let payment_id_2 = PaymentId([45; 32]);
18704		nodes[0].node.send_spontaneous_payment(
18705			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
18706			Retry::Attempts(0)
18707		).unwrap();
18708		check_added_monitors!(nodes[0], 1);
18709		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
18710		assert_eq!(events.len(), 1);
18711		let ev = events.drain(..).next().unwrap();
18712		let payment_event = SendEvent::from_event(ev);
18713		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
18714		check_added_monitors!(nodes[1], 0);
18715		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
18716		expect_and_process_pending_htlcs(&nodes[1], true);
18717		let events = nodes[1].node.get_and_clear_pending_events();
18718		let fail = HTLCHandlingFailureType::Receive { payment_hash };
18719		expect_htlc_failure_conditions(events, &[fail]);
18720		check_added_monitors!(nodes[1], 1);
18721		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18722		assert!(updates.update_add_htlcs.is_empty());
18723		assert!(updates.update_fulfill_htlcs.is_empty());
18724		assert_eq!(updates.update_fail_htlcs.len(), 1);
18725		assert!(updates.update_fail_malformed_htlcs.is_empty());
18726		assert!(updates.update_fee.is_none());
18727		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
18728		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
18729		expect_payment_failed!(nodes[0], payment_hash, true);
18730
18731		// Finally, claim the original payment.
18732		claim_payment(&nodes[0], &expected_route, payment_preimage);
18733	}
18734
18735	#[test]
18736	#[rustfmt::skip]
18737	fn test_keysend_hash_mismatch() {
18738		// Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
18739		// preimage doesn't match the msg's payment hash.
18740		let chanmon_cfgs = create_chanmon_cfgs(2);
18741		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18742		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18743		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18744
18745		let payer_pubkey = nodes[0].node.get_our_node_id();
18746		let payee_pubkey = nodes[1].node.get_our_node_id();
18747
18748		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
18749		let route_params = RouteParameters::from_payment_params_and_value(
18750			PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
18751		let network_graph = nodes[0].network_graph;
18752		let first_hops = nodes[0].node.list_usable_channels();
18753		let scorer = test_utils::TestScorer::new();
18754		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
18755		let route = find_route(
18756			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
18757			nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
18758		).unwrap();
18759
18760		let test_preimage = PaymentPreimage([42; 32]);
18761		let mismatch_payment_hash = PaymentHash([43; 32]);
18762		let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
18763			RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
18764		nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
18765			RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
18766		check_added_monitors!(nodes[0], 1);
18767
18768		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
18769		assert_eq!(updates.update_add_htlcs.len(), 1);
18770		assert!(updates.update_fulfill_htlcs.is_empty());
18771		assert!(updates.update_fail_htlcs.is_empty());
18772		assert!(updates.update_fail_malformed_htlcs.is_empty());
18773		assert!(updates.update_fee.is_none());
18774		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
18775		commitment_signed_dance!(nodes[1], nodes[0], &updates.commitment_signed, false);
18776		expect_and_process_pending_htlcs(&nodes[1], false);
18777		expect_htlc_handling_failed_destinations!(nodes[1].node.get_and_clear_pending_events(), &[HTLCHandlingFailureType::Receive { payment_hash: mismatch_payment_hash }]);
18778		check_added_monitors(&nodes[1], 1);
18779		let _ = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
18780
18781		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
18782	}
18783
18784	#[test]
18785	#[rustfmt::skip]
18786	fn test_multi_hop_missing_secret() {
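		// Test that sending a multi-path payment without a payment secret fails up front: the send
		// surfaces a `PaymentFailed` event with `PaymentFailureReason::UnexpectedError` and the
		// payment never appears in `list_recent_payments`.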
18787		let chanmon_cfgs = create_chanmon_cfgs(4);
18788		let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
18789		let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
18790		let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
18791
18792		let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
18793		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
18794		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
18795		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
18796
18797		// Marshall an MPP route.
18798		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
18799		let path = route.paths[0].clone();
18800		route.paths.push(path);
18801		route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
18802		route.paths[0].hops[0].short_channel_id = chan_1_id;
18803		route.paths[0].hops[1].short_channel_id = chan_3_id;
18804		route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
18805		route.paths[1].hops[0].short_channel_id = chan_2_id;
18806		route.paths[1].hops[1].short_channel_id = chan_4_id;
18807
18808		nodes[0].node.send_payment_with_route(route, payment_hash,
18809			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
18810		let events = nodes[0].node.get_and_clear_pending_events();
18811		assert_eq!(events.len(), 1);
18812		match events[0] {
18813			Event::PaymentFailed { reason, .. } => {
18814				assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
18815			}
18816			_ => panic!()
18817		}
18818		nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
18819		assert!(nodes[0].node.list_recent_payments().is_empty());
18820	}
18821
18822	#[test]
18823	#[rustfmt::skip]
18824	fn test_channel_update_cached() {
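		// Test that the `channel_update` generated when force-closing a channel is not sent to peers
		// directly but cached in `pending_broadcast_messages`, and is only surfaced via
		// `get_and_clear_pending_msg_events` once we are connected to at least one peer.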
18825		let chanmon_cfgs = create_chanmon_cfgs(3);
18826		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
18827		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
18828		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
18829
18830		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
18831
18832		let message = "Channel force-closed".to_owned();
18833		nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id(), message.clone()).unwrap();
18834		check_added_monitors!(nodes[0], 1);
18835		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
18836		check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000);
18837
18838		// Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
18839		let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
18840		assert_eq!(node_1_events.len(), 0);
18841
18842		{
18843			// Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
18844			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
18845			assert_eq!(pending_broadcast_messages.len(), 1);
18846		}
18847
18848		// Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
18849		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
18850		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
18851
18852		nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
18853		nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());
18854
18855		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
18856		assert_eq!(node_0_events.len(), 0);
18857
18858		// Now we reconnect to a peer
18859		nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
18860			features: nodes[2].node.init_features(), networks: None, remote_network_address: None
18861		}, true).unwrap();
18862		nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
18863			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
18864		}, false).unwrap();
18865
18866		// Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
18867		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
18868		assert_eq!(node_0_events.len(), 1);
18869		match &node_0_events[0] {
18870			MessageSendEvent::BroadcastChannelUpdate { .. } => (),
18871			_ => panic!("Unexpected event"),
18872		}
18873		{
18874			// Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
18875			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
18876			assert_eq!(pending_broadcast_messages.len(), 0);
18877		}
18878	}
18879
18880	#[test]
18881	#[rustfmt::skip]
18882	fn test_drop_disconnected_peers_when_removing_channels() {
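		// Test that a disconnected peer's per-peer state is removed on the next `timer_tick_occurred`
		// after its last channel has been force-closed, but not before.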
18883		let chanmon_cfgs = create_chanmon_cfgs(2);
18884		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18885		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18886		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18887
18888		create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
18889
18890		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
18891		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
18892		let chan_id = nodes[0].node.list_channels()[0].channel_id;
18893		let message = "Channel force-closed".to_owned();
18894		nodes[0]
18895			.node
18896			.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone())
18897			.unwrap();
18898		check_added_monitors!(nodes[0], 1);
18899		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
18900		check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 1_000_000);
18901
18902		{
18903			// Assert that nodes[1]'s peer state is awaiting removal from nodes[0] once nodes[1] has
18904			// been disconnected and the channel between them has been force closed.
18905			let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
18906			// Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
18907			assert_eq!(nodes_0_per_peer_state.len(), 1);
18908			assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
18909		}
18910
18911		nodes[0].node.timer_tick_occurred();
18912
18913		{
18914			// Assert that nodes[1] has now been removed.
18915			assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
18916		}
18917	}
18918
18919	#[test]
18920	#[rustfmt::skip]
18921	fn test_drop_peers_when_removing_unfunded_channels() {
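		// Test that when an unfunded channel is the only thing keeping a peer around, disconnecting
		// closes the channel (`ClosureReason::DisconnectedPeer`) and drops the per-peer state
		// immediately, without waiting for a timer tick.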
18922		let chanmon_cfgs = create_chanmon_cfgs(2);
18923		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18924		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18925		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18926
18927		exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
18928		let events = nodes[0].node.get_and_clear_pending_events();
18929		assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
18930		match events[0] {
18931			Event::FundingGenerationReady { .. } => {}
18932			_ => panic!("Unexpected event {:?}", events),
18933		}
18934
18935		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
18936		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
18937		check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
18938		check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
18939
18940		// At this point the state for the peers should have been removed.
18941		assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
18942		assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
18943	}
18944
18945	#[test]
18946	#[rustfmt::skip]
18947	fn bad_inbound_payment_hash() {
18948		// Add coverage for checking that a user-provided payment hash matches the payment secret.
18949		let chanmon_cfgs = create_chanmon_cfgs(2);
18950		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
18951		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
18952		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
18953
18954		let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
18955		let payment_data = msgs::FinalOnionHopData {
18956			payment_secret,
18957			total_msat: 100_000,
18958		};
18959
18960		// Ensure that if the payment hash given to `inbound_payment::verify` differs from the original,
18961		// payment verification fails as expected.
18962		let mut bad_payment_hash = payment_hash.clone();
18963		bad_payment_hash.0[0] += 1;
18964		match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
18965			Ok(_) => panic!("Unexpected ok"),
18966			Err(()) => {
18967				nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
18968			}
18969		}
18970
18971		// Check that using the original payment hash succeeds.
18972		assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
18973	}
18974
18975	fn check_not_connected_to_peer_error<T>(
18976		res_err: Result<T, APIError>, expected_public_key: PublicKey,
18977	) {
18978		let expected_message = format!("Not connected to node: {}", expected_public_key);
18979		check_api_error_message(expected_message, res_err)
18980	}
18981
18982	#[rustfmt::skip]
18983	fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
18984		let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
18985		check_api_error_message(expected_message, res_err)
18986	}
18987
18988	#[rustfmt::skip]
18989	fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
18990		let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
18991		check_api_error_message(expected_message, res_err)
18992	}
18993
18994	fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
18995		let expected_message = "No such channel awaiting to be accepted.".to_string();
18996		check_api_error_message(expected_message, res_err)
18997	}
18998
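	// Asserts that an API call failed with the expected error message, whether it was surfaced as
	// an `APIMisuseError` or a `ChannelUnavailable` error.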
18999	fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
19000		match res_err {
19001			Err(APIError::APIMisuseError { err }) => {
19002				assert_eq!(err, expected_err_message);
19003			},
19004			Err(APIError::ChannelUnavailable { err }) => {
19005				assert_eq!(err, expected_err_message);
19006			},
19007			Ok(_) => panic!("Unexpected Ok"),
19008			Err(_) => panic!("Unexpected Error"),
19009		}
19010	}
19011
19012	#[test]
19013	#[rustfmt::skip]
19014	fn test_api_calls_with_unkown_counterparty_node() {
19015		// Tests that our API functions which expect a `counterparty_node_id` as input behave as
19016		// expected if the `counterparty_node_id` is an unknown peer in the
19017		// `ChannelManager::per_peer_state` map.
19018		let chanmon_cfg = create_chanmon_cfgs(2);
19019		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
19020		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
19021		let nodes = create_network(2, &node_cfg, &node_chanmgr);
19022
19023		// Dummy values
19024		let channel_id = ChannelId::from_bytes([4; 32]);
19025		let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
19026		let intercept_id = InterceptId([0; 32]);
19027		let error_message = "Channel force-closed";
19028
19029		// Test the API functions.
19030		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
19031
19032		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42, None), unkown_public_key);
19033
19034		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
19035
19036		check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
19037
19038		check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
19039
19040		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
19041	}
19042
19043	#[test]
19044	#[rustfmt::skip]
19045	fn test_api_calls_with_unavailable_channel() {
19046		// Tests that our API functions which expect a `counterparty_node_id` and a `channel_id`
19047		// as input behave as expected if the `counterparty_node_id` is a known peer in the
19048		// `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
19049		// the given `channel_id`.
19050		let chanmon_cfg = create_chanmon_cfgs(2);
19051		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
19052		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
19053		let nodes = create_network(2, &node_cfg, &node_chanmgr);
19054
19055		let counterparty_node_id = nodes[1].node.get_our_node_id();
19056
19057		// Dummy values
19058		let channel_id = ChannelId::from_bytes([4; 32]);
19059		let error_message = "Channel force-closed";
19060
19061		// Test the API functions.
19062		check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42, None));
19063
19064		check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
19065
19066		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
19067
19068		check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
19069
19070		check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
19071	}
19072
19073	#[test]
19074	#[rustfmt::skip]
19075	fn test_connection_limiting() {
19076		// Test that we limit un-channel'd peers and un-funded channels properly.
19077		let chanmon_cfgs = create_chanmon_cfgs(2);
19078		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
19079		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
19080		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
19081
19082		// Note that create_network connects the nodes together for us
19083
19084		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
19085		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
19086
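		// Open MAX_UNFUNDED_CHANS_PER_PEER inbound channels from nodes[0], completing the funding
		// handshake only for the first so that it can later be confirmed on-chain.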
19087		let mut funding_tx = None;
19088		for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
19089			nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
19090			let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
19091
19092			if idx == 0 {
19093				nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
19094				let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
19095				funding_tx = Some(tx.clone());
19096				nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
19097				let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
19098
19099				nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
19100				check_added_monitors!(nodes[1], 1);
19101				expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
19102
19103				let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
19104
19105				nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
19106				check_added_monitors!(nodes[0], 1);
19107				expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
19108			}
19109			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
19110		}
19111
19112		// A (MAX_UNFUNDED_CHANS_PER_PEER + 1)th channel will be summarily rejected
19113		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
19114			&nodes[0].keys_manager);
19115		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
19116		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
19117			open_channel_msg.common_fields.temporary_channel_id);
19118
19119		// Further, because all of our channels with nodes[0] are inbound, and none of them are funded,
19120		// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
19121		// limit.
19122		let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
19123		for _ in 1..super::MAX_NO_CHANNEL_PEERS {
19124			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
19125				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
19126			peer_pks.push(random_pk);
19127			nodes[1].node.peer_connected(random_pk, &msgs::Init {
19128				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19129			}, true).unwrap();
19130		}
19131		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
19132			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
19133		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
19134			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19135		}, true).unwrap_err();
19136
19137		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
19138		// them if we have too many un-channel'd peers.
19139		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
19140		let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
19141		assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
19142		for ev in chan_closed_events {
19143			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
19144		}
19145		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
19146			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19147		}, true).unwrap();
19148		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
19149			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19150		}, true).unwrap_err();
19151
19152		// But of course, if the connection is outbound, it's allowed...
19153		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
19154			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19155		}, false).unwrap();
19156		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
19157
19158		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
19159		// Even though we accept one more connection from new peers, we won't actually let them
19160		// open channels.
19161		assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
19162		for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
19163			nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
19164			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
19165			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
19166		}
19167		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
19168		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
19169			open_channel_msg.common_fields.temporary_channel_id);
19170
19171		// Outbound channels, however, are always allowed
19172		nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
19173		get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
19174
19175		// Once we fund the first channel, nodes[0] has a live on-chain channel with us, so it is
19176		// now "protected" and can connect again.
19177		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
19178		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
19179			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19180		}, true).unwrap();
19181		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
19182
19183		// Further, because the first channel was funded, we can open another channel with
19184		// last_random_pk.
19185		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
19186		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
19187	}
19188
19189	#[test]
19190	#[rustfmt::skip]
19191	fn reject_excessively_underpaying_htlcs() {
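		// Test that, as the final hop, we fail HTLCs which pay less than the sender intended amount
		// once any fee we agreed to skim (`accept_underpaying_htlcs`) has been accounted for, and
		// accept them when the received amount plus the skimmed fee exactly matches.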
19192		let chanmon_cfg = create_chanmon_cfgs(1);
19193		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
19194		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
19195		let node = create_network(1, &node_cfg, &node_chanmgr);
19196		let sender_intended_amt_msat = 100;
19197		let extra_fee_msat = 10;
19198		let hop_data = onion_utils::Hop::Receive {
19199			hop_data: msgs::InboundOnionReceivePayload {
19200				sender_intended_htlc_amt_msat: 100,
19201				cltv_expiry_height: 42,
19202				payment_metadata: None,
19203				keysend_preimage: None,
19204				payment_data: Some(msgs::FinalOnionHopData {
19205					payment_secret: PaymentSecret([0; 32]),
19206					total_msat: sender_intended_amt_msat,
19207				}),
19208				custom_tlvs: Vec::new(),
19209			},
19210			shared_secret: SharedSecret::from_bytes([0; 32]),
19211		};
19212		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
19213		// intended amount, we fail the payment.
19214		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
19215		if let Err(crate::ln::channelmanager::InboundHTLCErr { reason, .. }) =
19216			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
19217				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
19218				current_height)
19219		{
19220			assert_eq!(reason, LocalHTLCFailureReason::FinalIncorrectHTLCAmount);
19221		} else { panic!(); }
19222
19223		// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
19224		let hop_data = onion_utils::Hop::Receive {
19225			hop_data: msgs::InboundOnionReceivePayload { // Same payload as above; InboundOnionPayload doesn't implement Clone
19226				sender_intended_htlc_amt_msat: 100,
19227				cltv_expiry_height: 42,
19228				payment_metadata: None,
19229				keysend_preimage: None,
19230				payment_data: Some(msgs::FinalOnionHopData {
19231					payment_secret: PaymentSecret([0; 32]),
19232					total_msat: sender_intended_amt_msat,
19233				}),
19234				custom_tlvs: Vec::new(),
19235			},
19236			shared_secret: SharedSecret::from_bytes([0; 32]),
19237		};
19238		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
19239		assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
19240			sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
19241			current_height).is_ok());
19242	}
19243
19244	#[test]
19245	#[rustfmt::skip]
19246	fn test_final_incorrect_cltv() {
19247		let chanmon_cfg = create_chanmon_cfgs(1);
19248		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
19249		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
19250		let node = create_network(1, &node_cfg, &node_chanmgr);
19251
19252		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
19253		let result = create_recv_pending_htlc_info(onion_utils::Hop::Receive {
19254			hop_data: msgs::InboundOnionReceivePayload {
19255				sender_intended_htlc_amt_msat: 100,
19256				cltv_expiry_height: TEST_FINAL_CLTV,
19257				payment_metadata: None,
19258				keysend_preimage: None,
19259				payment_data: Some(msgs::FinalOnionHopData {
19260					payment_secret: PaymentSecret([0; 32]),
19261					total_msat: 100,
19262				}),
19263				custom_tlvs: Vec::new(),
19264			},
19265			shared_secret: SharedSecret::from_bytes([0; 32]),
19266		}, [0; 32], PaymentHash([0; 32]), 100, TEST_FINAL_CLTV + 1, None, true, None, current_height);
19267
19268		// Should not return an error as this condition:
19269		// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
19270		// is not satisfied.
19271		assert!(result.is_ok());
19272	}
19273
19274	#[test]
19275	#[rustfmt::skip]
19276	fn test_update_channel_config() {
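		// Test that `update_channel_config` and `update_partial_channel_config` only queue a
		// `BroadcastChannelUpdate` when a value actually changes, and that passing an unknown
		// `channel_id` fails the call without applying any of the requested updates.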
19277		let chanmon_cfg = create_chanmon_cfgs(2);
19278		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
19279		let mut user_config = test_default_channel_config();
19280		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config.clone())]);
19281		let nodes = create_network(2, &node_cfg, &node_chanmgr);
19282		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
19283		let channel = &nodes[0].node.list_channels()[0];
19284
19285		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
19286		let events = nodes[0].node.get_and_clear_pending_msg_events();
19287		assert_eq!(events.len(), 0);
19288
19289		user_config.channel_config.forwarding_fee_base_msat += 10;
19290		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
19291		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
19292		let events = nodes[0].node.get_and_clear_pending_msg_events();
19293		assert_eq!(events.len(), 1);
19294		match &events[0] {
19295			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
19296			_ => panic!("expected BroadcastChannelUpdate event"),
19297		}
19298
19299		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
19300		let events = nodes[0].node.get_and_clear_pending_msg_events();
19301		assert_eq!(events.len(), 0);
19302
19303		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
19304		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
19305			cltv_expiry_delta: Some(new_cltv_expiry_delta),
19306			..Default::default()
19307		}).unwrap();
19308		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
19309		let events = nodes[0].node.get_and_clear_pending_msg_events();
19310		assert_eq!(events.len(), 1);
19311		match &events[0] {
19312			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
19313			_ => panic!("expected BroadcastChannelUpdate event"),
19314		}
19315
19316		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
19317		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
19318			forwarding_fee_proportional_millionths: Some(new_fee),
19319			accept_underpaying_htlcs: Some(true),
19320			..Default::default()
19321		}).unwrap();
19322		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
19323		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
19324		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().accept_underpaying_htlcs, true);
19325		let events = nodes[0].node.get_and_clear_pending_msg_events();
19326		assert_eq!(events.len(), 1);
19327		match &events[0] {
19328			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
19329			_ => panic!("expected BroadcastChannelUpdate event"),
19330		}
19331
19332		// If we provide a channel_id not associated with the peer, we should get an error and no updates
19333		// should be applied to ensure update atomicity as specified in the API docs.
19334		let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
19335		let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
19336		let new_fee = current_fee + 100;
19337		assert!(
19338			matches!(
19339				nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
19340					forwarding_fee_proportional_millionths: Some(new_fee),
19341					..Default::default()
19342				}),
19343				Err(APIError::ChannelUnavailable { err: _ }),
19344			)
19345		);
19346		// Check that the fee hasn't changed for the channel that exists.
19347		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
19348		let events = nodes[0].node.get_and_clear_pending_msg_events();
19349		assert_eq!(events.len(), 0);
19350	}
19351
19352	#[test]
19353	#[rustfmt::skip]
19354	fn test_payment_display() {
19355		let payment_id = PaymentId([42; 32]);
19356		assert_eq!(format!("{}", &payment_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
19357		let payment_hash = PaymentHash([42; 32]);
19358		assert_eq!(format!("{}", &payment_hash), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
19359		let payment_preimage = PaymentPreimage([42; 32]);
19360		assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
19361	}
19362
19363	#[test]
19364	#[rustfmt::skip]
19365	fn test_trigger_lnd_force_close() {
19366		let chanmon_cfg = create_chanmon_cfgs(2);
19367		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
19368		let user_config = test_default_channel_config();
19369		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config.clone()), Some(user_config)]);
19370		let nodes = create_network(2, &node_cfg, &node_chanmgr);
19371		let message = "Channel force-closed".to_owned();
19372
19373		// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
19374		let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
19375		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
19376		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
19377		nodes[0]
19378			.node
19379			.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), message.clone())
19380			.unwrap();
19381		check_closed_broadcast(&nodes[0], 1, false);
19382		check_added_monitors(&nodes[0], 1);
19383		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message };
19384		check_closed_event!(nodes[0], 1, reason, [nodes[1].node.get_our_node_id()], 100000);
19385		{
19386			let txn = nodes[0].tx_broadcaster.txn_broadcast();
19387			assert_eq!(txn.len(), 1);
19388			check_spends!(txn[0], funding_tx);
19389		}
19390
19391		// Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
19392		// such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
19393		// their side.
19394		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
19395			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
19396		}, true).unwrap();
19397		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
19398			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
19399		}, false).unwrap();
19400		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
19401		let channel_reestablish = get_event_msg!(
19402			nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
19403		);
19404		nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);
19405
19406		// Alice should respond with an error since the channel isn't known, but a bogus
19407		// `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
19408		// close even if it was an lnd node.
19409		let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
19410		assert_eq!(msg_events.len(), 2);
19411		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
19412			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
19413			assert_eq!(msg.next_local_commitment_number, 0);
19414			assert_eq!(msg.next_remote_commitment_number, 0);
19415			nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
19416		} else { panic!() };
19417		check_closed_broadcast(&nodes[1], 1, true);
19418		check_added_monitors(&nodes[1], 1);
19419		let expected_close_reason = ClosureReason::ProcessingError {
19420			err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
19421		};
19422		check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
19423		{
19424			let txn = nodes[1].tx_broadcaster.txn_broadcast();
19425			assert_eq!(txn.len(), 1);
19426			check_spends!(txn[0], funding_tx);
19427		}
19428	}
19429
19430	#[test]
19431	#[rustfmt::skip]
19432	fn test_malformed_forward_htlcs_ser() {
19433		// Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
19434		let chanmon_cfg = create_chanmon_cfgs(1);
19435		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
19436		let persister;
19437		let chain_monitor;
19438		let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
19439		let deserialized_chanmgr;
19440		let mut nodes = create_network(1, &node_cfg, &chanmgrs);
19441
19442		let dummy_failed_htlc = |htlc_id| {
19443			HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42], attribution_data: Some(AttributionData::new()) } }
19444		};
19445		let dummy_malformed_htlc = |htlc_id| {
19446			HTLCForwardInfo::FailMalformedHTLC {
19447				htlc_id,
19448				failure_code: LocalHTLCFailureReason::InvalidOnionPayload.failure_code(),
19449				sha256_of_onion: [0; 32],
19450			}
19451		};
19452
19453		let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
19454			if htlc_id % 2 == 0 {
19455				dummy_failed_htlc(htlc_id)
19456			} else {
19457				dummy_malformed_htlc(htlc_id)
19458			}
19459		}).collect();
19460
19461		let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
19462			if htlc_id % 2 == 1 {
19463				dummy_failed_htlc(htlc_id)
19464			} else {
19465				dummy_malformed_htlc(htlc_id)
19466			}
19467		}).collect();
19468
19470		let (scid_1, scid_2) = (42, 43);
19471		let mut forward_htlcs = new_hash_map();
19472		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
19473		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
19474
19475		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
19476		*chanmgr_fwd_htlcs = forward_htlcs.clone();
19477		core::mem::drop(chanmgr_fwd_htlcs);
19478
19479		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
19480
19481		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
19482		for scid in [scid_1, scid_2].iter() {
19483			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
19484			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
19485		}
19486		assert!(deserialized_fwd_htlcs.is_empty());
19487		core::mem::drop(deserialized_fwd_htlcs);
19488	}
19489}
19490
19491#[cfg(ldk_bench)]
19492pub mod bench {
19493	use crate::chain::chainmonitor::{ChainMonitor, Persist};
19494	use crate::chain::Listen;
19495	use crate::events::Event;
19496	use crate::ln::channelmanager::{
19497		BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentId, PaymentPreimage,
19498		RecipientOnionFields, Retry,
19499	};
19500	use crate::ln::functional_test_utils::*;
19501	use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, Init, MessageSendEvent};
19502	use crate::routing::gossip::NetworkGraph;
19503	use crate::routing::router::{PaymentParameters, RouteParameters};
19504	use crate::sign::{InMemorySigner, KeysManager, NodeSigner};
19505	use crate::util::config::{MaxDustHTLCExposure, UserConfig};
19506	use crate::util::test_utils;
19507
19508	use bitcoin::amount::Amount;
19509	use bitcoin::hashes::sha256::Hash as Sha256;
19510	use bitcoin::hashes::Hash;
19511	use bitcoin::locktime::absolute::LockTime;
19512	use bitcoin::transaction::Version;
19513	use bitcoin::{Transaction, TxOut};
19514
19515	use crate::sync::{Arc, RwLock};
19516
19517	use criterion::Criterion;
19518
19519	type Manager<'a, P> = ChannelManager<
19520		&'a ChainMonitor<
19521			InMemorySigner,
19522			&'a test_utils::TestChainSource,
19523			&'a test_utils::TestBroadcaster,
19524			&'a test_utils::TestFeeEstimator,
19525			&'a test_utils::TestLogger,
19526			&'a P,
19527			&'a KeysManager,
19528		>,
19529		&'a test_utils::TestBroadcaster,
19530		&'a KeysManager,
19531		&'a KeysManager,
19532		&'a KeysManager,
19533		&'a test_utils::TestFeeEstimator,
19534		&'a test_utils::TestRouter<'a>,
19535		&'a test_utils::TestMessageRouter<'a>,
19536		&'a test_utils::TestLogger,
19537	>;
19538
19539	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
19540		node: &'node_cfg Manager<'chan_mon_cfg, P>,
19541	}
19542	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder
19543		for ANodeHolder<'node_cfg, 'chan_mon_cfg, P>
19544	{
19545		type CM = Manager<'chan_mon_cfg, P>;
19546		#[inline]
19547		#[rustfmt::skip]
19548		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
19549		#[inline]
19550		#[rustfmt::skip]
19551		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
19552	}
19553
19554	#[rustfmt::skip]
19555	pub fn bench_sends(bench: &mut Criterion) {
19556		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
19557	}
19558
19559	#[rustfmt::skip]
19560	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
19561		// Do a simple benchmark of sending a payment back and forth between two nodes.
19562		// Note that this is unrealistic as each payment send will require at least two fsync
19563		// calls per node.
19564		let network = bitcoin::Network::Testnet;
19565		let genesis_block = bitcoin::constants::genesis_block(network);
19566
19567		let tx_broadcaster = test_utils::TestBroadcaster::new(network);
19568		let fee_estimator = test_utils::TestFeeEstimator::new(253);
19569		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
19570		let scorer = RwLock::new(test_utils::TestScorer::new());
19571		let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
19572		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
19573		let message_router = test_utils::TestMessageRouter::new_default(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);
19574
19575		let mut config: UserConfig = Default::default();
19576		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
19577		config.channel_handshake_config.minimum_depth = 1;
19578
19579		let seed_a = [1u8; 32];
19580		let keys_manager_a = KeysManager::new(&seed_a, 42, 42, true);
19581		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a, &keys_manager_a, keys_manager_a.get_peer_storage_key());
19582		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
19583			network,
19584			best_block: BestBlock::from_network(network),
19585		}, genesis_block.header.time);
19586		let node_a_holder = ANodeHolder { node: &node_a };
19587
19588		let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
19589		let seed_b = [2u8; 32];
19590		let keys_manager_b = KeysManager::new(&seed_b, 42, 42, true);
19591		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b, &keys_manager_b, keys_manager_b.get_peer_storage_key());
19592		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
19593			network,
19594			best_block: BestBlock::from_network(network),
19595		}, genesis_block.header.time);
19596		let node_b_holder = ANodeHolder { node: &node_b };
19597
19598		node_a.peer_connected(node_b.get_our_node_id(), &Init {
19599			features: node_b.init_features(), networks: None, remote_network_address: None
19600		}, true).unwrap();
19601		node_b.peer_connected(node_a.get_our_node_id(), &Init {
19602			features: node_a.init_features(), networks: None, remote_network_address: None
19603		}, false).unwrap();
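		// Open and fund a single channel between the two nodes so the benchmark loop below has a
		// live channel to send payments over.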
19604		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
19605		node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
19606		node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
19607
19608		let tx;
19609		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
19610			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
19611				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
19612			}]};
19613			node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
19614		} else { panic!(); }
19615
19616		node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
19617		let events_b = node_b.get_and_clear_pending_events();
19618		assert_eq!(events_b.len(), 1);
19619		match events_b[0] {
19620			Event::ChannelPending{ ref counterparty_node_id, .. } => {
19621				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
19622			},
19623			_ => panic!("Unexpected event"),
19624		}
19625
19626		node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
19627		let events_a = node_a.get_and_clear_pending_events();
19628		assert_eq!(events_a.len(), 1);
19629		match events_a[0] {
19630			Event::ChannelPending{ ref counterparty_node_id, .. } => {
19631				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
19632			},
19633			_ => panic!("Unexpected event"),
19634		}
19635
19636		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
19637
19638		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
19639		Listen::block_connected(&node_a, &block, 1);
19640		Listen::block_connected(&node_b, &block, 1);
19641
19642		node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
19643		let msg_events = node_a.get_and_clear_pending_msg_events();
19644		assert_eq!(msg_events.len(), 2);
19645		match msg_events[0] {
19646			MessageSendEvent::SendChannelReady { ref msg, .. } => {
19647				node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
19648				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
19649			},
19650			_ => panic!(),
19651		}
19652		match msg_events[1] {
19653			MessageSendEvent::SendChannelUpdate { .. } => {},
19654			_ => panic!(),
19655		}
19656
19657		let events_a = node_a.get_and_clear_pending_events();
19658		assert_eq!(events_a.len(), 1);
19659		match events_a[0] {
19660			Event::ChannelReady{ ref counterparty_node_id, .. } => {
19661				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
19662			},
19663			_ => panic!("Unexpected event"),
19664		}
19665
19666		let events_b = node_b.get_and_clear_pending_events();
19667		assert_eq!(events_b.len(), 1);
19668		match events_b[0] {
19669			Event::ChannelReady{ ref counterparty_node_id, .. } => {
19670				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
19671			},
19672			_ => panic!("Unexpected event"),
19673		}
19674
19675		let mut payment_count: u64 = 0;
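		// Each `send_payment!` invocation sends a fresh 10_000 msat payment from $node_a to $node_b,
		// driving the full update_add/commitment/RAA dance and the claim by hand, handling each
		// message directly rather than via a background processor.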
19676		macro_rules! send_payment {
19677			($node_a: expr, $node_b: expr) => {
19678				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
19679					.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
19680				let mut payment_preimage = PaymentPreimage([0; 32]);
19681				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
19682				payment_count += 1;
19683				let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
19684				let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
19685
19686				$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
19687					PaymentId(payment_hash.0),
19688					RouteParameters::from_payment_params_and_value(payment_params, 10_000),
19689					Retry::Attempts(0)).unwrap();
19690				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
19691				$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
19692				$node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &payment_event.commitment_msg);
19693				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
19694				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
19695				$node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &cs);
19696				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
19697
19698				$node_b.process_pending_htlc_forwards();
19699				expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
19700				$node_b.claim_funds(payment_preimage);
19701				expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
19702
19703				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
19704					MessageSendEvent::UpdateHTLCs { node_id, mut updates, .. } => {
19705						assert_eq!(node_id, $node_a.get_our_node_id());
19706						let fulfill = updates.update_fulfill_htlcs.remove(0);
19707						$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), fulfill);
19708						$node_a.handle_commitment_signed_batch_test($node_b.get_our_node_id(), &updates.commitment_signed);
19709					},
19710					_ => panic!("Failed to generate claim event"),
19711				}
19712
19713				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
19714				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
19715				$node_b.handle_commitment_signed_batch_test($node_a.get_our_node_id(), &cs);
19716				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
19717
19718				expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
19719			}
19720		}
19721
19722		bench.bench_function(bench_name, |b| b.iter(|| {
19723			send_payment!(node_a, node_b);
19724			send_payment!(node_b, node_a);
19725		}));
19726	}
19727}