lightning/ln/channelmanager.rs

// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The top-level channel management and payment tracking stuff lives here.
//!
//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).

use bitcoin::block::Header;
use bitcoin::transaction::{Transaction, TxIn};
use bitcoin::constants::ChainHash;
use bitcoin::key::constants::SECRET_KEY_SIZE;
use bitcoin::network::Network;

use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
use bitcoin::hashes::hmac::Hmac;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, Txid};

use bitcoin::secp256k1::{SecretKey,PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{secp256k1, Sequence, Weight};

use crate::events::FundingInfo;
use crate::blinded_path::message::{AsyncPaymentsContext, MessageContext, OffersContext};
use crate::blinded_path::NodeIdLookUp;
use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode};
use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::inbound_payment;
use crate::ln::types::ChannelId;
use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext, InteractivelyFunded as _};
#[cfg(any(dual_funding, splicing))]
use crate::ln::channel::InboundV2Channel;
use crate::ln::channel_state::ChannelDetails;
use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::types::features::Bolt11InvoiceFeatures;
use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration};
use crate::offers::invoice::{Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
use crate::offers::invoice_error::InvoiceError;
use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestBuilder};
use crate::offers::nonce::Nonce;
use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
use crate::offers::signer;
#[cfg(async_payments)]
use crate::offers::static_invoice::StaticInvoice;
use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
use crate::onion_message::dns_resolution::HumanReadableName;
use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::ser::TransactionU16LenLimited;
use crate::util::logger::{Level, Logger, WithContext};
use crate::util::errors::APIError;

#[cfg(feature = "dnssec")]
use crate::blinded_path::message::DNSResolverContext;
#[cfg(feature = "dnssec")]
use crate::onion_message::dns_resolution::{DNSResolverMessage, DNSResolverMessageHandler, DNSSECQuery, DNSSECProof, OMNameResolver};

#[cfg(not(c_bindings))]
use {
	crate::offers::offer::DerivedMetadata,
	crate::onion_message::messenger::DefaultMessageRouter,
	crate::routing::router::DefaultRouter,
	crate::routing::gossip::NetworkGraph,
	crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
	crate::sign::KeysManager,
};
#[cfg(c_bindings)]
use {
	crate::offers::offer::OfferWithDerivedMetadataBuilder,
	crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
};

use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description, InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME};

use alloc::collections::{btree_map, BTreeMap};

use crate::io;
use crate::prelude::*;
use core::{cmp, mem};
use core::borrow::Borrow;
use core::cell::RefCell;
use crate::io::Read;
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use core::time::Duration;
use core::ops::Deref;
use bitcoin::hex::impl_fmt_traits;
// Re-export this for use in the public API.
pub use crate::ln::outbound_payment::{Bolt12PaymentError, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
#[cfg(test)]
pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
use crate::ln::script::ShutdownScript;

// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
// forward the HTLC with information it will give back to us when it does so, or if it should Fail
// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
//
// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
// with it to track where it came from (in case of onwards-forward error), waiting a random delay
// before we forward it.
//
// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.

/// Information about where a received HTLC('s onion) has indicated the HTLC should go.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
#[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
	/// An HTLC which should be forwarded on to another node.
	Forward {
		/// The onion which should be included in the forwarded HTLC, telling the next hop what to
		/// do with the HTLC.
		onion_packet: msgs::OnionPacket,
		/// The short channel ID of the channel which we were instructed to forward this HTLC to.
		///
		/// This could be a real on-chain SCID, an SCID alias, or some other SCID which has meaning
		/// to the receiving node, such as one returned from
		/// [`ChannelManager::get_intercept_scid`] or [`ChannelManager::get_phantom_scid`].
		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
		/// Set if this HTLC is being forwarded within a blinded path.
		blinded: Option<BlindedForward>,
		/// The absolute CLTV of the inbound HTLC
		incoming_cltv_expiry: Option<u32>,
	},
	/// The onion indicates that this is a payment for an invoice (supposedly) generated by us.
	///
	/// Note that at this point, we have not checked that the invoice being paid was actually
	/// generated by us, but rather it's claiming to pay an invoice of ours.
	Receive {
		/// Information about the amount the sender intended to pay and (potential) proof that this
		/// is a payment for an invoice we generated. This proof of payment is also used for
		/// linking MPP parts of a larger payment.
		payment_data: msgs::FinalOnionHopData,
		/// Additional data which we (allegedly) instructed the sender to include in the onion.
		///
		/// For HTLCs received by LDK, this will ultimately be exposed in
		/// [`Event::PaymentClaimable::onion_fields`] as
		/// [`RecipientOnionFields::payment_metadata`].
		payment_metadata: Option<Vec<u8>>,
		/// The context of the payment included by the recipient in a blinded path, or `None` if a
		/// blinded path was not used.
		///
		/// Used in part to determine the [`events::PaymentPurpose`].
		payment_context: Option<PaymentContext>,
		/// CLTV expiry of the received HTLC.
		///
		/// Used to track when we should expire pending HTLCs that go unclaimed.
		incoming_cltv_expiry: u32,
		/// If the onion had forwarding instructions to one of our phantom node SCIDs, this will
		/// provide the onion shared secret used to decrypt the next level of forwarding
		/// instructions.
		phantom_shared_secret: Option<[u8; 32]>,
		/// Custom TLVs which were set by the sender.
		///
		/// For HTLCs received by LDK, this will ultimately be exposed in
		/// [`Event::PaymentClaimable::onion_fields`] as
		/// [`RecipientOnionFields::custom_tlvs`].
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// Set if this HTLC is the final hop in a multi-hop blinded path.
		requires_blinded_error: bool,
	},
	/// The onion indicates that this is for payment to us but which contains the preimage for
	/// claiming included, and is unrelated to any invoice we'd previously generated (aka a
	/// "keysend" or "spontaneous" payment).
	ReceiveKeysend {
		/// Information about the amount the sender intended to pay and possibly a token to
		/// associate MPP parts of a larger payment.
		///
		/// This will only be filled in if receiving MPP keysend payments is enabled, and it being
		/// present will cause deserialization to fail on versions of LDK prior to 0.0.116.
		payment_data: Option<msgs::FinalOnionHopData>,
		/// Preimage for this onion payment. This preimage is provided by the sender and will be
		/// used to settle the spontaneous payment.
		payment_preimage: PaymentPreimage,
		/// Additional data which we (allegedly) instructed the sender to include in the onion.
		///
		/// For HTLCs received by LDK, this will ultimately bubble back up as
		/// [`RecipientOnionFields::payment_metadata`].
		payment_metadata: Option<Vec<u8>>,
		/// CLTV expiry of the received HTLC.
		///
		/// Used to track when we should expire pending HTLCs that go unclaimed.
		incoming_cltv_expiry: u32,
		/// Custom TLVs which were set by the sender.
		///
		/// For HTLCs received by LDK, these will ultimately bubble back up as
		/// [`RecipientOnionFields::custom_tlvs`].
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// Set if this HTLC is the final hop in a multi-hop blinded path.
		requires_blinded_error: bool,
		/// Set if we are receiving a keysend to a blinded path, meaning we created the
		/// [`PaymentSecret`] and should verify it using our
		/// [`NodeSigner::get_inbound_payment_key`].
		has_recipient_created_payment_secret: bool,
	},
}

/// Information used to forward or fail this HTLC that is being forwarded within a blinded path.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
	/// The `blinding_point` that was set in the inbound [`msgs::UpdateAddHTLC`], or in the inbound
	/// onion payload if we're the introduction node. Useful for calculating the next hop's
	/// [`msgs::UpdateAddHTLC::blinding_point`].
	pub inbound_blinding_point: PublicKey,
	/// If needed, this determines how this HTLC should be failed backwards, based on whether we are
	/// the introduction node.
	pub failure: BlindedFailure,
	/// Overrides the next hop's [`msgs::UpdateAddHTLC::blinding_point`]. Set if this HTLC is being
	/// forwarded within a [`BlindedPaymentPath`] that was concatenated to another blinded path that
	/// starts at the next hop.
	pub next_blinding_override: Option<PublicKey>,
}

impl PendingHTLCRouting {
	// Used to override the onion failure code and data if the HTLC is blinded.
	fn blinded_failure(&self) -> Option<BlindedFailure> {
		match self {
			Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
			Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
			Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
			_ => None,
		}
	}

	fn incoming_cltv_expiry(&self) -> Option<u32> {
		match self {
			Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
			Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
			Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
		}
	}
}

/// Information about an incoming HTLC, including the [`PendingHTLCRouting`] describing where it
/// should go next.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
#[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
	/// Further routing details based on whether the HTLC is being forwarded or received.
	pub routing: PendingHTLCRouting,
	/// The onion shared secret we build with the sender used to decrypt the onion.
	///
	/// This is later used to encrypt failure packets in the event that the HTLC is failed.
	pub incoming_shared_secret: [u8; 32],
	/// Hash of the payment preimage, to lock the payment until the receiver releases the preimage.
	pub payment_hash: PaymentHash,
	/// Amount received in the incoming HTLC.
	///
	/// This field was added in LDK 0.0.113 and will be `None` for objects written by prior
	/// versions.
	pub incoming_amt_msat: Option<u64>,
	/// The amount the sender indicated should be forwarded on to the next hop or amount the sender
	/// intended for us to receive for received payments.
	///
	/// If the received amount is less than this for received payments, an intermediary hop has
	/// attempted to steal some of our funds and we should fail the HTLC (the sender should retry
	/// it along another path).
	///
	/// Because nodes can take less than their required fees, and because senders may wish to
	/// improve their own privacy, this amount may be less than [`Self::incoming_amt_msat`] for
	/// received payments. In such cases, recipients must handle this HTLC as if it had received
	/// [`Self::outgoing_amt_msat`].
	pub outgoing_amt_msat: u64,
	/// The CLTV the sender has indicated we should set on the forwarded HTLC (or has indicated
	/// should have been set on the received HTLC for received payments).
	pub outgoing_cltv_value: u32,
	/// The fee taken for this HTLC in addition to the standard protocol HTLC fees.
	///
	/// If this is a payment for forwarding, this is the fee we are taking before forwarding the
	/// HTLC.
	///
	/// If this is a received payment, this is the fee that our counterparty took.
	///
	/// This is used to allow LSPs to take fees as a part of payments, without the sender having to
	/// shoulder them.
	pub skimmed_fee_msat: Option<u64>,
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum HTLCFailureMsg {
	Relay(msgs::UpdateFailHTLC),
	Malformed(msgs::UpdateFailMalformedHTLC),
}
/// Stores the info needed to forward an HTLC, or the failure message to send back if we can't.
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCStatus {
	Forward(PendingHTLCInfo),
	Fail(HTLCFailureMsg),
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
	pub(super) forward_info: PendingHTLCInfo,

	// These fields are produced in `forward_htlcs()` and consumed in
	// `process_pending_htlc_forwards()` for constructing the
	// `HTLCSource::PreviousHopData` for failed and forwarded
	// HTLCs.
	//
	// Note that this may be an outbound SCID alias for the associated channel.
	prev_short_channel_id: u64,
	prev_htlc_id: u64,
	prev_counterparty_node_id: Option<PublicKey>,
	prev_channel_id: ChannelId,
	prev_funding_outpoint: OutPoint,
	prev_user_channel_id: u128,
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
	AddHTLC(PendingAddHTLCInfo),
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

/// Whether this blinded HTLC is being failed backwards by the introduction node or a blinded node,
/// which determines the failure message that should be used.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum BlindedFailure {
	/// This HTLC is being failed backwards by the introduction node, and thus should be failed with
	/// [`msgs::UpdateFailHTLC`] and error code `0x8000|0x4000|24`.
	FromIntroductionNode,
	/// This HTLC is being failed backwards by a blinded node within the path, and thus should be
	/// failed with [`msgs::UpdateFailMalformedHTLC`] and error code `0x8000|0x4000|24`.
	FromBlindedNode,
}

/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
	// Note that this may be an outbound SCID alias for the associated channel.
	short_channel_id: u64,
	user_channel_id: Option<u128>,
	htlc_id: u64,
	incoming_packet_shared_secret: [u8; 32],
	phantom_shared_secret: Option<[u8; 32]>,
	blinded_failure: Option<BlindedFailure>,
	channel_id: ChannelId,

	// These fields are consumed by `claim_funds_from_hop()` when updating a force-closed backwards
	// channel with a preimage provided by the forward channel.
	outpoint: OutPoint,
	counterparty_node_id: Option<PublicKey>,
	/// Used to preserve our backwards channel by failing back in case an HTLC claim in the forward
	/// channel remains unconfirmed for too long.
	cltv_expiry: Option<u32>,
}

#[derive(PartialEq, Eq)]
enum OnionPayload {
	/// Indicates this incoming onion payload is for the purpose of paying an invoice.
	Invoice {
		/// This is only here for backwards-compatibility in serialization, in the future it can be
		/// removed, breaking clients running 0.0.106 and earlier.
		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
	},
	/// Contains the payer-provided preimage.
	Spontaneous(PaymentPreimage),
}

/// HTLCs that are to us and can be failed/claimed by the user
#[derive(PartialEq, Eq)]
struct ClaimableHTLC {
	prev_hop: HTLCPreviousHopData,
	cltv_expiry: u32,
	/// The amount (in msats) of this MPP part
	value: u64,
	/// The amount (in msats) that the sender intended to be sent in this MPP
	/// part (used for validating total MPP amount)
	sender_intended_value: u64,
	onion_payload: OnionPayload,
	timer_ticks: u8,
	/// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
	/// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
	total_value_received: Option<u64>,
	/// The sender intended sum total of all MPP parts specified in the onion
	total_msat: u64,
	/// The extra fee our counterparty skimmed off the top of this HTLC.
	counterparty_skimmed_fee_msat: Option<u64>,
}

impl From<&ClaimableHTLC> for events::ClaimedHTLC {
	fn from(val: &ClaimableHTLC) -> Self {
		events::ClaimedHTLC {
			channel_id: val.prev_hop.channel_id,
			user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
			cltv_expiry: val.cltv_expiry,
			value_msat: val.value,
			counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
		}
	}
}

impl PartialOrd for ClaimableHTLC {
	fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}
impl Ord for ClaimableHTLC {
	fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
		let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id).cmp(
			&(other.prev_hop.channel_id, other.prev_hop.htlc_id)
		);
		if res.is_eq() {
			debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
		}
		res
	}
}
/// A trait defining behavior for creating and verifying the HMAC used to authenticate a given piece of data.
pub trait Verification {
	/// Constructs an HMAC to include in [`OffersContext`] for the data along with the given
	/// [`Nonce`].
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256>;

	/// Authenticates the data using an HMAC and a [`Nonce`] taken from an [`OffersContext`].
	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()>;
}

impl Verification for PaymentHash {
	/// Constructs an HMAC to include in [`OffersContext::InboundPayment`] for the payment hash
	/// along with the given [`Nonce`].
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_payment_hash(*self, nonce, expanded_key)
	}

	/// Authenticates the payment hash using an HMAC and a [`Nonce`] taken from an
	/// [`OffersContext::InboundPayment`].
	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_payment_hash(*self, hmac, nonce, expanded_key)
	}
}

impl Verification for UnauthenticatedReceiveTlvs {
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
	}

	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
	}
}

/// A user-provided identifier in [`ChannelManager::send_payment`] used to uniquely identify
/// a payment and ensure idempotency in LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct PaymentId(pub [u8; Self::LENGTH]);

impl PaymentId {
	/// Number of bytes in the id.
	pub const LENGTH: usize = 32;

	/// Constructs an HMAC to include in [`AsyncPaymentsContext::OutboundPayment`] for the payment id
	/// along with the given [`Nonce`].
	#[cfg(async_payments)]
	pub fn hmac_for_async_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_async_payment_id(*self, nonce, expanded_key)
	}

	/// Authenticates the payment id using an HMAC and a [`Nonce`] taken from an
	/// [`AsyncPaymentsContext::OutboundPayment`].
	#[cfg(async_payments)]
	pub fn verify_for_async_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_async_payment_id(*self, hmac, nonce, expanded_key)
	}
}

impl Verification for PaymentId {
	/// Constructs an HMAC to include in [`OffersContext::OutboundPayment`] for the payment id
	/// along with the given [`Nonce`].
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_offer_payment_id(*self, nonce, expanded_key)
	}

	/// Authenticates the payment id using an HMAC and a [`Nonce`] taken from an
	/// [`OffersContext::OutboundPayment`].
	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_offer_payment_id(*self, hmac, nonce, expanded_key)
	}
}
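
// Illustrative sketch (not part of upstream LDK): the `Verification` impls above are used as an
// HMAC-then-verify round trip. Assuming an `expanded_key` (e.g. built from the node's inbound
// payment key material) and a `nonce` (e.g. from `Nonce::from_entropy_source`) are in scope,
// authenticating a `PaymentId` carried in an `OffersContext` looks roughly like:
//
//   let hmac = payment_id.hmac_for_offer_payment(nonce, &expanded_key);
//   // ... store (payment_id, nonce, hmac) in the context; later, when it is handed back ...
//   debug_assert!(payment_id.verify_for_offer_payment(hmac, nonce, &expanded_key).is_ok());
//
// A tampered or foreign context fails verification, which is why the HMAC and nonce are carried
// in the context rather than trusting counterparty-supplied data as-is.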

impl PaymentId {
	fn for_inbound_from_htlcs<I: Iterator<Item=(ChannelId, u64)>>(key: &[u8; 32], htlcs: I) -> PaymentId {
		let mut prev_pair = None;
		let mut hasher = HmacEngine::new(key);
		for (channel_id, htlc_id) in htlcs {
			hasher.input(&channel_id.0);
			hasher.input(&htlc_id.to_le_bytes());
			if let Some(prev) = prev_pair {
				debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
			}
			prev_pair = Some((channel_id, htlc_id));
		}
		PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
	}
}

impl Borrow<[u8]> for PaymentId {
	fn borrow(&self) -> &[u8] {
		&self.0[..]
	}
}

impl_fmt_traits! {
	impl fmt_traits for PaymentId {
		const LENGTH: usize = 32;
	}
}

impl Writeable for PaymentId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}

impl Readable for PaymentId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(PaymentId(buf))
	}
}
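
// Illustrative sketch (not part of upstream LDK): `PaymentId` serializes as its raw 32 bytes via
// the `Writeable`/`Readable` impls above, so a round trip looks roughly like the following
// (assuming the provided `Writeable::encode` helper from `crate::util::ser` is available):
//
//   let id = PaymentId([42; 32]);
//   let bytes = id.encode();
//   assert_eq!(bytes.len(), PaymentId::LENGTH);
//   assert_eq!(PaymentId::read(&mut &bytes[..]).unwrap().0, id.0);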

/// An identifier used to uniquely identify an intercepted HTLC to LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);

impl Writeable for InterceptId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}

impl Readable for InterceptId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(InterceptId(buf))
	}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
impl SentHTLCId {
	pub(crate) fn from_source(source: &HTLCSource) -> Self {
		match source {
			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
				short_channel_id: hop_data.short_channel_id,
				htlc_id: hop_data.htlc_id,
			},
			HTLCSource::OutboundRoute { session_priv, .. } =>
				Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
		}
	}
}
impl_writeable_tlv_based_enum!(SentHTLCId,
	(0, PreviousHopData) => {
		(0, short_channel_id, required),
		(2, htlc_id, required),
	},
	(2, OutboundRoute) => {
		(0, session_priv, required),
	},
);


/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
	PreviousHopData(HTLCPreviousHopData),
	OutboundRoute {
		path: Path,
		session_priv: SecretKey,
		/// Technically we can recalculate this from the route, but we cache it here to avoid
		/// doing a double-pass on route when we get a failure back
		first_hop_htlc_msat: u64,
		payment_id: PaymentId,
	},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
impl core::hash::Hash for HTLCSource {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		match self {
			HTLCSource::PreviousHopData(prev_hop_data) => {
				0u8.hash(hasher);
				prev_hop_data.hash(hasher);
			},
			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
				1u8.hash(hasher);
				path.hash(hasher);
				session_priv[..].hash(hasher);
				payment_id.hash(hasher);
				first_hop_htlc_msat.hash(hasher);
			},
		}
	}
}
impl HTLCSource {
	#[cfg(all(ldk_test_vectors, test))]
	pub fn dummy() -> Self {
		assert!(cfg!(not(feature = "grind_signatures")));
		HTLCSource::OutboundRoute {
			path: Path { hops: Vec::new(), blinded_tail: None },
			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([2; 32]),
		}
	}

	#[cfg(debug_assertions)]
	/// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
	/// transaction. Useful to ensure different datastructures match up.
	pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
			*first_hop_htlc_msat == htlc.amount_msat
		} else {
			// There's nothing we can check for forwarded HTLCs
			true
		}
	}

	/// Returns the CLTV expiry of the inbound HTLC (i.e. the source referred to by this object),
	/// if the source was a forwarded HTLC and the HTLC was first forwarded on LDK 0.1.1 or later.
	pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
		match self {
			Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
			_ => None,
		}
	}
}

/// This enum is used to specify which error data to send to peers when failing back an HTLC
/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
///
/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
#[derive(Clone, Copy)]
pub enum FailureCode {
	/// We had a temporary error processing the payment. Useful if no other error codes fit
	/// and you want to indicate that the payer may want to retry.
	TemporaryNodeFailure,
	/// We have a required feature which was not in this onion. For example, you may require
	/// some additional metadata that was not provided with this payment.
	RequiredNodeFeatureMissing,
	/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
	/// the HTLC is too close to the current block height for safe handling.
	/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
	/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
	IncorrectOrUnknownPaymentDetails,
	/// We failed to process the payload after the onion was decrypted. You may wish to
	/// use this when receiving custom HTLC TLVs with even type numbers that you don't recognize.
	///
	/// If available, the tuple data may include the type number and byte offset in the
	/// decrypted byte stream where the failure occurred.
	InvalidOnionPayload(Option<(u64, u16)>),
}

impl Into<u16> for FailureCode {
	fn into(self) -> u16 {
		match self {
			FailureCode::TemporaryNodeFailure => 0x2000 | 2,
			FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
			FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
			FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
		}
	}
}
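
// Illustrative note (not part of upstream LDK): the values above compose the BOLT 4 failure-code
// flag bits (0x8000 BADONION, 0x4000 PERM, 0x2000 NODE, 0x1000 UPDATE) with the per-failure code
// number. For example:
//
//   let code: u16 = FailureCode::TemporaryNodeFailure.into();
//   assert_eq!(code, 0x2002); // NODE (0x2000) | temporary_node_failure (2)
//
//   let code: u16 = FailureCode::IncorrectOrUnknownPaymentDetails.into();
//   assert_eq!(code, 0x400f); // PERM (0x4000) | incorrect_or_unknown_payment_details (15)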

/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// immediately (ie with no further calls on it made). Thus, this step happens inside a
/// peer_state lock. We then return the set of things that need to be done outside the lock in
/// this struct and call handle_error!() on it.
struct MsgHandleErrInternal {
	err: msgs::LightningError,
	closes_channel: bool,
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
	#[inline]
	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
		Self { err, closes_channel: false, shutdown_finish: None }
	}
	#[inline]
	fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
		let action = if shutdown_res.monitor_update.is_some() {
			// We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we
			// should disconnect our peer such that we force them to broadcast their latest
			// commitment upon reconnecting.
			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
		} else {
			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
		};
		Self {
			err: LightningError { err, action },
			closes_channel: true,
			shutdown_finish: Some((shutdown_res, channel_update)),
		}
	}
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
		Self {
			err: match err {
				ChannelError::Warn(msg) =>  LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id,
							data: msg
						},
						log_level: Level::Warn,
					},
				},
				ChannelError::Ignore(msg) => LightningError {
					err: msg,
					action: msgs::ErrorAction::IgnoreError,
				},
				ChannelError::Close((msg, _reason)) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}

	fn closes_channel(&self) -> bool {
		self.closes_channel
	}
}

/// We hold back HTLCs we intend to relay for a random interval greater than this (see
/// Event::PendingHTLCsForwardable for the API guidelines indicating how long to wait).
/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;

/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// they were originally sent). In those cases, this enum is also returned.
#[derive(Clone, PartialEq, Debug)]
pub(super) enum RAACommitmentOrder {
	/// Send the CommitmentUpdate messages first
	CommitmentFirst,
	/// Send the RevokeAndACK message first
	RevokeAndACKFirst,
}

/// Information about a payment which is currently being claimed.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
	amount_msat: u64,
	payment_purpose: events::PaymentPurpose,
	receiver_node_id: PublicKey,
	htlcs: Vec<events::ClaimedHTLC>,
	sender_intended_value: Option<u64>,
	onion_fields: Option<RecipientOnionFields>,
	payment_id: Option<PaymentId>,
}
impl_writeable_tlv_based!(ClaimingPayment, {
	(0, amount_msat, required),
	(2, payment_purpose, required),
	(4, receiver_node_id, required),
	(5, htlcs, optional_vec),
	(7, sender_intended_value, option),
	(9, onion_fields, option),
	(11, payment_id, option),
});

struct ClaimablePayment {
	purpose: events::PaymentPurpose,
	onion_fields: Option<RecipientOnionFields>,
	htlcs: Vec<ClaimableHTLC>,
}

impl ClaimablePayment {
	fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
		PaymentId::for_inbound_from_htlcs(
			secret,
			self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id))
		)
	}
}

/// Represents the channel funding transaction type.
enum FundingType {
	/// This variant is useful when we want LDK to validate the funding transaction and
	/// broadcast it automatically.
	///
	/// This is the normal flow.
	Checked(Transaction),
	/// This variant is useful when we want to loosen the validation checks and let the caller
	/// validate and broadcast the funding transaction manually.
	///
	/// This is useful when the funding transaction is constructed as part of another flow and the
	/// caller wants to perform validation and broadcasting itself. An example of such a scenario
	/// is constructing the funding transaction as part of a Payjoin transaction.
	Unchecked(OutPoint),
}

impl FundingType {
	fn txid(&self) -> Txid {
		match self {
			FundingType::Checked(tx) => tx.compute_txid(),
			FundingType::Unchecked(outp) => outp.txid,
		}
	}

	fn transaction_or_dummy(&self) -> Transaction {
		match self {
			FundingType::Checked(tx) => tx.clone(),
			FundingType::Unchecked(_) => Transaction {
				version: bitcoin::transaction::Version::TWO,
				lock_time: bitcoin::absolute::LockTime::ZERO,
				input: Vec::new(),
				output: Vec::new(),
			},
		}
	}

	fn is_manual_broadcast(&self) -> bool {
		match self {
			FundingType::Checked(_) => false,
			FundingType::Unchecked(_) => true,
		}
	}
}

/// Information about claimable or being-claimed payments
struct ClaimablePayments {
	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
	/// failed/claimed by the user.
	///
	/// Note that no consistency guarantees are made about the channels given here actually
940	/// existing anymore by the time you go to read them!
941	///
942	/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
943	/// we don't get a duplicate payment.
944	claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
945
946	/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
947	/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
948	/// as an [`events::Event::PaymentClaimed`].
949	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
950}
951
952impl ClaimablePayments {
953	/// Moves a payment from [`Self::claimable_payments`] to [`Self::pending_claiming_payments`].
954	///
955	/// If `custom_tlvs_known` is false and custom even TLVs are set by the sender, the set of
956	/// pending HTLCs will be returned in the `Err` variant of this method. They MUST then be
957	/// failed by the caller as they will not be in either [`Self::claimable_payments`] or
958	/// [`Self::pending_claiming_payments`].
959	///
960	/// If `custom_tlvs_known` is true, and a matching payment is found, it will always be moved.
961	///
962	/// If no payment is found, `Err(Vec::new())` is returned.
963	fn begin_claiming_payment<L: Deref, S: Deref>(
964		&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
965		inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
966	) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
967		where L::Target: Logger, S::Target: NodeSigner,
968	{
969		match self.claimable_payments.remove(&payment_hash) {
970			Some(payment) => {
971				let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
972					.expect("Failed to get node_id for node recipient");
973				for htlc in payment.htlcs.iter() {
974					if htlc.prev_hop.phantom_shared_secret.is_some() {
975						let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
976							.expect("Failed to get node_id for phantom node recipient");
977						receiver_node_id = phantom_pubkey;
978						break;
979					}
980				}
981
982				if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
983					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
984						log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
985							&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
986						return Err(payment.htlcs);
987					}
988				}
989
990				let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
991				let claiming_payment = self.pending_claiming_payments
992					.entry(payment_hash)
993					.and_modify(|_| {
994						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
995						log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
996							&payment_hash);
997					})
998					.or_insert_with(|| {
999						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
1000						let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
1001						ClaimingPayment {
1002							amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
1003							payment_purpose: payment.purpose,
1004							receiver_node_id,
1005							htlcs,
1006							sender_intended_value,
1007							onion_fields: payment.onion_fields,
1008							payment_id: Some(payment_id),
1009						}
1010					}).clone();
1011
1012				Ok((payment.htlcs, claiming_payment))
1013			},
1014			None => Err(Vec::new())
1015		}
1016	}
1017}
1018
1019/// Events which we process internally but cannot be processed immediately at the generation site
1020/// usually because we're running pre-full-init. They are handled immediately once we detect we are
1021/// running normally, and specifically must be processed before any other non-background
1022/// [`ChannelMonitorUpdate`]s are applied.
1023#[derive(Debug)]
1024enum BackgroundEvent {
1025	/// Handle a ChannelMonitorUpdate which closes the channel or for an already-closed channel.
1026	/// This is only separated from [`Self::MonitorUpdateRegeneratedOnStartup`] as for truly
1027	/// ancient [`ChannelMonitor`]s that haven't seen an update since LDK 0.0.118 we may not have
1028	/// the counterparty node ID available.
1029	///
1030	/// Note that any such events are lost on shutdown, so in general they must be updates which
1031	/// are regenerated on startup.
1032	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
1033	/// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the
1034	/// channel to continue normal operation.
1035	///
1036	/// In general this should be used rather than
1037	/// [`Self::ClosedMonitorUpdateRegeneratedOnStartup`], however in cases where the
1038	/// `counterparty_node_id` is not available as the channel has closed from a [`ChannelMonitor`]
1039	/// error the other variant is acceptable.
1040	///
1041	/// Any such events that exist in [`ChannelManager::pending_background_events`] will *also* be
1042	/// tracked in [`PeerState::in_flight_monitor_updates`].
1043	///
1044	/// Note that any such events are lost on shutdown, so in general they must be updates which
1045	/// are regenerated on startup.
1046	MonitorUpdateRegeneratedOnStartup {
1047		counterparty_node_id: PublicKey,
1048		funding_txo: OutPoint,
1049		channel_id: ChannelId,
1050		update: ChannelMonitorUpdate
1051	},
1052	/// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have
1053	/// them marked pending, thus we need to run any [`MonitorUpdateCompletionAction`] (s) pending
1054	/// on a channel.
1055	MonitorUpdatesComplete {
1056		counterparty_node_id: PublicKey,
1057		channel_id: ChannelId,
1058	},
1059}
1060
1061/// A pointer to a channel that is unblocked when an event is surfaced
1062#[derive(Debug)]
1063pub(crate) struct EventUnblockedChannel {
1064	counterparty_node_id: PublicKey,
1065	funding_txo: OutPoint,
1066	channel_id: ChannelId,
1067	blocking_action: RAAMonitorUpdateBlockingAction,
1068}
1069
1070impl Writeable for EventUnblockedChannel {
1071	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1072		self.counterparty_node_id.write(writer)?;
1073		self.funding_txo.write(writer)?;
1074		self.channel_id.write(writer)?;
1075		self.blocking_action.write(writer)
1076	}
1077}
1078
1079impl MaybeReadable for EventUnblockedChannel {
1080	fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1081		let counterparty_node_id = Readable::read(reader)?;
1082		let funding_txo = Readable::read(reader)?;
1083		let channel_id = Readable::read(reader)?;
1084		let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
1085			Some(blocking_action) => blocking_action,
1086			None => return Ok(None),
1087		};
1088		Ok(Some(EventUnblockedChannel {
1089			counterparty_node_id,
1090			funding_txo,
1091			channel_id,
1092			blocking_action,
1093		}))
1094	}
1095}
1096
1097#[derive(Debug)]
1098pub(crate) enum MonitorUpdateCompletionAction {
1099	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
1100	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
1101	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
1102	/// event can be generated.
1103	PaymentClaimed {
1104		payment_hash: PaymentHash,
1105		/// A pending MPP claim which hasn't yet completed.
1106		///
1107		/// Not written to disk.
1108		pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>,
1109	},
1110	/// Indicates an [`events::Event`] should be surfaced to the user and possibly resume the
1111	/// operation of another channel.
1112	///
1113	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1114	/// from completing a monitor update which removes the payment preimage until the inbound edge
1115	/// completes a monitor update containing the payment preimage. In that case, after the inbound
1116	/// edge completes, we will surface an [`Event::PaymentForwarded`] as well as unblock the
1117	/// outbound edge.
1118	EmitEventAndFreeOtherChannel {
1119		event: events::Event,
1120		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
1121	},
1122	/// Indicates we should immediately resume the operation of another channel, unless there is
1123	/// some other reason why the channel is blocked. In practice this simply means immediately
1124	/// removing the [`RAAMonitorUpdateBlockingAction`] provided from the blocking set.
1125	///
1126	/// This is usually generated when we've forwarded an HTLC and want to block the outbound edge
1127	/// from completing a monitor update which removes the payment preimage until the inbound edge
1128	/// completes a monitor update containing the payment preimage. However, we use this variant
1129	/// instead of [`Self::EmitEventAndFreeOtherChannel`] when we discover that the claim was in
1130	/// fact duplicative and we simply want to resume the outbound edge channel immediately.
1131	///
1132	/// This variant should thus never be written to disk, as it is processed inline rather than
1133	/// stored for later processing.
1134	FreeOtherChannelImmediately {
1135		downstream_counterparty_node_id: PublicKey,
1136		downstream_funding_outpoint: OutPoint,
1137		blocking_action: RAAMonitorUpdateBlockingAction,
1138		downstream_channel_id: ChannelId,
1139	},
1140}
1141
1142impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
1143	(0, PaymentClaimed) => {
1144		(0, payment_hash, required),
1145		(9999999999, pending_mpp_claim, (static_value, None)),
1146	},
1147	// Note that FreeOtherChannelImmediately should never be written - we were supposed to free
1148	// *immediately*. However, for simplicity we implement read/write here.
1149	(1, FreeOtherChannelImmediately) => {
1150		(0, downstream_counterparty_node_id, required),
1151		(2, downstream_funding_outpoint, required),
1152		(4, blocking_action, upgradable_required),
1153		// Note that by the time we get past the required read above, downstream_funding_outpoint will be
1154		// filled in, so we can safely unwrap it here.
1155		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
1156	},
1157	(2, EmitEventAndFreeOtherChannel) => {
1158		(0, event, upgradable_required),
1159		// LDK prior to 0.0.116 did not have this field as the monitor update application order was
1160		// required by clients. If we downgrade to something prior to 0.0.116 this may result in
1161		// monitor updates which aren't properly blocked or resumed, however that's fine - we don't
1162		// support async monitor updates even in LDK 0.0.116 and once we do we'll require no
1163		// downgrades to prior versions.
1164		(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
1165	},
1166);
1167
1168#[derive(Clone, Debug, PartialEq, Eq)]
1169pub(crate) enum EventCompletionAction {
1170	ReleaseRAAChannelMonitorUpdate {
1171		counterparty_node_id: PublicKey,
1172		channel_funding_outpoint: OutPoint,
1173		channel_id: ChannelId,
1174	},
1175}
1176impl_writeable_tlv_based_enum!(EventCompletionAction,
1177	(0, ReleaseRAAChannelMonitorUpdate) => {
1178		(0, channel_funding_outpoint, required),
1179		(2, counterparty_node_id, required),
1180		// Note that by the time we get past the required read above, channel_funding_outpoint will be
1181		// filled in, so we can safely unwrap it here.
1182		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
1183	}
1184);
1185
1186/// The source argument which is passed to [`ChannelManager::claim_mpp_part`].
1187///
1188/// This is identical to [`MPPClaimHTLCSource`] except that [`Self::counterparty_node_id`] is an
1189/// `Option`, whereas it is required in [`MPPClaimHTLCSource`]. In the future, we should ideally
1190/// drop this and merge the two, however doing so may break upgrades for nodes which have pending
1191/// forwarded payments.
1192struct HTLCClaimSource {
1193	counterparty_node_id: Option<PublicKey>,
1194	funding_txo: OutPoint,
1195	channel_id: ChannelId,
1196	htlc_id: u64,
1197}
1198
1199impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
1200	fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
1201		HTLCClaimSource {
1202			counterparty_node_id: Some(o.counterparty_node_id),
1203			funding_txo: o.funding_txo,
1204			channel_id: o.channel_id,
1205			htlc_id: o.htlc_id,
1206		}
1207	}
1208}
1209
1210#[derive(Debug)]
1211pub(crate) struct PendingMPPClaim {
1212	channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
1213	channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
1214}
1215
1216#[derive(Clone, Debug, Hash, PartialEq, Eq)]
1217/// The source of an HTLC which is being claimed as a part of an incoming payment. Each part is
1218/// tracked in [`ChannelMonitor`]s, so that it can be converted to an [`HTLCClaimSource`] for claim
1219/// replays on startup.
1220struct MPPClaimHTLCSource {
1221	counterparty_node_id: PublicKey,
1222	funding_txo: OutPoint,
1223	channel_id: ChannelId,
1224	htlc_id: u64,
1225}
1226
1227impl_writeable_tlv_based!(MPPClaimHTLCSource, {
1228	(0, counterparty_node_id, required),
1229	(2, funding_txo, required),
1230	(4, channel_id, required),
1231	(6, htlc_id, required),
1232});
1233
1234#[derive(Clone, Debug, PartialEq, Eq)]
1235/// When we're claiming a(n MPP) payment, we want to store information about that payment in the
1236/// [`ChannelMonitor`] so that we can replay the claim without any information from the
1237/// [`ChannelManager`] at all. This struct stores that information with enough to replay claims
1238/// against all MPP parts as well as generate an [`Event::PaymentClaimed`].
1239pub(crate) struct PaymentClaimDetails {
1240	mpp_parts: Vec<MPPClaimHTLCSource>,
1241	/// Use [`ClaimingPayment`] as a stable source of all the fields we need to generate the
1242	/// [`Event::PaymentClaimed`].
1243	claiming_payment: ClaimingPayment,
1244}
1245
1246impl_writeable_tlv_based!(PaymentClaimDetails, {
1247	(0, mpp_parts, required_vec),
1248	(2, claiming_payment, required),
1249});
1250
1251#[derive(Clone)]
1252pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
1253
1254impl PartialEq for PendingMPPClaimPointer {
1255	fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
1256}
1257impl Eq for PendingMPPClaimPointer {}
1258
1259impl core::fmt::Debug for PendingMPPClaimPointer {
1260	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
1261		self.0.lock().unwrap().fmt(f)
1262	}
1263}
1264
1265#[derive(Clone, PartialEq, Eq, Debug)]
1266/// If something is blocked on the completion of an RAA-generated [`ChannelMonitorUpdate`] we track
1267/// the blocked action here. See enum variants for more info.
1268pub(crate) enum RAAMonitorUpdateBlockingAction {
1269	/// A forwarded payment was claimed. We block the downstream channel completing its monitor
1270	/// update which removes the HTLC preimage until the upstream channel has gotten the preimage
1271	/// durably to disk.
1272	ForwardedPaymentInboundClaim {
1273		/// The upstream channel ID (i.e. the inbound edge).
1274		channel_id: ChannelId,
1275		/// The HTLC ID on the inbound edge.
1276		htlc_id: u64,
1277	},
1278	/// We claimed an MPP payment across multiple channels. We have to block removing the payment
1279	/// preimage from any monitor until the last monitor is updated to contain the payment
1280	/// preimage. Otherwise we may not be able to replay the preimage on the monitor(s) that
1281	/// weren't updated on startup.
1282	///
1283	/// This variant is *not* written to disk, instead being inferred from [`ChannelMonitor`]
1284	/// state.
1285	ClaimedMPPPayment {
1286		pending_claim: PendingMPPClaimPointer,
1287	}
1288}
1289
1290impl RAAMonitorUpdateBlockingAction {
1291	fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
1292		Self::ForwardedPaymentInboundClaim {
1293			channel_id: prev_hop.channel_id,
1294			htlc_id: prev_hop.htlc_id,
1295		}
1296	}
1297}
1298
1299impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
1300	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
1301	unread_variants: ClaimedMPPPayment
1302);
1303
1304impl Readable for Option<RAAMonitorUpdateBlockingAction> {
1305	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
1306		Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
1307	}
1308}
1309
1310/// State we hold per-peer.
1311pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
1312	/// `channel_id` -> `ChannelPhase`
1313	///
1314	/// Holds all channels within corresponding `ChannelPhase`s where the peer is the counterparty.
1315	pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
1316	/// `temporary_channel_id` -> `InboundChannelRequest`.
1317	///
1318	/// When manual channel acceptance is enabled, this holds all unaccepted inbound channels where
1319	/// the peer is the counterparty. If the channel is accepted, then the entry in this table is
1320	/// removed, and an inbound channel is created and placed in the `channel_by_id` table. If
1321	/// the channel is rejected, then the entry is simply removed.
1322	pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
1323	/// The latest `InitFeatures` we heard from the peer.
1324	latest_features: InitFeatures,
1325	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
1326	/// for broadcast messages, where ordering isn't as strict).
1327	pub(super) pending_msg_events: Vec<MessageSendEvent>,
1328	/// Map from Channel IDs to pending [`ChannelMonitorUpdate`]s which have been passed to the
1329	/// user but which have not yet completed.
1330	///
1331	/// Note that the channel may no longer exist. For example if the channel was closed but we
1332	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1333	/// for a missing channel.
1334	///
1335	/// Note that any pending [`BackgroundEvent::MonitorUpdateRegeneratedOnStartup`]s which are
1336	/// sitting in [`ChannelManager::pending_background_events`] will *also* be tracked here. This
1337	/// avoids a race condition during [`ChannelManager::pending_background_events`] processing
1338	/// where we complete one [`ChannelMonitorUpdate`] (while more remain pending as background
1339	/// events) yet conclude that all pending [`ChannelMonitorUpdate`]s have completed and it's safe
1340	/// to run post-completion actions.
1341	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
1342	/// Map from a specific channel to some action(s) that should be taken when all pending
1343	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
1344	///
1345	/// Note that because we generally only have one entry here, a HashMap is pretty overkill. A
1346	/// BTreeMap currently stores more than ten elements per leaf node, so even up to a few
1347	/// channels with a peer this will just be one allocation and will amount to a linear list of
1348	/// channels to walk, avoiding the whole hashing rigmarole.
1349	///
1350	/// Note that the channel may no longer exist. For example, if a channel was closed but we
1351	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
1352	/// for a missing channel. While a malicious peer could construct a second channel with the
1353	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
1354	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
1355	/// duplicates do not occur, so such channels should fail without a monitor update completing.
1356	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
1357	/// If another channel's [`ChannelMonitorUpdate`] needs to complete before a channel we have
1358	/// with this peer can complete an RAA [`ChannelMonitorUpdate`] (e.g. because the RAA update
1359	/// will remove a preimage that needs to be durably in an upstream channel first), we put an
1360	/// entry here to note that the channel with the key's ID is blocked on a set of actions.
1361	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
1362	/// The latest [`ChannelMonitor::get_latest_update_id`] value for all closed channels as they
1363	/// exist on-disk/in our [`chain::Watch`].
1364	///
1365	/// If there are any updates pending in [`Self::in_flight_monitor_updates`] this will contain
1366	/// the highest `update_id` of all the pending in-flight updates (note that any pending updates
1367	/// not yet applied sitting in [`ChannelManager::pending_background_events`] will also be
1368	/// considered as they are also in [`Self::in_flight_monitor_updates`]).
1369	closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
1370	/// The peer is currently connected (i.e. we've seen a
1371	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
1372	/// [`ChannelMessageHandler::peer_disconnected`]).
1373	pub is_connected: bool,
1374}
1375
1376impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
1377	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
1378	/// If true is passed for `require_disconnected`, the function will return false if we haven't
1379	/// disconnected from the node already, i.e., `PeerState::is_connected` is set to `true`.
1380	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
1381		if require_disconnected && self.is_connected {
1382			return false
1383		}
1384		for (_, updates) in self.in_flight_monitor_updates.iter() {
1385			if !updates.is_empty() {
1386				return false;
1387			}
1388		}
1389		!self.channel_by_id.iter().any(|(_, phase)|
1390			match phase {
1391				ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
1392				ChannelPhase::UnfundedInboundV1(_) => false,
1393				ChannelPhase::UnfundedOutboundV2(_) => true,
1394				ChannelPhase::UnfundedInboundV2(_) => false,
1395			}
1396		)
1397			&& self.monitor_update_blocked_actions.is_empty()
1398			&& self.closed_channel_monitor_update_ids.is_empty()
1399	}
1400
1401	// Returns a count of all channels we have with this peer, including unfunded channels.
1402	fn total_channel_count(&self) -> usize {
1403		self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
1404	}
1405
1406	// Returns a bool indicating if the given `channel_id` matches a channel we have with this peer.
1407	fn has_channel(&self, channel_id: &ChannelId) -> bool {
1408		self.channel_by_id.contains_key(channel_id) ||
1409			self.inbound_channel_request_by_id.contains_key(channel_id)
1410	}
1411}
1412
1413#[derive(Clone)]
1414pub(super) enum OpenChannelMessage {
1415	V1(msgs::OpenChannel),
1416	#[cfg(dual_funding)]
1417	V2(msgs::OpenChannelV2),
1418}
1419
1420pub(super) enum OpenChannelMessageRef<'a> {
1421	V1(&'a msgs::OpenChannel),
1422	#[cfg(dual_funding)]
1423	V2(&'a msgs::OpenChannelV2),
1424}
1425
1426/// A not-yet-accepted inbound (from counterparty) channel. Once
1427/// accepted, the parameters will be used to construct a channel.
1428pub(super) struct InboundChannelRequest {
1429	/// The original OpenChannel message.
1430	pub open_channel_msg: OpenChannelMessage,
1431	/// The number of ticks remaining before the request expires.
1432	pub ticks_remaining: i32,
1433}
1434
1435/// The number of ticks that may elapse while we're waiting for an unaccepted inbound channel to be
1436/// accepted. An unaccepted channel that exceeds this limit will be abandoned.
1437const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;
1438
1439/// The number of blocks of historical feerate estimates we keep around and consider when deciding
1440/// to force-close a channel for having too-low fees. Also the number of blocks we have to see
1441/// after startup before we consider force-closing channels for having too-low fees.
1442pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
1443
1444/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
1445/// actually ours and not some duplicate HTLC sent to us by a node along the route.
1446///
1447/// For users who don't want to bother doing their own payment preimage storage, we also store that
1448/// here.
1449///
1450/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
1451/// and instead encoding it in the payment secret.
1452#[derive(Debug)]
1453struct PendingInboundPayment {
1454	/// The payment secret that the sender must use for us to accept this payment
1455	payment_secret: PaymentSecret,
1456	/// Time at which this HTLC expires - blocks with a header time above this value will result in
1457	/// this payment being removed.
1458	expiry_time: u64,
1459	/// Arbitrary identifier the user specifies (or not)
1460	user_payment_id: u64,
1461	// Other required attributes of the payment, optionally enforced:
1462	payment_preimage: Option<PaymentPreimage>,
1463	min_value_msat: Option<u64>,
1464}
1465
1466/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
1467/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
1468/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
1469/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
1470/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
1471/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1472/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
1473/// of [`KeysManager`] and [`DefaultRouter`].
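///
/// For illustration only, a rough sketch of how this alias might be filled in. The concrete
/// `MyChainMonitor`, `MyBroadcaster`, `MyFeeEstimator`, and `MyLogger` types are hypothetical
/// stand-ins for your own implementations, so the snippet is not compiled as a doctest:
///
/// ```ignore
/// // Parameter order: M = chain::Watch, T = BroadcasterInterface, F = FeeEstimator, L = Logger.
/// type MyChannelManager = SimpleArcChannelManager<
///     MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger,
/// >;
/// ```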
1474///
1475/// This is not exported to bindings users as type aliases aren't supported in most languages.
1476#[cfg(not(c_bindings))]
1477pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
1478	Arc<M>,
1479	Arc<T>,
1480	Arc<KeysManager>,
1481	Arc<KeysManager>,
1482	Arc<KeysManager>,
1483	Arc<F>,
1484	Arc<DefaultRouter<
1485		Arc<NetworkGraph<Arc<L>>>,
1486		Arc<L>,
1487		Arc<KeysManager>,
1488		Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
1489		ProbabilisticScoringFeeParameters,
1490		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
1491	>>,
1492	Arc<DefaultMessageRouter<
1493		Arc<NetworkGraph<Arc<L>>>,
1494		Arc<L>,
1495		Arc<KeysManager>,
1496	>>,
1497	Arc<L>
1498>;
1499
1500/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
1501/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
1502/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
1503/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
1504/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
1505/// prevents issues such as overly long function definitions. Note that the ChannelManager can take any type
1506/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
1507/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
1508/// of [`KeysManager`] and [`DefaultRouter`].
1509///
1510/// This is not exported to bindings users as type aliases aren't supported in most languages.
1511#[cfg(not(c_bindings))]
1512pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> =
1513	ChannelManager<
1514		&'a M,
1515		&'b T,
1516		&'c KeysManager,
1517		&'c KeysManager,
1518		&'c KeysManager,
1519		&'d F,
1520		&'e DefaultRouter<
1521			&'f NetworkGraph<&'g L>,
1522			&'g L,
1523			&'c KeysManager,
1524			&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
1525			ProbabilisticScoringFeeParameters,
1526			ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
1527		>,
1528		&'i DefaultMessageRouter<
1529			&'f NetworkGraph<&'g L>,
1530			&'g L,
1531			&'c KeysManager,
1532		>,
1533		&'g L
1534	>;
1535
1536/// A trivial trait which describes any [`ChannelManager`].
1537///
1538/// This is not exported to bindings users as general cover traits aren't useful in other
1539/// languages.
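///
/// For example, utility code can be written once against any `ChannelManager` parameterization.
/// A minimal sketch (the helper name is hypothetical; not compiled as a doctest):
///
/// ```ignore
/// fn count_usable_channels<T: AChannelManager>(cm: &T) -> usize {
///     // `get_cm` exposes the underlying `ChannelManager` regardless of its type parameters.
///     cm.get_cm().list_usable_channels().len()
/// }
/// ```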
1540pub trait AChannelManager {
1541	/// A type implementing [`chain::Watch`].
1542	type Watch: chain::Watch<Self::Signer> + ?Sized;
1543	/// A type that may be dereferenced to [`Self::Watch`].
1544	type M: Deref<Target = Self::Watch>;
1545	/// A type implementing [`BroadcasterInterface`].
1546	type Broadcaster: BroadcasterInterface + ?Sized;
1547	/// A type that may be dereferenced to [`Self::Broadcaster`].
1548	type T: Deref<Target = Self::Broadcaster>;
1549	/// A type implementing [`EntropySource`].
1550	type EntropySource: EntropySource + ?Sized;
1551	/// A type that may be dereferenced to [`Self::EntropySource`].
1552	type ES: Deref<Target = Self::EntropySource>;
1553	/// A type implementing [`NodeSigner`].
1554	type NodeSigner: NodeSigner + ?Sized;
1555	/// A type that may be dereferenced to [`Self::NodeSigner`].
1556	type NS: Deref<Target = Self::NodeSigner>;
1557	/// A type implementing [`EcdsaChannelSigner`].
1558	type Signer: EcdsaChannelSigner + Sized;
1559	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
1560	type SignerProvider: SignerProvider<EcdsaSigner = Self::Signer> + ?Sized;
1561	/// A type that may be dereferenced to [`Self::SignerProvider`].
1562	type SP: Deref<Target = Self::SignerProvider>;
1563	/// A type implementing [`FeeEstimator`].
1564	type FeeEstimator: FeeEstimator + ?Sized;
1565	/// A type that may be dereferenced to [`Self::FeeEstimator`].
1566	type F: Deref<Target = Self::FeeEstimator>;
1567	/// A type implementing [`Router`].
1568	type Router: Router + ?Sized;
1569	/// A type that may be dereferenced to [`Self::Router`].
1570	type R: Deref<Target = Self::Router>;
1571	/// A type implementing [`MessageRouter`].
1572	type MessageRouter: MessageRouter + ?Sized;
1573	/// A type that may be dereferenced to [`Self::MessageRouter`].
1574	type MR: Deref<Target = Self::MessageRouter>;
1575	/// A type implementing [`Logger`].
1576	type Logger: Logger + ?Sized;
1577	/// A type that may be dereferenced to [`Self::Logger`].
1578	type L: Deref<Target = Self::Logger>;
1579	/// Returns a reference to the actual [`ChannelManager`] object.
1580	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::MR, Self::L>;
1581}
1582
1583impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> AChannelManager
1584for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
1585where
1586	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
1587	T::Target: BroadcasterInterface,
1588	ES::Target: EntropySource,
1589	NS::Target: NodeSigner,
1590	SP::Target: SignerProvider,
1591	F::Target: FeeEstimator,
1592	R::Target: Router,
1593	MR::Target: MessageRouter,
1594	L::Target: Logger,
1595{
1596	type Watch = M::Target;
1597	type M = M;
1598	type Broadcaster = T::Target;
1599	type T = T;
1600	type EntropySource = ES::Target;
1601	type ES = ES;
1602	type NodeSigner = NS::Target;
1603	type NS = NS;
1604	type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
1605	type SignerProvider = SP::Target;
1606	type SP = SP;
1607	type FeeEstimator = F::Target;
1608	type F = F;
1609	type Router = R::Target;
1610	type R = R;
1611	type MessageRouter = MR::Target;
1612	type MR = MR;
1613	type Logger = L::Target;
1614	type L = L;
1615	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> { self }
1616}
1617
1618/// A lightning node's channel state machine and payment management logic, which facilitates
1619/// sending, forwarding, and receiving payments through lightning channels.
1620///
1621/// [`ChannelManager`] is parameterized by a number of components to achieve this.
1622/// - [`chain::Watch`] (typically [`ChainMonitor`]) for on-chain monitoring and enforcement of each
1623///   channel
1624/// - [`BroadcasterInterface`] for broadcasting transactions related to opening, funding, and
1625///   closing channels
1626/// - [`EntropySource`] for providing random data needed for cryptographic operations
1627/// - [`NodeSigner`] for cryptographic operations scoped to the node
1628/// - [`SignerProvider`] for providing signers whose operations are scoped to individual channels
1629/// - [`FeeEstimator`] to determine transaction fee rates needed to have a transaction mined in a
1630///   timely manner
1631/// - [`Router`] for finding payment paths when initiating and retrying payments
1632/// - [`MessageRouter`] for finding message paths when initiating and retrying onion messages
1633/// - [`Logger`] for logging operational information of varying degrees
1634///
1635/// Additionally, it implements the following traits:
1636/// - [`ChannelMessageHandler`] to handle off-chain channel activity from peers
1637/// - [`MessageSendEventsProvider`] to similarly send such messages to peers
1638/// - [`OffersMessageHandler`] for BOLT 12 message handling and sending
1639/// - [`EventsProvider`] to generate user-actionable [`Event`]s
1640/// - [`chain::Listen`] and [`chain::Confirm`] for notification of on-chain activity
1641///
1642/// Thus, [`ChannelManager`] is typically used to parameterize a [`MessageHandler`] and an
1643/// [`OnionMessenger`]. The latter is required to support BOLT 12 functionality.
1644///
1645/// # `ChannelManager` vs `ChannelMonitor`
1646///
1647/// It's important to distinguish between the *off-chain* management and *on-chain* enforcement of
1648/// lightning channels. [`ChannelManager`] exchanges messages with peers to manage the off-chain
1649/// state of each channel. During this process, it generates a [`ChannelMonitor`] for each channel
1650/// and a [`ChannelMonitorUpdate`] for each relevant change, notifying its parameterized
1651/// [`chain::Watch`] of them.
1652///
1653/// An implementation of [`chain::Watch`], such as [`ChainMonitor`], is responsible for aggregating
1654/// these [`ChannelMonitor`]s and applying any [`ChannelMonitorUpdate`]s to them. It then monitors
1655/// for any pertinent on-chain activity, enforcing claims as needed.
1656///
1657/// This division of off-chain management and on-chain enforcement allows for interesting node
1658/// setups. For instance, on-chain enforcement could be moved to a separate host or have added
1659/// redundancy, possibly as a watchtower. See [`chain::Watch`] for the relevant interface.
1660///
1661/// # Initialization
1662///
1663/// Use [`ChannelManager::new`] with the most recent [`BlockHash`] when creating a fresh instance.
1664/// Otherwise, if restarting, construct [`ChannelManagerReadArgs`] with the necessary parameters and
1665/// references to any deserialized [`ChannelMonitor`]s that were previously persisted. Use this to
1666/// deserialize the [`ChannelManager`] and feed it any new chain data since it was last online, as
1667/// detailed in the [`ChannelManagerReadArgs`] documentation.
1668///
1669/// ```
1670/// use bitcoin::BlockHash;
1671/// use bitcoin::network::Network;
1672/// use lightning::chain::BestBlock;
1673/// # use lightning::chain::channelmonitor::ChannelMonitor;
1674/// use lightning::ln::channelmanager::{ChainParameters, ChannelManager, ChannelManagerReadArgs};
1675/// # use lightning::routing::gossip::NetworkGraph;
1676/// use lightning::util::config::UserConfig;
1677/// use lightning::util::ser::ReadableArgs;
1678///
1679/// # fn read_channel_monitors() -> Vec<ChannelMonitor<lightning::sign::InMemorySigner>> { vec![] }
1680/// # fn example<
1681/// #     'a,
1682/// #     L: lightning::util::logger::Logger,
1683/// #     ES: lightning::sign::EntropySource,
1684/// #     S: for <'b> lightning::routing::scoring::LockableScore<'b, ScoreLookUp = SL>,
1685/// #     SL: lightning::routing::scoring::ScoreLookUp<ScoreParams = SP>,
1686/// #     SP: Sized,
1687/// #     R: lightning::io::Read,
1688/// # >(
1689/// #     fee_estimator: &dyn lightning::chain::chaininterface::FeeEstimator,
1690/// #     chain_monitor: &dyn lightning::chain::Watch<lightning::sign::InMemorySigner>,
1691/// #     tx_broadcaster: &dyn lightning::chain::chaininterface::BroadcasterInterface,
1692/// #     router: &lightning::routing::router::DefaultRouter<&NetworkGraph<&'a L>, &'a L, &ES, &S, SP, SL>,
1693/// #     message_router: &lightning::onion_message::messenger::DefaultMessageRouter<&NetworkGraph<&'a L>, &'a L, &ES>,
1694/// #     logger: &L,
1695/// #     entropy_source: &ES,
1696/// #     node_signer: &dyn lightning::sign::NodeSigner,
1697/// #     signer_provider: &lightning::sign::DynSignerProvider,
1698/// #     best_block: lightning::chain::BestBlock,
1699/// #     current_timestamp: u32,
1700/// #     mut reader: R,
1701/// # ) -> Result<(), lightning::ln::msgs::DecodeError> {
1702/// // Fresh start with no channels
1703/// let params = ChainParameters {
1704///     network: Network::Bitcoin,
1705///     best_block,
1706/// };
1707/// let default_config = UserConfig::default();
1708/// let channel_manager = ChannelManager::new(
1709///     fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger,
1710///     entropy_source, node_signer, signer_provider, default_config, params, current_timestamp,
1711/// );
1712///
1713/// // Restart from deserialized data
1714/// let mut channel_monitors = read_channel_monitors();
1715/// let args = ChannelManagerReadArgs::new(
1716///     entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster,
1717///     router, message_router, logger, default_config, channel_monitors.iter().collect(),
1718/// );
1719/// let (block_hash, channel_manager) =
1720///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut reader, args)?;
1721///
1722/// // Update the ChannelManager and ChannelMonitors with the latest chain data
1723/// // ...
1724///
1725/// // Move the monitors to the ChannelManager's chain::Watch parameter
1726/// for monitor in channel_monitors {
1727///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
1728/// }
1729/// # Ok(())
1730/// # }
1731/// ```
1732///
1733/// # Operation
1734///
1735/// The following is required for [`ChannelManager`] to function properly:
1736/// - Handle messages from peers using its [`ChannelMessageHandler`] implementation (typically
1737///   called by [`PeerManager::read_event`] when processing network I/O)
1738/// - Send messages to peers obtained via its [`MessageSendEventsProvider`] implementation
1739///   (typically initiated when [`PeerManager::process_events`] is called)
1740/// - Feed on-chain activity using either its [`chain::Listen`] or [`chain::Confirm`] implementation
1741///   as documented by those traits
1742/// - Perform any periodic channel and payment checks by calling [`timer_tick_occurred`] roughly
1743///   every minute
1744/// - Persist to disk whenever [`get_and_clear_needs_persistence`] returns `true` using a
1745///   [`Persister`] such as a [`KVStore`] implementation
1746/// - Handle [`Event`]s obtained via its [`EventsProvider`] implementation
1747///
1748/// The [`Future`] returned by [`get_event_or_persistence_needed_future`] is useful in determining
1749/// when the last two requirements need to be checked.
1750///
1751/// The [`lightning-block-sync`] and [`lightning-transaction-sync`] crates provide utilities that
1752/// simplify feeding in on-chain activity using the [`chain::Listen`] and [`chain::Confirm`] traits,
1753/// respectively. The remaining requirements can be met using the [`lightning-background-processor`]
1754/// crate. For languages other than Rust, the availability of similar utilities may vary.
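///
/// If you drive these requirements yourself instead of using [`lightning-background-processor`],
/// the main loop might look roughly like the following sketch (the `handle_event` handler and
/// `persist_manager` call are hypothetical names for your own glue code; not compiled as a
/// doctest):
///
/// ```ignore
/// loop {
///     // Wait until either an event needs handling or persistence is needed.
///     channel_manager.get_event_or_persistence_needed_future().await;
///     channel_manager.process_pending_events(&handle_event);
///     if channel_manager.get_and_clear_needs_persistence() {
///         persist_manager(&channel_manager)?;
///     }
/// }
/// ```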
1755///
1756/// # Channels
1757///
1758/// [`ChannelManager`]'s primary function involves managing channel state. Without channels,
1759/// payments can't be sent. Use [`list_channels`] or [`list_usable_channels`] for a snapshot of the
1760/// currently open channels.
1761///
1762/// ```
1763/// # use lightning::ln::channelmanager::AChannelManager;
1764/// #
1765/// # fn example<T: AChannelManager>(channel_manager: T) {
1766/// # let channel_manager = channel_manager.get_cm();
1767/// let channels = channel_manager.list_usable_channels();
1768/// for details in channels {
1769///     println!("{:?}", details);
1770/// }
1771/// # }
1772/// ```
1773///
1774/// Each channel is identified using a [`ChannelId`], which will change throughout the channel's
1775/// life cycle. Additionally, channels are assigned a `user_channel_id`, which is given in
1776/// [`Event`]s associated with the channel and serves as a fixed identifier but is otherwise unused
1777/// by [`ChannelManager`].
1778///
1779/// ## Opening Channels
1780///
1781/// To open a channel with a peer, call [`create_channel`]. This will initiate the process of
1782/// opening an outbound channel, which requires self-funding when handling
1783/// [`Event::FundingGenerationReady`].
1784///
1785/// ```
1786/// # use bitcoin::{ScriptBuf, Transaction};
1787/// # use bitcoin::secp256k1::PublicKey;
1788/// # use lightning::ln::channelmanager::AChannelManager;
1789/// # use lightning::events::{Event, EventsProvider};
1790/// #
1791/// # trait Wallet {
1792/// #     fn create_funding_transaction(
1793/// #         &self, _amount_sats: u64, _output_script: ScriptBuf
1794/// #     ) -> Transaction;
1795/// # }
1796/// #
1797/// # fn example<T: AChannelManager, W: Wallet>(channel_manager: T, wallet: W, peer_id: PublicKey) {
1798/// # let channel_manager = channel_manager.get_cm();
1799/// let value_sats = 1_000_000;
1800/// let push_msats = 10_000_000;
1801/// match channel_manager.create_channel(peer_id, value_sats, push_msats, 42, None, None) {
1802///     Ok(channel_id) => println!("Opening channel {}", channel_id),
1803///     Err(e) => println!("Error opening channel: {:?}", e),
1804/// }
1805///
1806/// // On the event processing thread once the peer has responded
1807/// channel_manager.process_pending_events(&|event| {
1808///     match event {
1809///         Event::FundingGenerationReady {
1810///             temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script,
1811///             user_channel_id, ..
1812///         } => {
1813///             assert_eq!(user_channel_id, 42);
1814///             let funding_transaction = wallet.create_funding_transaction(
1815///                 channel_value_satoshis, output_script
1816///             );
1817///             match channel_manager.funding_transaction_generated(
1818///                 temporary_channel_id, counterparty_node_id, funding_transaction
1819///             ) {
1820///                 Ok(()) => println!("Funding channel {}", temporary_channel_id),
1821///                 Err(e) => println!("Error funding channel {}: {:?}", temporary_channel_id, e),
1822///             }
1823///         },
1824///         Event::ChannelPending { channel_id, user_channel_id, former_temporary_channel_id, .. } => {
1825///             assert_eq!(user_channel_id, 42);
1826///             println!(
1827///                 "Channel {} (formerly {}) is now pending (funding transaction has been broadcast)", channel_id,
1828///                 former_temporary_channel_id.unwrap()
1829///             );
1830///         },
1831///         Event::ChannelReady { channel_id, user_channel_id, .. } => {
1832///             assert_eq!(user_channel_id, 42);
1833///             println!("Channel {} ready", channel_id);
1834///         },
1835///         // ...
1836///     #     _ => {},
1837///     }
1838///     Ok(())
1839/// });
1840/// # }
1841/// ```
1842///
1843/// ## Accepting Channels
1844///
1845/// Inbound channels are initiated by peers and are automatically accepted unless [`ChannelManager`]
1846/// has [`UserConfig::manually_accept_inbound_channels`] set. In that case, the channel may be
1847/// either accepted or rejected when handling [`Event::OpenChannelRequest`].
1848///
1849/// ```
1850/// # use bitcoin::secp256k1::PublicKey;
1851/// # use lightning::ln::channelmanager::AChannelManager;
1852/// # use lightning::events::{Event, EventsProvider};
1853/// #
1854/// # fn is_trusted(counterparty_node_id: PublicKey) -> bool {
1855/// #     // ...
1856/// #     unimplemented!()
1857/// # }
1858/// #
1859/// # fn example<T: AChannelManager>(channel_manager: T) {
1860/// # let channel_manager = channel_manager.get_cm();
1861/// # let error_message = "Channel force-closed";
1862/// channel_manager.process_pending_events(&|event| {
1863///     match event {
1864///         Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, ..  } => {
1865///             if !is_trusted(counterparty_node_id) {
1866///                 match channel_manager.force_close_without_broadcasting_txn(
1867///                     &temporary_channel_id, &counterparty_node_id, error_message.to_string()
1868///                 ) {
1869///                     Ok(()) => println!("Rejecting channel {}", temporary_channel_id),
1870///                     Err(e) => println!("Error rejecting channel {}: {:?}", temporary_channel_id, e),
1871///                 }
1872///                 return Ok(());
1873///             }
1874///
1875///             let user_channel_id = 43;
1876///             match channel_manager.accept_inbound_channel(
1877///                 &temporary_channel_id, &counterparty_node_id, user_channel_id
1878///             ) {
1879///                 Ok(()) => println!("Accepting channel {}", temporary_channel_id),
1880///                 Err(e) => println!("Error accepting channel {}: {:?}", temporary_channel_id, e),
1881///             }
1882///         },
1883///         // ...
1884///     #     _ => {},
1885///     }
1886///     Ok(())
1887/// });
1888/// # }
1889/// ```
1890///
1891/// ## Closing Channels
1892///
1893/// There are two ways to close a channel: either cooperatively using [`close_channel`] or
1894/// unilaterally using [`force_close_broadcasting_latest_txn`]. The former is ideal as it makes for
1895/// lower fees and immediate access to funds. However, the latter may be necessary if the
1896/// counterparty isn't behaving properly or has gone offline. [`Event::ChannelClosed`] is generated
1897/// once the channel has been closed successfully.
1898///
1899/// ```
1900/// # use bitcoin::secp256k1::PublicKey;
1901/// # use lightning::ln::types::ChannelId;
1902/// # use lightning::ln::channelmanager::AChannelManager;
1903/// # use lightning::events::{Event, EventsProvider};
1904/// #
1905/// # fn example<T: AChannelManager>(
1906/// #     channel_manager: T, channel_id: ChannelId, counterparty_node_id: PublicKey
1907/// # ) {
1908/// # let channel_manager = channel_manager.get_cm();
1909/// match channel_manager.close_channel(&channel_id, &counterparty_node_id) {
1910///     Ok(()) => println!("Closing channel {}", channel_id),
1911///     Err(e) => println!("Error closing channel {}: {:?}", channel_id, e),
1912/// }
1913///
1914/// // On the event processing thread
1915/// channel_manager.process_pending_events(&|event| {
1916///     match event {
1917///         Event::ChannelClosed { channel_id, user_channel_id, ..  } => {
1918///             assert_eq!(user_channel_id, 42);
1919///             println!("Channel {} closed", channel_id);
1920///         },
1921///         // ...
1922///     #     _ => {},
1923///     }
1924///     Ok(())
1925/// });
1926/// # }
1927/// ```
1928///
1929/// # Payments
1930///
1931/// [`ChannelManager`] is responsible for sending, forwarding, and receiving payments through its
1932/// channels. A payment is typically initiated from a [BOLT 11] invoice or a [BOLT 12] offer, though
1933/// spontaneous (i.e., keysend) payments are also possible. Incoming payments don't require
1934/// maintaining any additional state as [`ChannelManager`] can reconstruct the [`PaymentPreimage`]
1935/// from the [`PaymentSecret`]. Sending payments, however, requires tracking in order to retry failed
1936/// HTLCs.
1937///
1938/// After a payment is initiated, it will appear in [`list_recent_payments`] until a short time
1939/// after either an [`Event::PaymentSent`] or [`Event::PaymentFailed`] is handled. Failed HTLCs
1940/// for a payment will be retried according to the payment's [`Retry`] strategy or until
1941/// [`abandon_payment`] is called.
1942///
1943/// ## BOLT 11 Invoices
1944///
1945/// The [`lightning-invoice`] crate is useful for creating BOLT 11 invoices. However, in order to
1946/// construct a [`Bolt11Invoice`] that is compatible with [`ChannelManager`], use
1947/// [`create_bolt11_invoice`]. This method serves as a convenience for building invoices with the
1948/// [`PaymentHash`] and [`PaymentSecret`] returned from [`create_inbound_payment`]. To provide your
1949/// own [`PaymentHash`], override the appropriate [`Bolt11InvoiceParameters`], which is equivalent
1950/// to using [`create_inbound_payment_for_hash`].
1951///
1952/// [`ChannelManager`] generates an [`Event::PaymentClaimable`] once the full payment has been
1953/// received. Call [`claim_funds`] to release the [`PaymentPreimage`], which in turn will result in
1954/// an [`Event::PaymentClaimed`].
1955///
1956/// ```
1957/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
1958/// # use lightning::ln::channelmanager::{AChannelManager, Bolt11InvoiceParameters};
1959/// #
1960/// # fn example<T: AChannelManager>(channel_manager: T) {
1961/// # let channel_manager = channel_manager.get_cm();
1962/// let params = Bolt11InvoiceParameters {
1963///     amount_msats: Some(10_000_000),
1964///     invoice_expiry_delta_secs: Some(3600),
1965///     ..Default::default()
1966/// };
1967/// let invoice = match channel_manager.create_bolt11_invoice(params) {
1968///     Ok(invoice) => {
1969///         println!("Creating invoice with payment hash {}", invoice.payment_hash());
1970///         invoice
1971///     },
1972///     Err(e) => panic!("Error creating invoice: {}", e),
1973/// };
1974///
1975/// // On the event processing thread
1976/// channel_manager.process_pending_events(&|event| {
1977///     match event {
1978///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
1979///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: Some(payment_preimage), .. } => {
1980///                 assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
1981///                 println!("Claiming payment {}", payment_hash);
1982///                 channel_manager.claim_funds(payment_preimage);
1983///             },
1984///             PaymentPurpose::Bolt11InvoicePayment { payment_preimage: None, .. } => {
1985///                 println!("Unknown payment hash: {}", payment_hash);
1986///             },
1987///             PaymentPurpose::SpontaneousPayment(payment_preimage) => {
1988///                 assert_ne!(payment_hash.0, invoice.payment_hash().as_ref());
1989///                 println!("Claiming spontaneous payment {}", payment_hash);
1990///                 channel_manager.claim_funds(payment_preimage);
1991///             },
1992///             // ...
1993/// #           _ => {},
1994///         },
1995///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
1996///             assert_eq!(payment_hash.0, invoice.payment_hash().as_ref());
1997///             println!("Claimed {} msats", amount_msat);
1998///         },
1999///         // ...
2000/// #       _ => {},
2001///     }
2002///     Ok(())
2003/// });
2004/// # }
2005/// ```
2006///
2007/// For paying an invoice, see the [`bolt11_payment`] module, which provides convenience functions for use with
2008/// [`send_payment`].
2009///
2010/// ```
2011/// # use lightning::events::{Event, EventsProvider};
2012/// # use lightning::types::payment::PaymentHash;
2013/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, RecipientOnionFields, Retry};
2014/// # use lightning::routing::router::RouteParameters;
2015/// #
2016/// # fn example<T: AChannelManager>(
2017/// #     channel_manager: T, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
2018/// #     route_params: RouteParameters, retry: Retry
2019/// # ) {
2020/// # let channel_manager = channel_manager.get_cm();
2021/// // let (payment_hash, recipient_onion, route_params) =
2022/// //     payment::payment_parameters_from_invoice(&invoice);
2023/// let payment_id = PaymentId([42; 32]);
2024/// match channel_manager.send_payment(
2025///     payment_hash, recipient_onion, payment_id, route_params, retry
2026/// ) {
2027///     Ok(()) => println!("Sending payment with hash {}", payment_hash),
2028///     Err(e) => println!("Failed sending payment with hash {}: {:?}", payment_hash, e),
2029/// }
2030///
2031/// let expected_payment_id = payment_id;
2032/// let expected_payment_hash = payment_hash;
2033/// assert!(
2034///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2035///         details,
2036///         RecentPaymentDetails::Pending {
2037///             payment_id: expected_payment_id,
2038///             payment_hash: expected_payment_hash,
2039///             ..
2040///         }
2041///     )).is_some()
2042/// );
2043///
2044/// // On the event processing thread
2045/// channel_manager.process_pending_events(&|event| {
2046///     match event {
2047///         Event::PaymentSent { payment_hash, .. } => println!("Paid {}", payment_hash),
2048///         Event::PaymentFailed { payment_hash: Some(payment_hash), .. } =>
2049///             println!("Failed paying {}", payment_hash),
2050///         // ...
2051///     #     _ => {},
2052///     }
2053///     Ok(())
2054/// });
2055/// # }
2056/// ```
2057///
2058/// ## BOLT 12 Offers
2059///
2060/// The [`offers`] module is useful for creating BOLT 12 offers. An [`Offer`] is a precursor to a
2061/// [`Bolt12Invoice`], which must first be requested by the payer. The interchange of these messages
2062/// as defined in the specification is handled by [`ChannelManager`] and its implementation of
2063/// [`OffersMessageHandler`]. However, this only works with an [`Offer`] created using a builder
2064/// returned by [`create_offer_builder`]. With this approach, BOLT 12 offers and invoices are
2065/// stateless just as BOLT 11 invoices are.
2066///
2067/// ```
2068/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2069/// # use lightning::ln::channelmanager::AChannelManager;
2070/// # use lightning::offers::parse::Bolt12SemanticError;
2071/// #
2072/// # fn example<T: AChannelManager>(channel_manager: T) -> Result<(), Bolt12SemanticError> {
2073/// # let channel_manager = channel_manager.get_cm();
2074/// # let absolute_expiry = None;
2075/// let offer = channel_manager
2076///     .create_offer_builder(absolute_expiry)?
2077/// # ;
2078/// # // Needed for compiling for c_bindings
2079/// # let builder: lightning::offers::offer::OfferBuilder<_, _> = offer.into();
2080/// # let offer = builder
2081///     .description("coffee".to_string())
2082///     .amount_msats(10_000_000)
2083///     .build()?;
2084/// let bech32_offer = offer.to_string();
2085///
2086/// // On the event processing thread
2087/// channel_manager.process_pending_events(&|event| {
2088///     match event {
2089///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2090///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: Some(payment_preimage), .. } => {
2091///                 println!("Claiming payment {}", payment_hash);
2092///                 channel_manager.claim_funds(payment_preimage);
2093///             },
2094///             PaymentPurpose::Bolt12OfferPayment { payment_preimage: None, .. } => {
2095///                 println!("Unknown payment hash: {}", payment_hash);
2096///             }
2097/// #           _ => {},
2098///         },
2099///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2100///             println!("Claimed {} msats", amount_msat);
2101///         },
2102///         // ...
2103///     #     _ => {},
2104///     }
2105///     Ok(())
2106/// });
2107/// # Ok(())
2108/// # }
2109/// ```
2110///
2111/// Use [`pay_for_offer`] to initiate a payment, which sends an [`InvoiceRequest`] for an [`Offer`]
2112/// and pays the [`Bolt12Invoice`] response.
2113///
2114/// ```
2115/// # use lightning::events::{Event, EventsProvider};
2116/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2117/// # use lightning::offers::offer::Offer;
2118/// #
2119/// # fn example<T: AChannelManager>(
2120/// #     channel_manager: T, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
2121/// #     payer_note: Option<String>, retry: Retry, max_total_routing_fee_msat: Option<u64>
2122/// # ) {
2123/// # let channel_manager = channel_manager.get_cm();
2124/// let payment_id = PaymentId([42; 32]);
2125/// match channel_manager.pay_for_offer(
2126///     offer, quantity, amount_msats, payer_note, payment_id, retry, max_total_routing_fee_msat
2127/// ) {
2128///     Ok(()) => println!("Requesting invoice for offer"),
2129///     Err(e) => println!("Unable to request invoice for offer: {:?}", e),
2130/// }
2131///
2132/// // First the payment will be waiting on an invoice
2133/// let expected_payment_id = payment_id;
2134/// assert!(
2135///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2136///         details,
2137///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2138///     )).is_some()
2139/// );
2140///
2141/// // Once the invoice is received, a payment will be sent
2142/// assert!(
2143///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2144///         details,
2145///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2146///     )).is_some()
2147/// );
2148///
2149/// // On the event processing thread
2150/// channel_manager.process_pending_events(&|event| {
2151///     match event {
2152///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2153///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2154///         // ...
2155///     #     _ => {},
2156///     }
2157///     Ok(())
2158/// });
2159/// # }
2160/// ```
2161///
2162/// ## BOLT 12 Refunds
2163///
2164/// A [`Refund`] is a request for an invoice to be paid. Like *paying* for an [`Offer`], *creating*
2165/// a [`Refund`] involves maintaining state since it represents a future outbound payment.
2166/// Therefore, use [`create_refund_builder`] when creating one, otherwise [`ChannelManager`] will
2167/// refuse to pay any corresponding [`Bolt12Invoice`] that it receives.
2168///
2169/// ```
2170/// # use core::time::Duration;
2171/// # use lightning::events::{Event, EventsProvider};
2172/// # use lightning::ln::channelmanager::{AChannelManager, PaymentId, RecentPaymentDetails, Retry};
2173/// # use lightning::offers::parse::Bolt12SemanticError;
2174/// #
2175/// # fn example<T: AChannelManager>(
2176/// #     channel_manager: T, amount_msats: u64, absolute_expiry: Duration, retry: Retry,
2177/// #     max_total_routing_fee_msat: Option<u64>
2178/// # ) -> Result<(), Bolt12SemanticError> {
2179/// # let channel_manager = channel_manager.get_cm();
2180/// let payment_id = PaymentId([42; 32]);
2181/// let refund = channel_manager
2182///     .create_refund_builder(
2183///         amount_msats, absolute_expiry, payment_id, retry, max_total_routing_fee_msat
2184///     )?
2185/// # ;
2186/// # // Needed for compiling for c_bindings
2187/// # let builder: lightning::offers::refund::RefundBuilder<_> = refund.into();
2188/// # let refund = builder
2189///     .description("coffee".to_string())
2190///     .payer_note("refund for order 1234".to_string())
2191///     .build()?;
2192/// let bech32_refund = refund.to_string();
2193///
2194/// // First the payment will be waiting on an invoice
2195/// let expected_payment_id = payment_id;
2196/// assert!(
2197///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2198///         details,
2199///         RecentPaymentDetails::AwaitingInvoice { payment_id: expected_payment_id }
2200///     )).is_some()
2201/// );
2202///
2203/// // Once the invoice is received, a payment will be sent
2204/// assert!(
2205///     channel_manager.list_recent_payments().iter().find(|details| matches!(
2206///         details,
2207///         RecentPaymentDetails::Pending { payment_id: expected_payment_id, ..  }
2208///     )).is_some()
2209/// );
2210///
2211/// // On the event processing thread
2212/// channel_manager.process_pending_events(&|event| {
2213///     match event {
2214///         Event::PaymentSent { payment_id: Some(payment_id), .. } => println!("Paid {}", payment_id),
2215///         Event::PaymentFailed { payment_id, .. } => println!("Failed paying {}", payment_id),
2216///         // ...
2217///     #     _ => {},
2218///     }
2219///     Ok(())
2220/// });
2221/// # Ok(())
2222/// # }
2223/// ```
2224///
2225/// Use [`request_refund_payment`] to send a [`Bolt12Invoice`] for receiving the refund. Similar to
2226/// *creating* an [`Offer`], this is stateless as it represents an inbound payment.
2227///
2228/// ```
2229/// # use lightning::events::{Event, EventsProvider, PaymentPurpose};
2230/// # use lightning::ln::channelmanager::AChannelManager;
2231/// # use lightning::offers::refund::Refund;
2232/// #
2233/// # fn example<T: AChannelManager>(channel_manager: T, refund: &Refund) {
2234/// # let channel_manager = channel_manager.get_cm();
2235/// let known_payment_hash = match channel_manager.request_refund_payment(refund) {
2236///     Ok(invoice) => {
2237///         let payment_hash = invoice.payment_hash();
2238///         println!("Requesting refund payment {}", payment_hash);
2239///         payment_hash
2240///     },
2241///     Err(e) => panic!("Unable to request payment for refund: {:?}", e),
2242/// };
2243///
2244/// // On the event processing thread
2245/// channel_manager.process_pending_events(&|event| {
2246///     match event {
2247///         Event::PaymentClaimable { payment_hash, purpose, .. } => match purpose {
2248///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: Some(payment_preimage), .. } => {
2249///                 assert_eq!(payment_hash, known_payment_hash);
2250///                 println!("Claiming payment {}", payment_hash);
2251///                 channel_manager.claim_funds(payment_preimage);
2252///             },
2253///             PaymentPurpose::Bolt12RefundPayment { payment_preimage: None, .. } => {
2254///                 println!("Unknown payment hash: {}", payment_hash);
2255///             },
2256///             // ...
2257/// #           _ => {},
2258///         },
2259///         Event::PaymentClaimed { payment_hash, amount_msat, .. } => {
2260///             assert_eq!(payment_hash, known_payment_hash);
2261///             println!("Claimed {} msats", amount_msat);
2262///         },
2263///         // ...
2264/// #       _ => {},
2265///     }
2266///     Ok(())
2267/// });
2268/// # }
2269/// ```
2270///
2271/// # Persistence
2272///
2273/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
2274/// all peers during write/read (though does not modify this instance, only the instance being
2275/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
2276/// called [`funding_transaction_generated`] for outbound channels) being closed.
2277///
2278/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
2279/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST durably write each
2280/// [`ChannelMonitorUpdate`] before returning from
2281/// [`chain::Watch::watch_channel`]/[`update_channel`] or before completing async writes. With
2282/// `ChannelManager`s, writing updates happens out-of-band (and will prevent any other
2283/// `ChannelManager` operations from occurring during the serialization process). If the
2284/// deserialized version is out-of-date compared to the [`ChannelMonitor`] passed by reference to
2285/// [`read`], those channels will be force-closed based on the `ChannelMonitor` state and no funds
2286/// will be lost (modulo on-chain transaction fees).
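///
/// As a rough sketch, writing the `ChannelManager` out can be as simple as serializing it via
/// [`Writeable`] whenever persistence is needed. The file path is illustrative and durability
/// concerns (atomic writes, fsync) are elided, so the snippet is not compiled as a doctest:
///
/// ```ignore
/// use lightning::util::ser::Writeable;
///
/// if channel_manager.get_and_clear_needs_persistence() {
///     let bytes = channel_manager.encode();
///     // Illustrative only: real deployments should write atomically and durably, e.g. via a
///     // [`KVStore`] implementation, before relying on the data across restarts.
///     std::fs::write("channel_manager.bin", &bytes)?;
/// }
/// ```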
2287///
2288/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
2289/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
2290/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
2291///
2292/// # `ChannelUpdate` Messages
2293///
2294/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
2295/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
2296/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
2297/// offline for a full minute. In order to track this, you must call
2298/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
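///
/// For example, a simple background task can drive this (sketch only; not compiled as a doctest):
///
/// ```ignore
/// loop {
///     std::thread::sleep(std::time::Duration::from_secs(60));
///     channel_manager.timer_tick_occurred();
/// }
/// ```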
2299///
2300/// # DoS Mitigation
2301///
2302/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
2303/// inbound channels without confirmed funding transactions. This may result in nodes which we do
2304/// not have a channel with being unable to connect to us or open new channels with us if we have
2305/// many peers with unfunded channels.
2306///
2307/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
2308/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
2309/// never limited. Please ensure you limit the count of such channels yourself.
2310///
2311/// # Type Aliases
2312///
2313/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
2314/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
2315/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
2316/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
2317/// you're using lightning-net-tokio.
2318///
2319/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2320/// [`MessageHandler`]: crate::ln::peer_handler::MessageHandler
2321/// [`OnionMessenger`]: crate::onion_message::messenger::OnionMessenger
2322/// [`PeerManager::read_event`]: crate::ln::peer_handler::PeerManager::read_event
2323/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
2324/// [`timer_tick_occurred`]: Self::timer_tick_occurred
2325/// [`get_and_clear_needs_persistence`]: Self::get_and_clear_needs_persistence
2326/// [`Persister`]: crate::util::persist::Persister
2327/// [`KVStore`]: crate::util::persist::KVStore
2328/// [`get_event_or_persistence_needed_future`]: Self::get_event_or_persistence_needed_future
2329/// [`lightning-block-sync`]: https://docs.rs/lightning_block_sync/latest/lightning_block_sync
2330/// [`lightning-transaction-sync`]: https://docs.rs/lightning_transaction_sync/latest/lightning_transaction_sync
2331/// [`lightning-background-processor`]: https://docs.rs/lightning_background_processor/latest/lightning_background_processor
2332/// [`list_channels`]: Self::list_channels
2333/// [`list_usable_channels`]: Self::list_usable_channels
2334/// [`create_channel`]: Self::create_channel
2335/// [`close_channel`]: Self::close_channel
2336/// [`force_close_broadcasting_latest_txn`]: Self::force_close_broadcasting_latest_txn
2337/// [BOLT 11]: https://github.com/lightning/bolts/blob/master/11-payment-encoding.md
2338/// [BOLT 12]: https://github.com/rustyrussell/lightning-rfc/blob/guilt/offers/12-offer-encoding.md
2339/// [`list_recent_payments`]: Self::list_recent_payments
2340/// [`abandon_payment`]: Self::abandon_payment
2341/// [`lightning-invoice`]: https://docs.rs/lightning_invoice/latest/lightning_invoice
2342/// [`create_bolt11_invoice`]: Self::create_bolt11_invoice
2343/// [`create_inbound_payment`]: Self::create_inbound_payment
2344/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
2345/// [`bolt11_payment`]: crate::ln::bolt11_payment
2346/// [`claim_funds`]: Self::claim_funds
2347/// [`send_payment`]: Self::send_payment
2348/// [`offers`]: crate::offers
2349/// [`create_offer_builder`]: Self::create_offer_builder
2350/// [`pay_for_offer`]: Self::pay_for_offer
2351/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
2352/// [`create_refund_builder`]: Self::create_refund_builder
2353/// [`request_refund_payment`]: Self::request_refund_payment
2354/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
2355/// [`funding_created`]: msgs::FundingCreated
2356/// [`funding_transaction_generated`]: Self::funding_transaction_generated
2357/// [`BlockHash`]: bitcoin::hash_types::BlockHash
2358/// [`update_channel`]: chain::Watch::update_channel
2359/// [`ChannelUpdate`]: msgs::ChannelUpdate
2360/// [`read`]: ReadableArgs::read
2361//
2362// Lock order:
2363// The tree structure below illustrates the lock order requirements for the different locks of the
2364// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
2365// and should then be taken in the order of the lowest to the highest level in the tree.
2366// Note that locks on different branches shall not be taken at the same time, as doing so will
2367// create a new lock order for those specific locks in the order they were taken.
2368//
2369// Lock order tree:
2370//
2371// `pending_offers_messages`
2372//
2373// `pending_async_payments_messages`
2374//
2375// `total_consistency_lock`
2376//  |
2377//  |__`forward_htlcs`
2378//  |   |
2379//  |   |__`pending_intercepted_htlcs`
2380//  |
2381//  |__`decode_update_add_htlcs`
2382//  |
2383//  |__`per_peer_state`
2384//      |
2385//      |__`claimable_payments`
2386//      |
2387//      |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
2388//         |
2389//         |__`peer_state`
2390//            |
2391//            |__`outpoint_to_peer`
2392//            |
2393//            |__`short_to_chan_info`
2394//            |
2395//            |__`outbound_scid_aliases`
2396//            |
2397//            |__`best_block`
2398//            |
2399//            |__`pending_events`
2400//               |
2401//               |__`pending_background_events`
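//
// For example, a sketch of a valid acquisition order down one branch (illustrative only, using
// the field names defined on this struct):
//
//   let _guard = self.total_consistency_lock.read().unwrap();
//   let per_peer_state = self.per_peer_state.read().unwrap();
//   let peer_state = per_peer_state.get(&counterparty_node_id).unwrap().lock().unwrap();
//
// whereas `forward_htlcs` and `per_peer_state`, being on different branches, must never be held
// at the same time.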
2402//
2403pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
2404where
2405	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
2406	T::Target: BroadcasterInterface,
2407	ES::Target: EntropySource,
2408	NS::Target: NodeSigner,
2409	SP::Target: SignerProvider,
2410	F::Target: FeeEstimator,
2411	R::Target: Router,
2412	MR::Target: MessageRouter,
2413	L::Target: Logger,
2414{
2415	default_configuration: UserConfig,
2416	chain_hash: ChainHash,
2417	fee_estimator: LowerBoundedFeeEstimator<F>,
2418	chain_monitor: M,
2419	tx_broadcaster: T,
2420	router: R,
2421	message_router: MR,
2422
2423	/// See `ChannelManager` struct-level documentation for lock order requirements.
2424	#[cfg(test)]
2425	pub(super) best_block: RwLock<BestBlock>,
2426	#[cfg(not(test))]
2427	best_block: RwLock<BestBlock>,
2428	secp_ctx: Secp256k1<secp256k1::All>,
2429
2430	/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
2431	/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
2432	/// (if the channel has been force-closed), however we track them here to prevent duplicative
2433	/// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
2434	/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
2435	/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
2436	/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
2437	/// after reloading from disk while replaying blocks against ChannelMonitors.
2438	///
2439	/// See `PendingOutboundPayment` documentation for more info.
2440	///
2441	/// See `ChannelManager` struct-level documentation for lock order requirements.
2442	pending_outbound_payments: OutboundPayments,
2443
2444	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
2445	///
2446	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
2447	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
2448	/// and via the classic SCID.
2449	///
2450	/// Note that no consistency guarantees are made about the existence of a channel with the
2451	/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
2452	///
2453	/// See `ChannelManager` struct-level documentation for lock order requirements.
2454	#[cfg(test)]
2455	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2456	#[cfg(not(test))]
2457	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
2458	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
2459	/// until the user tells us what we should do with them.
2460	///
2461	/// See `ChannelManager` struct-level documentation for lock order requirements.
2462	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,
2463
2464	/// SCID/SCID Alias -> pending `update_add_htlc`s to decode.
2465	///
2466	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
2467	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
2468	/// and via the classic SCID.
2469	///
2470	/// Note that no consistency guarantees are made about the existence of a channel with the
2471	/// `short_channel_id` here, nor the `channel_id` in `UpdateAddHTLC`!
2472	///
2473	/// See `ChannelManager` struct-level documentation for lock order requirements.
2474	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,
2475
2476	/// The sets of payments which are claimable or currently being claimed. See
2477	/// [`ClaimablePayments`]' individual field docs for more info.
2478	///
2479	/// See `ChannelManager` struct-level documentation for lock order requirements.
2480	claimable_payments: Mutex<ClaimablePayments>,
2481
2482	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
2483	/// and some closed channels which reached a usable state prior to being closed. This is used
2484	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
2485	/// active channel list on load.
2486	///
2487	/// See `ChannelManager` struct-level documentation for lock order requirements.
2488	outbound_scid_aliases: Mutex<HashSet<u64>>,
2489
2490	/// Channel funding outpoint -> `counterparty_node_id`.
2491	///
2492	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
2493	/// the corresponding channel for the event, as we only have access to the `channel_id` during
2494	/// the handling of the events.
2495	///
2496	/// Note that no consistency guarantees are made about the existence of a peer with the
2497	/// `counterparty_node_id` in our other maps.
2498	///
2499	/// TODO:
2500	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
2501	/// to make the `counterparty_node_id` a required field in `ChannelMonitor`s, which unfortunately
2502	/// would break backwards compatibility.
2503	/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
2504	/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
2505	/// required to access the channel with the `counterparty_node_id`.
2506	///
2507	/// See `ChannelManager` struct-level documentation for lock order requirements.
2508	#[cfg(not(test))]
2509	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
2510	#[cfg(test)]
2511	pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
2512
2513	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
2514	///
2515	/// Outbound SCID aliases are added here once the channel is available for normal use, with
2516	/// SCIDs being added once the funding transaction is confirmed at the channel's required
2517	/// confirmation depth.
2518	///
2519	/// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
2520	/// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
2521	/// channel with the `channel_id` in our other maps.
2522	///
2523	/// See `ChannelManager` struct-level documentation for lock order requirements.
2524	#[cfg(test)]
2525	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2526	#[cfg(not(test))]
2527	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
2528
2529	our_network_pubkey: PublicKey,
2530
2531	inbound_payment_key: inbound_payment::ExpandedKey,
2532
2533	/// LDK puts the [fake scids] that it generates into namespaces, to identify the type of an
2534	/// incoming payment. To make it harder for a third-party to identify the type of a payment,
2535	/// we encrypt the namespace identifier using these bytes.
2536	///
2537	/// [fake scids]: crate::util::scid_utils::fake_scid
2538	fake_scid_rand_bytes: [u8; 32],
2539
2540	/// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
2541	/// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
2542	/// keeping additional state.
2543	probing_cookie_secret: [u8; 32],
2544
2545	/// When generating [`PaymentId`]s for inbound payments, we HMAC the HTLCs with this secret.
2546	inbound_payment_id_secret: [u8; 32],
2547
2548	/// The highest block timestamp we've seen, which is usually a good guess at the current time.
2549	/// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
2550	/// very far in the past, and can only ever be up to two hours in the future.
2551	highest_seen_timestamp: AtomicUsize,
2552
2553	/// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
2554	/// basis, as well as the peer's latest features.
2555	///
2556	/// If we are connected to a peer we always at least have an entry here, even if no channels
2557	/// are currently open with that peer.
2558	///
2559	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
2560	/// operate on the inner value freely. This allows channel operations to proceed in parallel
2561	/// on a per-peer basis.
2562	///
2563	/// Note that the same thread must never acquire two inner `PeerState` locks at the same time.
2564	///
2565	/// See `ChannelManager` struct-level documentation for lock order requirements.
2566	#[cfg(not(any(test, feature = "_test_utils")))]
2567	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2568	#[cfg(any(test, feature = "_test_utils"))]
2569	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
2570
2571	/// The set of events which we need to give to the user to handle. In some cases an event may
2572	/// require some further action after the user handles it (currently only blocking a monitor
2573	/// update from being handed to the user to ensure the included changes to the channel state
2574	/// are handled by the user before they're persisted durably to disk). In that case, the second
2575	/// element in the tuple is set to `Some` with further details of the action.
2576	///
2577	/// Note that events MUST NOT be removed from pending_events after deserialization, as they
2578	/// could be in the middle of being processed without the direct mutex held.
2579	///
2580	/// See `ChannelManager` struct-level documentation for lock order requirements.
2581	#[cfg(not(any(test, feature = "_test_utils")))]
2582	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2583	#[cfg(any(test, feature = "_test_utils"))]
2584	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
2585
2586	/// A simple atomic flag to ensure only one task at a time can be processing events asynchronously.
2587	pending_events_processor: AtomicBool,
2588
2589	/// If we are running during init (either directly during the deserialization method or in
2590	/// block connection methods which run after deserialization but before normal operation) we
2591	/// cannot provide the user with [`ChannelMonitorUpdate`]s through the normal update flow -
2592	/// prior to normal operation the user may not have loaded the [`ChannelMonitor`]s into their
2593	/// [`ChainMonitor`] and thus attempting to update it will fail or panic.
2594	///
2595	/// Thus, we place them here to be handled as soon as possible once we are running normally.
2596	///
2597	/// See `ChannelManager` struct-level documentation for lock order requirements.
2598	///
2599	/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
2600	pending_background_events: Mutex<Vec<BackgroundEvent>>,
2601	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
2602	/// Essentially just when we're serializing ourselves out.
2603	/// Taken first, before any other locks, everywhere we are making changes.
2604	/// When acquiring this lock in read mode, rather than acquiring it directly, call
2605	/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
2606	/// Notifier the lock contains sends out a notification when the lock is released.
2607	total_consistency_lock: RwLock<()>,
2608	/// Tracks the progress of channels going through batch funding by whether funding_signed was
2609	/// received and the monitor has been persisted.
2610	///
2611	/// This information does not need to be persisted as funding nodes can forget
2612	/// unfunded channels upon disconnection.
2613	funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,
2614
2615	background_events_processed_since_startup: AtomicBool,
2616
2617	event_persist_notifier: Notifier,
2618	needs_persist_flag: AtomicBool,
2619
2620	#[cfg(not(any(test, feature = "_test_utils")))]
2621	pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
2622	#[cfg(any(test, feature = "_test_utils"))]
2623	pub(crate) pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
2624	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,
2625
2626	/// Tracks the message events that are to be broadcast once we are connected to some peer.
2627	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,
2628
2629	/// We only want to force-close our channels on peers based on stale feerates when we're
2630	/// confident the feerate on the channel is *really* stale, not just became stale recently.
2631	/// Thus, we store the fee estimates we had as of the last [`FEERATE_TRACKING_BLOCKS`] blocks
2632	/// (after startup completed) here, and only force-close when channels have a lower feerate
2633	/// than we predicted any time in the last [`FEERATE_TRACKING_BLOCKS`] blocks.
2634	///
2635	/// We only keep this in memory as we assume any feerates we receive immediately after startup
2636	/// may be bunk (as they often are if Bitcoin Core crashes) and want to delay taking any
2637	/// actions for a day anyway.
2638	///
2639	/// The first element in the pair is the
2640	/// [`ConfirmationTarget::MinAllowedAnchorChannelRemoteFee`] estimate, the second the
2641	/// [`ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee`] estimate.
2642	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,
2643
2644	#[cfg(feature = "dnssec")]
2645	hrn_resolver: OMNameResolver,
2646	#[cfg(feature = "dnssec")]
2647	pending_dns_onion_messages: Mutex<Vec<(DNSResolverMessage, MessageSendInstructions)>>,
2648
2649	#[cfg(feature = "_test_utils")]
2650	/// In testing, it is useful to be able to forge a name -> offer mapping so that we can pay an
2651	/// offer generated in the test.
2652	///
2653	/// This allows for doing so, validating proofs as normal, but, if they pass, replacing the
2654	/// offer they resolve to with the given one.
2655	pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,
2656
2657	#[cfg(test)]
2658	pub(super) entropy_source: ES,
2659	#[cfg(not(test))]
2660	entropy_source: ES,
2661	node_signer: NS,
2662	#[cfg(test)]
2663	pub(super) signer_provider: SP,
2664	#[cfg(not(test))]
2665	signer_provider: SP,
2666
2667	logger: L,
2668}
2669
2670/// Chain-related parameters used to construct a new `ChannelManager`.
2671///
2672/// Typically, the block-specific parameters are derived from the best block hash for the network,
2673/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
2674/// are not needed when deserializing a previously constructed `ChannelManager`.
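///
/// A minimal construction sketch (illustrative only, hence `ignore`; it assumes a brand-new
/// node and simply uses the network's genesis block as the initial best block, whereas a real
/// node should use its actual chain tip):
///
/// ```ignore
/// use bitcoin::network::Network;
/// use lightning::chain::BestBlock;
///
/// let params = ChainParameters {
///     network: Network::Testnet,
///     best_block: BestBlock::from_network(Network::Testnet),
/// };
/// ```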
2675#[derive(Clone, Copy, PartialEq)]
2676pub struct ChainParameters {
2677	/// The network for determining the `chain_hash` in Lightning messages.
2678	pub network: Network,
2679
2680	/// The hash and height of the latest block successfully connected.
2681	///
2682	/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
2683	pub best_block: BestBlock,
2684}
2685
2686#[derive(Copy, Clone, PartialEq)]
2687#[must_use]
2688enum NotifyOption {
2689	DoPersist,
2690	SkipPersistHandleEvents,
2691	SkipPersistNoEvents,
2692}
2693
2694/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
2695/// desirable to notify any listeners on `await_persistable_update_timeout`/
2696/// `await_persistable_update` when new updates are available for persistence. Therefore, this
2697/// struct is responsible for locking the total consistency lock and, upon going out of scope,
2698/// sending the aforementioned notification (since the lock being released indicates that the
2699/// updates are ready for persistence).
2700///
2701/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
2702/// notify or not based on whether relevant changes have been made, providing a closure to
2703/// `optionally_notify` which returns a `NotifyOption`.
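///
/// A minimal, illustrative usage sketch (this type is internal, so the snippet is `ignore`d):
///
/// ```ignore
/// {
///     // Takes `total_consistency_lock` in read mode and remembers that a persist is needed.
///     let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
///     // ... mutate channel state here ...
/// } // Dropping the guard sets `needs_persist_flag` and notifies any waiters.
/// ```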
2704struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
2705	event_persist_notifier: &'a Notifier,
2706	needs_persist_flag: &'a AtomicBool,
2707	should_persist: F,
2708	// We hold onto this result so the lock doesn't get released immediately.
2709	_read_guard: RwLockReadGuard<'a, ()>,
2710}
2711
2712impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
2713	/// Notifies any waiters and indicates that we need to persist, in addition to possibly having
2714	/// events to handle.
2715	///
2716	/// This must always be called if the changes included a `ChannelMonitorUpdate`, as well as in
2717	/// other cases where losing the changes on restart may result in a force-close or otherwise
2718	/// isn't ideal.
2719	fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2720		Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
2721	}
2722
2723	fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
2724	-> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2725		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2726		let force_notify = cm.get_cm().process_background_events();
2727
2728		PersistenceNotifierGuard {
2729			event_persist_notifier: &cm.get_cm().event_persist_notifier,
2730			needs_persist_flag: &cm.get_cm().needs_persist_flag,
2731			should_persist: move || {
2732				// Pick the "most" action between `persist_check` and the background events
2733				// processing and return that.
2734				let notify = persist_check();
2735				match (notify, force_notify) {
2736					(NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
2737					(_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
2738					(NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
2739					(_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
2740					_ => NotifyOption::SkipPersistNoEvents,
2741				}
2742			},
2743			_read_guard: read_guard,
2744		}
2745	}
2746
2747	/// Note that if any [`ChannelMonitorUpdate`]s are possibly generated,
2748	/// [`ChannelManager::process_background_events`] MUST be called first (or
2749	/// [`Self::optionally_notify`] used).
2750	fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
2751	(cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
2752		let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2753
2754		PersistenceNotifierGuard {
2755			event_persist_notifier: &cm.get_cm().event_persist_notifier,
2756			needs_persist_flag: &cm.get_cm().needs_persist_flag,
2757			should_persist: persist_check,
2758			_read_guard: read_guard,
2759		}
2760	}
2761}
2762
2763impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
2764	fn drop(&mut self) {
2765		match (self.should_persist)() {
2766			NotifyOption::DoPersist => {
2767				self.needs_persist_flag.store(true, Ordering::Release);
2768				self.event_persist_notifier.notify()
2769			},
2770			NotifyOption::SkipPersistHandleEvents =>
2771				self.event_persist_notifier.notify(),
2772			NotifyOption::SkipPersistNoEvents => {},
2773		}
2774	}
2775}
2776
2777/// The amount of time in blocks we require our counterparty to wait to claim their money (i.e. the
2778/// maximum time between checks by us, or our watchtower, for them having broadcast a theft transaction).
2779///
2780/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
2781///
2782/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
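///
/// For example, a minimal sketch of raising this delay via the config (illustrative only;
/// `UserConfig` field names are those in [`crate::util::config`]):
///
/// ```ignore
/// let mut config = lightning::util::config::UserConfig::default();
/// // Require our counterparty to wait roughly two days (288 blocks) instead of the
/// // 144-block minimum above before they can claim their funds after a unilateral close.
/// config.channel_handshake_config.our_to_self_delay = 2 * 6 * 24;
/// ```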
2783pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
2784/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
2785/// the maximum delay required by lnd as of March 2021.
2786pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
2787
2788/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
2789/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
2790///
2791/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
2792///
2793/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
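///
/// For example, a minimal sketch of raising the delta applied to forwarded HTLCs (illustrative
/// only; `UserConfig` field names are those in [`crate::util::config`]):
///
/// ```ignore
/// let mut config = lightning::util::config::UserConfig::default();
/// // Keep a full day (144 blocks) of headroom instead of the 42-block minimum above.
/// config.channel_config.cltv_expiry_delta = 144;
/// ```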
2794// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
2795// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
2796// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
2797// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
2798pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
2799// This should be long enough to allow a payment path drawn across multiple routing hops with substantial
2800// `cltv_expiry_delta`s. Indeed, the sum of those values is the reaction delay offered to a routing node
2801// in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
2802// scale them up to suit its security policy. At the network level, we shouldn't constrain them too much,
2803// while avoiding introducing a DoS vector. Further, a low CLTV_FAR_FAR_AWAY could be a source of routing
2804// failures for any HTLC sender picking an LDK node among the first hops.
2805pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
2806
2807/// Minimum CLTV difference between the current block height and received inbound payments.
2808/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least
2809/// this value.
2810// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
2811// any payments to succeed. Further, we don't want payments to fail if a block was found while
2812// a payment was being routed, so we add an extra block to be safe.
2813pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
2814
2815// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
2816// ie that if the next-hop peer fails the HTLC within
2817// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out on-chain,
2818// then wait ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC before
2819// failing the corresponding HTLC backward, with us then seeing the last block of ANTI_REORG_DELAY before
2820// LATENCY_GRACE_PERIOD_BLOCKS.
2821#[allow(dead_code)]
2822const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
2823
2824// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
2825// ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
2826#[allow(dead_code)]
2827const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
2828
2829/// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
2830pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
2831
2832/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
2833/// until we mark the channel disabled and gossip the update.
2834pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
2835
2836/// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is connected until
2837/// we mark the channel enabled and gossip the update.
2838pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
2839
2840/// The maximum number of unfunded channels we can have per-peer before we start rejecting new
2841/// (inbound) ones. The number of peers with unfunded channels is limited separately in
2842/// [`MAX_UNFUNDED_CHANNEL_PEERS`].
2843const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
2844
2845/// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
2846/// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
2847const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
2848
2849/// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
2850/// many peers we reject new (inbound) connections.
2851const MAX_NO_CHANNEL_PEERS: usize = 250;
2852
2853/// The maximum expiration from the current time where an [`Offer`] or [`Refund`] is considered
2854/// short-lived, while anything with a greater expiration is considered long-lived.
2855///
2856/// An [`Offer`] or [`Refund`] built using [`ChannelManager::create_offer_builder`] or
2857/// [`ChannelManager::create_refund_builder`] will include a [`BlindedMessagePath`] created using:
2858/// - [`MessageRouter::create_compact_blinded_paths`] when short-lived, and
2859/// - [`MessageRouter::create_blinded_paths`] when long-lived.
2860///
2861/// Using compact [`BlindedMessagePath`]s may provide better privacy as the [`MessageRouter`] could select
2862/// more hops. However, since they use short channel ids instead of pubkeys, they are more likely to
2863/// become invalid over time as channels are closed. Thus, they are only suitable for short-term use.
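///
/// As an illustrative sketch of the classification (not the exact implementation;
/// `absolute_expiry` and `now` are assumed `core::time::Duration`s since the Unix epoch):
///
/// ```ignore
/// let relative_expiry = absolute_expiry.saturating_sub(now);
/// let is_short_lived = relative_expiry <= MAX_SHORT_LIVED_RELATIVE_EXPIRY;
/// ```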
2864pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
2865
2866/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
2867/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
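///
/// For example, a minimal sketch of scanning for still-pending payments (`channel_manager` is
/// an assumed, fully initialized [`ChannelManager`]):
///
/// ```ignore
/// for details in channel_manager.list_recent_payments() {
///     if let RecentPaymentDetails::Pending { payment_id, payment_hash, total_msat } = details {
///         println!("payment {:?} ({:?}) of {} msat is still in flight", payment_id, payment_hash, total_msat);
///     }
/// }
/// ```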
2868#[derive(Debug, PartialEq)]
2869pub enum RecentPaymentDetails {
2870	/// When an invoice was requested and thus a payment has not yet been sent.
2871	AwaitingInvoice {
2872		/// A user-provided identifier in [`ChannelManager::pay_for_offer`] used to uniquely identify a
2873		/// payment and ensure idempotency in LDK.
2874		payment_id: PaymentId,
2875	},
2876	/// When a payment is still being sent and awaiting successful delivery.
2877	Pending {
2878		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2879		/// identify a payment and ensure idempotency in LDK.
2880		///
2881		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2882		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2883		payment_id: PaymentId,
2884		/// Hash of the payment that is currently being sent but has yet to be fulfilled or
2885		/// abandoned.
2886		payment_hash: PaymentHash,
2887		/// Total amount (in msat, excluding fees) across all paths for this payment,
2888		/// not just the amount currently inflight.
2889		total_msat: u64,
2890	},
2891	/// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
2892	/// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
2893	/// payment is removed from tracking.
2894	Fulfilled {
2895		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2896		/// identify a payment and ensure idempotency in LDK.
2897		///
2898		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2899		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2900		payment_id: PaymentId,
2901		/// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
2902		/// made before LDK version 0.0.104.
2903		payment_hash: Option<PaymentHash>,
2904	},
2905	/// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
2906	/// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
2907	/// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
2908	Abandoned {
2909		/// A user-provided identifier in [`send_payment`] or [`pay_for_offer`] used to uniquely
2910		/// identify a payment and ensure idempotency in LDK.
2911		///
2912		/// [`send_payment`]: crate::ln::channelmanager::ChannelManager::send_payment
2913		/// [`pay_for_offer`]: crate::ln::channelmanager::ChannelManager::pay_for_offer
2914		payment_id: PaymentId,
2915		/// Hash of the payment that we have given up trying to send.
2916		payment_hash: PaymentHash,
2917	},
2918}
2919
2920/// Route hints used in constructing invoices for [phantom node payments].
2921///
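/// For example, a minimal sketch of collecting hints on one of the phantom-sharing nodes
/// (`channel_manager` is an assumed, fully initialized [`ChannelManager`]; combining the hints
/// from all nodes and building the actual invoice is elided):
///
/// ```ignore
/// let hints: PhantomRouteHints = channel_manager.get_phantom_route_hints();
/// ```
///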
2922/// [phantom node payments]: crate::sign::PhantomKeysManager
2923#[derive(Clone)]
2924pub struct PhantomRouteHints {
2925	/// The list of channels to be included in the invoice route hints.
2926	pub channels: Vec<ChannelDetails>,
2927	/// A fake scid used for representing the phantom node's fake channel in generating the invoice
2928	/// route hints.
2929	pub phantom_scid: u64,
2930	/// The pubkey of the real backing node that would ultimately receive the payment.
2931	pub real_node_pubkey: PublicKey,
2932}
2933
2934macro_rules! handle_error {
2935	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
2936		// In testing, ensure there are no deadlocks where the lock is already held upon
2937		// entering the macro.
2938		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
2939		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
2940
2941		match $internal {
2942			Ok(msg) => Ok(msg),
2943			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
2944				let mut msg_event = None;
2945
2946				if let Some((shutdown_res, update_option)) = shutdown_finish {
2947					let counterparty_node_id = shutdown_res.counterparty_node_id;
2948					let channel_id = shutdown_res.channel_id;
2949					let logger = WithContext::from(
2950						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
2951					);
2952					log_error!(logger, "Force-closing channel: {}", err.err);
2953
2954					$self.finish_close_channel(shutdown_res);
2955					if let Some(update) = update_option {
2956						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
2957						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
2958							msg: update
2959						});
2960					}
2961				} else {
2962					log_error!($self.logger, "Got non-closing error: {}", err.err);
2963				}
2964
2965				if let msgs::ErrorAction::IgnoreError = err.action {
2966				} else {
2967					msg_event = Some(events::MessageSendEvent::HandleError {
2968						node_id: $counterparty_node_id,
2969						action: err.action.clone()
2970					});
2971				}
2972
2973				if let Some(msg_event) = msg_event {
2974					let per_peer_state = $self.per_peer_state.read().unwrap();
2975					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
2976						let mut peer_state = peer_state_mutex.lock().unwrap();
2977						peer_state.pending_msg_events.push(msg_event);
2978					}
2979				}
2980
2981				// Return the error in case a higher-level API needs one
2982				Err(err)
2983			},
2984		}
2985	} };
2986}
2987
2988/// When a channel is removed, two things need to happen:
2989/// (a) This must be called in the same `per_peer_state` lock as the channel-closing action,
2990/// (b) [`ChannelManager::finish_close_channel`] needs to be called without holding any locks
2991///     (except [`ChannelManager::total_consistency_lock`]).
2992///
2993/// Note that this step can be skipped if the channel was never opened (through the creation of a
2994/// [`ChannelMonitor`]/channel funding transaction) to begin with.
2995macro_rules! locked_close_channel {
2996	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
2997		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
2998			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
2999				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
3000		}
3001		// If there's a possibility that we need to generate further monitor updates for this
3002		// channel, we need to store its last update_id. However, we don't want to insert
3003		// into the map (which prevents the `PeerState` from being cleaned up) for channels that
3004		// never even got confirmations (which would open us up to DoS attacks).
3005		let update_id = $channel_context.get_latest_monitor_update_id();
3006		if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
3007			let chan_id = $channel_context.channel_id();
3008			$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
3009		}
3010		if let Some(outpoint) = $channel_context.get_funding_txo() {
3011			$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
3012		}
3013		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3014		if let Some(short_id) = $channel_context.get_short_channel_id() {
3015			short_to_chan_info.remove(&short_id);
3016		} else {
3017			// If the channel was never confirmed on-chain prior to its closure, remove the
3018			// outbound SCID alias we used for it from the collision-prevention set. While we
3019			// generally want to avoid ever re-using an outbound SCID alias across all channels, we
3020			// also don't want a counterparty to be able to trivially cause a memory leak by simply
3021			// opening a million channels with us which are closed before we ever reach the funding
3022			// stage.
3023			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
3024			debug_assert!(alias_removed);
3025		}
3026		short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
3027	}}
3028}
3029
3030/// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
3031macro_rules! convert_chan_phase_err {
3032	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
3033		match $err {
3034			ChannelError::Warn(msg) => {
3035				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
3036			},
3037			ChannelError::Ignore(msg) => {
3038				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
3039			},
3040			ChannelError::Close((msg, reason)) => {
3041				let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
3042				log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
3043				let mut shutdown_res = $channel.context.force_shutdown(true, reason);
3044				locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res);
3045				let err =
3046					MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
3047				(true, err)
3048			},
3049		}
3050	};
3051	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
3052		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
3053	};
3054	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
3055		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
3056	};
3057	($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => {
3058		match $channel_phase {
3059			ChannelPhase::Funded(channel) => {
3060				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL)
3061			},
3062			ChannelPhase::UnfundedOutboundV1(channel) => {
3063				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3064			},
3065			ChannelPhase::UnfundedInboundV1(channel) => {
3066				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3067			},
3068			ChannelPhase::UnfundedOutboundV2(channel) => {
3069				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3070			},
3071			ChannelPhase::UnfundedInboundV2(channel) => {
3072				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
3073			},
3074		}
3075	};
3076}
3077
3078macro_rules! break_chan_phase_entry {
3079	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3080		match $res {
3081			Ok(res) => res,
3082			Err(e) => {
3083				let key = *$entry.key();
3084				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
3085				if drop {
3086					$entry.remove_entry();
3087				}
3088				break Err(res);
3089			}
3090		}
3091	}
3092}
3093
3094macro_rules! try_chan_phase_entry {
3095	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
3096		match $res {
3097			Ok(res) => res,
3098			Err(e) => {
3099				let key = *$entry.key();
3100				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
3101				if drop {
3102					$entry.remove_entry();
3103				}
3104				return Err(res);
3105			}
3106		}
3107	}
3108}
3109
3110macro_rules! remove_channel_phase {
3111	($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {
3112		{
3113			let channel = $entry.remove_entry().1;
3114			locked_close_channel!($self, $peer_state, &channel.context(), $shutdown_res_mut);
3115			channel
3116		}
3117	}
3118}
3119
3120macro_rules! send_channel_ready {
3121	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
3122		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
3123			node_id: $channel.context.get_counterparty_node_id(),
3124			msg: $channel_ready_msg,
3125		});
3126		// Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
3127		// we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
3128		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
3129		let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3130		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3131			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3132		if let Some(real_scid) = $channel.context.get_short_channel_id() {
3133			let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
3134			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
3135				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
3136		}
3137	}}
3138}
3139macro_rules! emit_funding_tx_broadcast_safe_event {
3140	($locked_events: expr, $channel: expr, $funding_txo: expr) => {
3141		if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
3142			$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
3143				channel_id: $channel.context.channel_id(),
3144				user_channel_id: $channel.context.get_user_id(),
3145				funding_txo: $funding_txo,
3146				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3147				former_temporary_channel_id: $channel.context.temporary_channel_id()
3148					.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
3149			}, None));
3150			$channel.context.set_funding_tx_broadcast_safe_event_emitted();
3151		}
3152	}
3153}
3154
3155macro_rules! emit_channel_pending_event {
3156	($locked_events: expr, $channel: expr) => {
3157		if $channel.context.should_emit_channel_pending_event() {
3158			$locked_events.push_back((events::Event::ChannelPending {
3159				channel_id: $channel.context.channel_id(),
3160				former_temporary_channel_id: $channel.context.temporary_channel_id(),
3161				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3162				user_channel_id: $channel.context.get_user_id(),
3163				funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
3164				channel_type: Some($channel.context.get_channel_type().clone()),
3165			}, None));
3166			$channel.context.set_channel_pending_event_emitted();
3167		}
3168	}
3169}
3170
3171macro_rules! emit_channel_ready_event {
3172	($locked_events: expr, $channel: expr) => {
3173		if $channel.context.should_emit_channel_ready_event() {
3174			debug_assert!($channel.context.channel_pending_event_emitted());
3175			$locked_events.push_back((events::Event::ChannelReady {
3176				channel_id: $channel.context.channel_id(),
3177				user_channel_id: $channel.context.get_user_id(),
3178				counterparty_node_id: $channel.context.get_counterparty_node_id(),
3179				channel_type: $channel.context.get_channel_type().clone(),
3180			}, None));
3181			$channel.context.set_channel_ready_event_emitted();
3182		}
3183	}
3184}
3185
3186macro_rules! handle_monitor_update_completion {
3187	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
3188		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3189		let mut updates = $chan.monitor_updating_restored(&&logger,
3190			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
3191			$self.best_block.read().unwrap().height);
3192		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3193		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
3194			// We only send a channel_update in the case where we are just now sending a
3195			// channel_ready and the channel is in a usable state. We may re-send a
3196			// channel_update later through the announcement_signatures process for public
3197			// channels, but there's no reason not to just inform our counterparty of our fees
3198			// now.
3199			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
3200				Some(events::MessageSendEvent::SendChannelUpdate {
3201					node_id: counterparty_node_id,
3202					msg,
3203				})
3204			} else { None }
3205		} else { None };
3206
3207		let update_actions = $peer_state.monitor_update_blocked_actions
3208			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());
3209
3210		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
3211			&mut $peer_state.pending_msg_events, $chan, updates.raa,
3212			updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
3213			updates.funding_broadcastable, updates.channel_ready,
3214			updates.announcement_sigs, updates.tx_signatures);
3215		if let Some(upd) = channel_update {
3216			$peer_state.pending_msg_events.push(upd);
3217		}
3218
3219		let channel_id = $chan.context.channel_id();
3220		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
3221		core::mem::drop($peer_state_lock);
3222		core::mem::drop($per_peer_state_lock);
3223
3224		// If the channel belongs to a batch funding transaction, the progress of the batch
3225		// should be updated as we have received funding_signed and persisted the monitor.
3226		if let Some(txid) = unbroadcasted_batch_funding_txid {
3227			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
3228			let mut batch_completed = false;
3229			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
3230				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
3231					*chan_id == channel_id &&
3232					*pubkey == counterparty_node_id
3233				));
3234				if let Some(channel_state) = channel_state {
3235					channel_state.2 = true;
3236				} else {
3237					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
3238				}
3239				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
3240			} else {
3241				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
3242			}
3243
3244			// When all channels in a batched funding transaction have become ready, it is not necessary
3245			// to track the progress of the batch anymore and the state of the channels can be updated.
3246			if batch_completed {
3247				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
3248				let per_peer_state = $self.per_peer_state.read().unwrap();
3249				let mut batch_funding_tx = None;
3250				for (channel_id, counterparty_node_id, _) in removed_batch_state {
3251					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
3252						let mut peer_state = peer_state_mutex.lock().unwrap();
3253						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
3254							batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
3255							chan.set_batch_ready();
3256							let mut pending_events = $self.pending_events.lock().unwrap();
3257							emit_channel_pending_event!(pending_events, chan);
3258						}
3259					}
3260				}
3261				if let Some(tx) = batch_funding_tx {
3262					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
3263					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
3264				}
3265			}
3266		}
3267
3268		$self.handle_monitor_update_completion_actions(update_actions);
3269
3270		if let Some(forwards) = htlc_forwards {
3271			$self.forward_htlcs(&mut [forwards][..]);
3272		}
3273		if let Some(decode) = decode_update_add_htlcs {
3274			$self.push_decode_update_add_htlcs(decode);
3275		}
3276		$self.finalize_claims(updates.finalized_claimed_htlcs);
3277		for failure in updates.failed_htlcs.drain(..) {
3278			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
3279			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
3280		}
3281	} }
3282}
3283
3284macro_rules! handle_new_monitor_update {
3285	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
3286		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
3287		match $update_res {
3288			ChannelMonitorUpdateStatus::UnrecoverableError => {
3289				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
3290				log_error!($logger, "{}", err_str);
3291				panic!("{}", err_str);
3292			},
3293			ChannelMonitorUpdateStatus::InProgress => {
3294				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
3295					$channel_id);
3296				false
3297			},
3298			ChannelMonitorUpdateStatus::Completed => {
3299				$completed;
3300				true
3301			},
3302		}
3303	} };
3304	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
3305		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3306		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
3307			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
3308	};
3309	(
3310		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
3311		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
3312		_internal_outer, $completed: expr
3313	) => { {
3314		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
3315			.or_insert_with(Vec::new);
3316		// During startup, we push monitor updates as background events through to here in
3317		// order to replay updates that were in-flight when we shut down. Thus, we have to
3318		// filter for uniqueness here.
3319		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
3320			.unwrap_or_else(|| {
3321				$in_flight_updates.push($update);
3322				$in_flight_updates.len() - 1
3323			});
3324		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
3325			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
3326			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
3327		} else {
3328			// We blindly assume that the ChannelMonitorUpdate will be regenerated on startup if we
3329			// fail to persist it. This is a fairly safe assumption, however, since anything we do
3330			// during the startup sequence should be replayed exactly if we immediately crash.
3331			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
3332				counterparty_node_id: $counterparty_node_id,
3333				funding_txo: $funding_txo,
3334				channel_id: $chan_id,
3335				update: $in_flight_updates[$update_idx].clone(),
3336			};
3337			// We want to track the in-flight update both in `in_flight_monitor_updates` and in
3338			// `pending_background_events` to avoid a race condition during
3339			// `pending_background_events` processing where we complete one
3340			// `ChannelMonitorUpdate` (but there are more pending as background events) but we
3341			// conclude that all pending `ChannelMonitorUpdate`s have completed and it's safe to
3342			// run post-completion actions.
3343			// We could work around that with some effort, but it's simpler to just track updates
3344			// twice.
3345			$self.pending_background_events.lock().unwrap().push(event);
3346			false
3347		}
3348	} };
3349	(
3350		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
3351		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
3352	) => { {
3353		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
3354		let chan_id = $chan_context.channel_id();
3355		let counterparty_node_id = $chan_context.get_counterparty_node_id();
3356		let in_flight_updates;
3357		let idx;
3358		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3359			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3360			{
3361				let _ = in_flight_updates.remove(idx);
3362			})
3363	} };
3364	(
3365		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3366		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
3367	) => { {
3368		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
3369		let in_flight_updates;
3370		let idx;
3371		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
3372			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
3373			{
3374				let _ = in_flight_updates.remove(idx);
3375				if in_flight_updates.is_empty() {
3376					let update_actions = $peer_state.monitor_update_blocked_actions
3377						.remove(&$channel_id).unwrap_or(Vec::new());
3378
3379					mem::drop($peer_state_lock);
3380					mem::drop($per_peer_state_lock);
3381
3382					$self.handle_monitor_update_completion_actions(update_actions);
3383				}
3384			})
3385	} };
3386	(
3387		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
3388		$per_peer_state_lock: expr, $chan: expr
3389	) => { {
3390		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
3391		let chan_id = $chan.context.channel_id();
3392		let counterparty_node_id = $chan.context.get_counterparty_node_id();
3393		let in_flight_updates;
3394		let idx;
3395		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
3396			counterparty_node_id, in_flight_updates, idx, _internal_outer,
3397			{
3398				let _ = in_flight_updates.remove(idx);
3399				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
3400					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
3401				}
3402			})
3403	} };
3404}
3405
3406macro_rules! process_events_body {
3407	($self: expr, $event_to_handle: expr, $handle_event: expr) => {
3408		let mut handling_failed = false;
3409		let mut processed_all_events = false;
3410		while !handling_failed && !processed_all_events {
3411			if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
3412				return;
3413			}
3414
3415			let mut result;
3416
3417			{
3418				// We'll acquire our total consistency lock so that we can be sure no other
3419				// persists happen while processing monitor events.
3420				let _read_guard = $self.total_consistency_lock.read().unwrap();
3421
3422				// Because `handle_post_event_actions` may send `ChannelMonitorUpdate`s to the user we must
3423				// ensure any startup-generated background events are handled first.
3424				result = $self.process_background_events();
3425
3426				// TODO: This behavior should be documented. It's unintuitive that we query
3427				// ChannelMonitors when clearing other events.
3428				if $self.process_pending_monitor_events() {
3429					result = NotifyOption::DoPersist;
3430				}
3431			}
3432
3433			let pending_events = $self.pending_events.lock().unwrap().clone();
3434			if !pending_events.is_empty() {
3435				result = NotifyOption::DoPersist;
3436			}
3437
3438			let mut post_event_actions = Vec::new();
3439
3440			let mut num_handled_events = 0;
3441			for (event, action_opt) in pending_events {
3442				log_trace!($self.logger, "Handling event {:?}...", event);
3443				$event_to_handle = event;
3444				let event_handling_result = $handle_event;
3445				log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
3446				match event_handling_result {
3447					Ok(()) => {
3448						if let Some(action) = action_opt {
3449							post_event_actions.push(action);
3450						}
3451						num_handled_events += 1;
3452					}
3453					Err(_e) => {
3454						// If we encounter an error we stop handling events and make sure to replay
3455						// any unhandled events on the next invocation.
3456						handling_failed = true;
3457						break;
3458					}
3459				}
3460			}
3461
3462			{
3463				let mut pending_events = $self.pending_events.lock().unwrap();
3464				pending_events.drain(..num_handled_events);
3465				processed_all_events = pending_events.is_empty();
3466				// Note that `push_pending_forwards_ev` relies on `pending_events_processor` being
3467				// updated here with the `pending_events` lock acquired.
3468				$self.pending_events_processor.store(false, Ordering::Release);
3469			}
3470
3471			if !post_event_actions.is_empty() {
3472				$self.handle_post_event_actions(post_event_actions);
3473				// If we had some actions, go around again as we may have more events now
3474				processed_all_events = false;
3475			}
3476
3477			match result {
3478				NotifyOption::DoPersist => {
3479					$self.needs_persist_flag.store(true, Ordering::Release);
3480					$self.event_persist_notifier.notify();
3481				},
3482				NotifyOption::SkipPersistHandleEvents =>
3483					$self.event_persist_notifier.notify(),
3484				NotifyOption::SkipPersistNoEvents => {},
3485			}
3486		}
3487	}
3488}
3489
3490impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
3491where
3492	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
3493	T::Target: BroadcasterInterface,
3494	ES::Target: EntropySource,
3495	NS::Target: NodeSigner,
3496	SP::Target: SignerProvider,
3497	F::Target: FeeEstimator,
3498	R::Target: Router,
3499	MR::Target: MessageRouter,
3500	L::Target: Logger,
3501{
3502	/// Constructs a new `ChannelManager` to hold several channels and route between them.
3503	///
3504	/// The current time or latest block header time can be provided as the `current_timestamp`.
3505	///
3506	/// This is the main "logic hub" for all channel-related actions, and implements
3507	/// [`ChannelMessageHandler`].
3508	///
3509	/// Non-proportional fees are fixed according to our risk using the provided fee estimator.
3510	///
3511	/// Users need to notify the new `ChannelManager` when a new block is connected or
3512	/// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
3513	/// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
3514	/// more details.
3515	///
3516	/// [`block_connected`]: chain::Listen::block_connected
3517	/// [`block_disconnected`]: chain::Listen::block_disconnected
3518	/// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
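	///
	/// A minimal construction sketch (illustrative only; the fee estimator, chain monitor,
	/// broadcaster, routers, signers and entropy source are all assumed to already exist, and
	/// error handling is elided):
	///
	/// ```ignore
	/// let params = ChainParameters { network, best_block };
	/// let channel_manager = ChannelManager::new(
	///     fee_estimator, chain_monitor, tx_broadcaster, router, message_router, logger,
	///     entropy_source, node_signer, signer_provider, UserConfig::default(), params,
	///     current_timestamp,
	/// );
	/// ```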
3519	pub fn new(
3520		fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
3521		entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
3522		params: ChainParameters, current_timestamp: u32,
3523	) -> Self {
3524		let mut secp_ctx = Secp256k1::new();
3525		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
3526		let expanded_inbound_key = node_signer.get_inbound_payment_key();
3527		ChannelManager {
3528			default_configuration: config.clone(),
3529			chain_hash: ChainHash::using_genesis_block(params.network),
3530			fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
3531			chain_monitor,
3532			tx_broadcaster,
3533			router,
3534			message_router,
3535
3536			best_block: RwLock::new(params.best_block),
3537
3538			outbound_scid_aliases: Mutex::new(new_hash_set()),
3539			pending_outbound_payments: OutboundPayments::new(new_hash_map()),
3540			forward_htlcs: Mutex::new(new_hash_map()),
3541			decode_update_add_htlcs: Mutex::new(new_hash_map()),
3542			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
3543			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
3544			outpoint_to_peer: Mutex::new(new_hash_map()),
3545			short_to_chan_info: FairRwLock::new(new_hash_map()),
3546
3547			our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
3548			secp_ctx,
3549
3550			inbound_payment_key: expanded_inbound_key,
3551			fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
3552
3553			probing_cookie_secret: entropy_source.get_secure_random_bytes(),
3554			inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),
3555
3556			highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),
3557
3558			per_peer_state: FairRwLock::new(new_hash_map()),
3559
3560			pending_events: Mutex::new(VecDeque::new()),
3561			pending_events_processor: AtomicBool::new(false),
3562			pending_background_events: Mutex::new(Vec::new()),
3563			total_consistency_lock: RwLock::new(()),
3564			background_events_processed_since_startup: AtomicBool::new(false),
3565			event_persist_notifier: Notifier::new(),
3566			needs_persist_flag: AtomicBool::new(false),
3567			funding_batch_states: Mutex::new(BTreeMap::new()),
3568
3569			pending_offers_messages: Mutex::new(Vec::new()),
3570			pending_async_payments_messages: Mutex::new(Vec::new()),
3571			pending_broadcast_messages: Mutex::new(Vec::new()),
3572
3573			last_days_feerates: Mutex::new(VecDeque::new()),
3574
3575			entropy_source,
3576			node_signer,
3577			signer_provider,
3578
3579			logger,
3580
3581			#[cfg(feature = "dnssec")]
3582			hrn_resolver: OMNameResolver::new(current_timestamp, params.best_block.height),
3583			#[cfg(feature = "dnssec")]
3584			pending_dns_onion_messages: Mutex::new(Vec::new()),
3585
3586			#[cfg(feature = "_test_utils")]
3587			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
3588		}
3589	}
3590
3591	/// Gets the current configuration applied to all new channels.
3592	pub fn get_current_default_configuration(&self) -> &UserConfig {
3593		&self.default_configuration
3594	}
3595
3596	#[cfg(test)]
3597	pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
3598		self.create_and_insert_outbound_scid_alias()
3599	}
3600
3601	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
3602		let height = self.best_block.read().unwrap().height;
3603		let mut outbound_scid_alias = 0;
3604		let mut i = 0;
3605		loop {
3606			if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
3607				outbound_scid_alias += 1;
3608			} else {
3609				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
3610			}
3611			if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
3612				break;
3613			}
3614			i += 1;
3615			if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels)"); }
3616		}
3617		outbound_scid_alias
3618	}
3619
3620	/// Creates a new outbound channel to the given remote node and with the given value.
3621	///
3622	/// `user_channel_id` will be provided back as in
3623	/// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
3624	/// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
3625	/// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it
3626	/// is simply copied to events and otherwise ignored.
3627	///
3628	/// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
3629	/// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
3630	///
3631	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be opened due to failing to
3632	/// generate a shutdown scriptpubkey or destination script set by
3633	/// [`SignerProvider::get_shutdown_scriptpubkey`] or [`SignerProvider::get_destination_script`].
3634	///
3635	/// Note that we do not check if you are currently connected to the given peer. If no
3636	/// connection is available, the outbound `open_channel` message may fail to send, resulting in
3637	/// the channel eventually being silently forgotten (dropped on reload).
3638	///
3639	/// If `temporary_channel_id` is specified, it will be used as the temporary channel ID of the
3640	/// channel. Otherwise, a random one will be generated for you.
3641	///
3642	/// Returns the new Channel's temporary `channel_id`. This ID will appear as
3643	/// [`Event::FundingGenerationReady::temporary_channel_id`] and in
3644	/// [`ChannelDetails::channel_id`] until after
3645	/// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
3646	/// one derived from the funding transaction's TXID. If the counterparty rejects the channel
3647	/// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
3648	///
3649	/// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
3650	/// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
3651	/// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
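	///
	/// A minimal usage sketch (not compiled here; `channel_manager` and `peer_pubkey` are
	/// illustrative assumptions, with the peer already connected):
	///
	/// ```ignore
	/// // Open a 1_000_000 sat channel with no funds pushed to the counterparty.
	/// let temporary_channel_id = channel_manager.create_channel(
	/// 	peer_pubkey, 1_000_000, 0, 42, None, None,
	/// )?;
	/// // Fund it once `Event::FundingGenerationReady` fires for this `temporary_channel_id`.
	/// ```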
3652	pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
3653		if channel_value_satoshis < 1000 {
3654			return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
3655		}
3656
3657		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
3658		// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
3659		debug_assert!(self.total_consistency_lock.try_write().is_err());
3660
3661		let per_peer_state = self.per_peer_state.read().unwrap();
3662
3663		let peer_state_mutex = per_peer_state.get(&their_network_key)
3664			.ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
3665
3666		let mut peer_state = peer_state_mutex.lock().unwrap();
3667
3668		if let Some(temporary_channel_id) = temporary_channel_id {
3669			if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
3670				return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
3671			}
3672		}
3673
3674		let mut channel = {
3675			let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
3676			let their_features = &peer_state.latest_features;
3677			let config = override_config.as_ref().unwrap_or(&self.default_configuration);
3678			match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
3679				their_features, channel_value_satoshis, push_msat, user_channel_id, config,
3680				self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
3681			{
3682				Ok(res) => res,
3683				Err(e) => {
3684					self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
3685					return Err(e);
3686				},
3687			}
3688		};
3689		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
3690		let res = channel.get_open_channel(self.chain_hash, &&logger);
3691
3692		let temporary_channel_id = channel.context.channel_id();
3693		match peer_state.channel_by_id.entry(temporary_channel_id) {
3694			hash_map::Entry::Occupied(_) => {
3695				if cfg!(fuzzing) {
3696					return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
3697				} else {
3698					panic!("RNG is bad???");
3699				}
3700			},
3701			hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
3702		}
3703
3704		if let Some(msg) = res {
3705			peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
3706				node_id: their_network_key,
3707				msg,
3708			});
3709		}
3710		Ok(temporary_channel_id)
3711	}
3712
3713	fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
3714		// Allocate our best estimate of the number of channels we have in the `res`
3715		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
3716		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
3717		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
3718		// of the ChannelMonitor handling. Therefore reallocations may still occur, but are
3719		// the same channel.
3720		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3721		{
3722			let best_block_height = self.best_block.read().unwrap().height;
3723			let per_peer_state = self.per_peer_state.read().unwrap();
3724			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3725				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3726				let peer_state = &mut *peer_state_lock;
3727				res.extend(peer_state.channel_by_id.iter()
3728					.filter_map(|(chan_id, phase)| match phase {
3729						// Only `Channels` in the `ChannelPhase::Funded` phase can be considered funded.
3730						ChannelPhase::Funded(chan) => Some((chan_id, chan)),
3731						_ => None,
3732					})
3733					.filter(f)
3734					.map(|(_channel_id, channel)| {
3735						ChannelDetails::from_channel_context(&channel.context, best_block_height,
3736							peer_state.latest_features.clone(), &self.fee_estimator)
3737					})
3738				);
3739			}
3740		}
3741		res
3742	}
3743
3744	/// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
3745	/// more information.
3746	pub fn list_channels(&self) -> Vec<ChannelDetails> {
3747		// Allocate our best estimate of the number of channels we have in the `res`
3748		// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
3749		// a scid or a scid alias, and the `outpoint_to_peer` shouldn't be used outside
3750		// of the ChannelMonitor handling. Therefore reallocations may still occur, but is
3751		// of the ChannelMonitor handling. Therefore reallocations may still occur, but are
3752		// the same channel.
3753		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
3754		{
3755			let best_block_height = self.best_block.read().unwrap().height;
3756			let per_peer_state = self.per_peer_state.read().unwrap();
3757			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3758				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3759				let peer_state = &mut *peer_state_lock;
3760				for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
3761					let details = ChannelDetails::from_channel_context(context, best_block_height,
3762						peer_state.latest_features.clone(), &self.fee_estimator);
3763					res.push(details);
3764				}
3765			}
3766		}
3767		res
3768	}
3769
3770	/// Gets the list of usable channels, in random order. Useful as an argument to
3771	/// [`Router::find_route`] to ensure non-announced channels are used.
3772	///
3773	/// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
3774	/// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
3775	/// are.
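	///
	/// A small sketch of how the result might be consumed (a hypothetical `channel_manager`
	/// handle is assumed):
	///
	/// ```ignore
	/// // Sum our total sendable liquidity across channels that are currently usable.
	/// let total_outbound_msat: u64 = channel_manager.list_usable_channels()
	/// 	.iter()
	/// 	.map(|details| details.outbound_capacity_msat)
	/// 	.sum();
	/// ```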
3776	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
3777		// Note that we use `is_live` here instead of `is_usable`, which leads to somewhat
3778		// confusing internal/external nomenclature, but that's OK because it's probably what
3779		// the user really wanted anyway.
3780		self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
3781	}
3782
3783	/// Gets the list of channels we have with a given counterparty, in random order.
3784	pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
3785		let best_block_height = self.best_block.read().unwrap().height;
3786		let per_peer_state = self.per_peer_state.read().unwrap();
3787
3788		if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
3789			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3790			let peer_state = &mut *peer_state_lock;
3791			let features = &peer_state.latest_features;
3792			let context_to_details = |context| {
3793				ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
3794			};
3795			return peer_state.channel_by_id
3796				.iter()
3797				.map(|(_, phase)| phase.context())
3798				.map(context_to_details)
3799				.collect();
3800		}
3801		vec![]
3802	}
3803
3804	/// Returns, in an undefined order, recent payments that -- if not fulfilled -- have yet to
3805	/// find a successful path, or have unresolved HTLCs.
3806	///
3807	/// This can be useful for payments that may have been prepared, but ultimately not sent, as a
3808	/// result of a crash. If such a payment exists, is not listed here, and an
3809	/// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
3810	///
3811	/// [`Event::PaymentSent`]: events::Event::PaymentSent
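	///
	/// A sketch of the restart check described above (`channel_manager` and `my_payment_id`
	/// are illustrative assumptions):
	///
	/// ```ignore
	/// // After restarting, check whether LDK still knows about the payment we started.
	/// let still_tracked = channel_manager.list_recent_payments().iter().any(|details| match details {
	/// 	RecentPaymentDetails::AwaitingInvoice { payment_id, .. }
	/// 		| RecentPaymentDetails::Pending { payment_id, .. }
	/// 		| RecentPaymentDetails::Fulfilled { payment_id, .. }
	/// 		| RecentPaymentDetails::Abandoned { payment_id, .. } => *payment_id == my_payment_id,
	/// });
	/// // If it isn't tracked and no `Event::PaymentSent` was seen, resending may be appropriate.
	/// ```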
3812	pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
3813		self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
3814			.filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
3815				PendingOutboundPayment::AwaitingInvoice { .. }
3816					| PendingOutboundPayment::AwaitingOffer { .. }
3817					// InvoiceReceived is an intermediate state and doesn't need to be exposed
3818					| PendingOutboundPayment::InvoiceReceived { .. } =>
3819				{
3820					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3821				},
3822				PendingOutboundPayment::StaticInvoiceReceived { .. } => {
3823					Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3824				},
3825				PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
3826					Some(RecentPaymentDetails::Pending {
3827						payment_id: *payment_id,
3828						payment_hash: *payment_hash,
3829						total_msat: *total_msat,
3830					})
3831				},
3832				PendingOutboundPayment::Abandoned { payment_hash, .. } => {
3833					Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
3834				},
3835				PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
3836					Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
3837				},
3838				PendingOutboundPayment::Legacy { .. } => None
3839			})
3840			.collect()
3841	}
3842
3843	fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
3844		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
3845
3846		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
3847		let mut shutdown_result = None;
3848
3849		{
3850			let per_peer_state = self.per_peer_state.read().unwrap();
3851
3852			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
3853				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
3854
3855			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3856			let peer_state = &mut *peer_state_lock;
3857
3858			match peer_state.channel_by_id.entry(channel_id.clone()) {
3859				hash_map::Entry::Occupied(mut chan_phase_entry) => {
3860					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
3861						let funding_txo_opt = chan.context.get_funding_txo();
3862						let their_features = &peer_state.latest_features;
3863						let (shutdown_msg, mut monitor_update_opt, htlcs) =
3864							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
3865						failed_htlcs = htlcs;
3866
3867						// We can send the `shutdown` message before updating the `ChannelMonitor`
3868						// here as we don't need the monitor update to complete until we send a
3869						// `shutdown_signed`, which we'll delay if we're pending a monitor update.
3870						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
3871							node_id: *counterparty_node_id,
3872							msg: shutdown_msg,
3873						});
3874
3875						debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
3876							"We can't both complete shutdown and generate a monitor update");
3877
3878						// Update the monitor with the shutdown script if necessary.
3879						if let Some(monitor_update) = monitor_update_opt.take() {
3880							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
3881								peer_state_lock, peer_state, per_peer_state, chan);
3882						}
3883					} else {
3884						let mut shutdown_res = chan_phase_entry.get_mut().context_mut()
3885							.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
3886						remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
3887						shutdown_result = Some(shutdown_res);
3888					}
3889				},
3890				hash_map::Entry::Vacant(_) => {
3891					return Err(APIError::ChannelUnavailable {
3892						err: format!(
3893							"Channel with id {} not found for the passed counterparty node_id {}",
3894							channel_id, counterparty_node_id,
3895						)
3896					});
3897				},
3898			}
3899		}
3900
3901		for htlc_source in failed_htlcs.drain(..) {
3902			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
3903			let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
3904			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
3905		}
3906
3907		if let Some(shutdown_result) = shutdown_result {
3908			self.finish_close_channel(shutdown_result);
3909		}
3910
3911		Ok(())
3912	}
3913
3914	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
3915	/// will be accepted on the given channel, and after additional timeout/the closing of all
3916	/// pending HTLCs, the channel will be closed on chain.
3917	///
3918	///  * If we are the channel initiator, we will pay between our [`ChannelCloseMinimum`] and
3919	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
3920	///    fee estimate.
3921	///  * If our counterparty is the channel initiator, we will require a channel closing
3922	///    transaction feerate of at least our [`ChannelCloseMinimum`] feerate or the feerate which
3923	///    would appear on a force-closure transaction, whichever is lower. We will allow our
3924	///    counterparty to pay as much fee as they'd like, however.
3925	///
3926	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
3927	///
3928	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
3929	/// generate a shutdown scriptpubkey or destination script set by
3930	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
3931	/// channel.
3932	///
3933	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
3934	/// [`ChannelCloseMinimum`]: crate::chain::chaininterface::ConfirmationTarget::ChannelCloseMinimum
3935	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
3936	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
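	///
	/// A minimal sketch of a cooperative close (`channel_manager`, `channel_id` and
	/// `counterparty_node_id` are assumed to already exist):
	///
	/// ```ignore
	/// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
	/// // A `SendShutdown` message event is queued for the peer; the channel will eventually
	/// // surface an `Event::ChannelClosed` once closing completes.
	/// ```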
3937	pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
3938		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
3939	}
3940
3941	/// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
3942	/// will be accepted on the given channel, and after additional timeout/the closing of all
3943	/// pending HTLCs, the channel will be closed on chain.
3944	///
3945	/// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated
3946	/// the channel being closed or not:
3947	///  * If we are the channel initiator, we will pay at least this feerate on the closing
3948	///    transaction. The upper-bound is set by
3949	///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`NonAnchorChannelFee`]
3950	///    fee estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
3951	///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
3952	///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
3953	///    will appear on a force-closure transaction, whichever is lower).
3954	///
3955	/// The `shutdown_script` provided will be used as the `scriptPubKey` for the closing transaction.
3956	/// Will fail if a shutdown script has already been set for this channel by
3957	/// [`ChannelHandshakeConfig::commit_upfront_shutdown_pubkey`]. The given shutdown script must
3958	/// also be compatible with our and the counterparty's features.
3959	///
3960	/// May generate a [`SendShutdown`] message event on success, which should be relayed.
3961	///
3962	/// Raises [`APIError::ChannelUnavailable`] if the channel cannot be closed due to failing to
3963	/// generate a shutdown scriptpubkey or destination script set by
3964	/// [`SignerProvider::get_shutdown_scriptpubkey`]. A force-closure may be needed to close the
3965	/// channel.
3966	///
3967	/// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
3968	/// [`NonAnchorChannelFee`]: crate::chain::chaininterface::ConfirmationTarget::NonAnchorChannelFee
3969	/// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
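	///
	/// For example (sketch only; the handles and the 2_500 sat/kW target feerate are
	/// illustrative assumptions):
	///
	/// ```ignore
	/// channel_manager.close_channel_with_feerate_and_script(
	/// 	&channel_id, &counterparty_node_id, Some(2_500), None,
	/// )?;
	/// ```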
3970	pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
3971		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
3972	}
3973
3974	/// Applies a [`ChannelMonitorUpdate`] which may or may not be for a channel which is closed.
3975	fn apply_post_close_monitor_update(
3976		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
3977		monitor_update: ChannelMonitorUpdate,
3978	) {
3979		// Note that there may be some post-close updates which need to be well-ordered with
3980		// respect to the `update_id`, so we hold the `peer_state` lock here.
3981		let per_peer_state = self.per_peer_state.read().unwrap();
3982		let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
3983			.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
3984			.lock().unwrap();
3985		let peer_state = &mut *peer_state_lock;
3986		match peer_state.channel_by_id.entry(channel_id) {
3987			hash_map::Entry::Occupied(mut chan_phase) => {
3988				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
3989					handle_new_monitor_update!(self, funding_txo,
3990						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
3991					return;
3992				} else {
3993					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
3994				}
3995			},
3996			hash_map::Entry::Vacant(_) => {},
3997		}
3998
3999		handle_new_monitor_update!(
4000			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
4001			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
4002		);
4003	}
4004
4005	/// When a channel is removed, two things need to happen:
4006	/// (a) [`locked_close_channel`] must be called in the same `per_peer_state` lock as
4007	///     the channel-closing action,
4008	/// (b) this needs to be called without holding any locks (except
4009	///     [`ChannelManager::total_consistency_lock`]).
4010	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
4011		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
4012		#[cfg(debug_assertions)]
4013		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
4014			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
4015		}
4016
4017		let logger = WithContext::from(
4018			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
4019		);
4020
4021		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
4022			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
4023		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
4024			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
4025			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
4026			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
4027			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
4028		}
4029		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
4030			debug_assert!(false, "This should have been handled in `locked_close_channel`");
4031			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
4032		}
4033		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
4034			// If a `ChannelMonitorUpdate` was applied (i.e. any time we have a funding txo and are
4035			// not in the startup sequence) check if we need to handle any
4036			// `MonitorUpdateCompletionAction`s.
4037			// TODO: If we do the `in_flight_monitor_updates.is_empty()` check in
4038			// `locked_close_channel` we can skip the locks here.
4039			if let Some(funding_txo) = shutdown_res.channel_funding_txo {
4040				let per_peer_state = self.per_peer_state.read().unwrap();
4041				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
4042					let mut peer_state = peer_state_mtx.lock().unwrap();
4043					if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
4044						let update_actions = peer_state.monitor_update_blocked_actions
4045							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());
4046
4047						mem::drop(peer_state);
4048						mem::drop(per_peer_state);
4049
4050						self.handle_monitor_update_completion_actions(update_actions);
4051					}
4052				}
4053			}
4054		}
4055		let mut shutdown_results = Vec::new();
4056		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
4057			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
4058			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
4059			let per_peer_state = self.per_peer_state.read().unwrap();
4060			let mut has_uncompleted_channel = None;
4061			for (channel_id, counterparty_node_id, state) in affected_channels {
4062				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
4063					let mut peer_state = peer_state_mutex.lock().unwrap();
4064					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
4065						let mut close_res = chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure);
4066						locked_close_channel!(self, &mut *peer_state, chan.context(), close_res);
4067						shutdown_results.push(close_res);
4068					}
4069				}
4070				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
4071			}
4072			debug_assert!(
4073				has_uncompleted_channel.unwrap_or(true),
4074				"Closing a batch where all channels have completed initial monitor update",
4075			);
4076		}
4077
4078		{
4079			let mut pending_events = self.pending_events.lock().unwrap();
4080			pending_events.push_back((events::Event::ChannelClosed {
4081				channel_id: shutdown_res.channel_id,
4082				user_channel_id: shutdown_res.user_channel_id,
4083				reason: shutdown_res.closure_reason,
4084				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
4085				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
4086				channel_funding_txo: shutdown_res.channel_funding_txo,
4087				last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
4088			}, None));
4089
4090			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
4091				let funding_info = if shutdown_res.is_manual_broadcast {
4092					FundingInfo::OutPoint {
4093						outpoint: shutdown_res.channel_funding_txo
4094							.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
4095					}
4096				} else {
4097					FundingInfo::Tx{ transaction }
4098				};
4099				pending_events.push_back((events::Event::DiscardFunding {
4100					channel_id: shutdown_res.channel_id, funding_info
4101				}, None));
4102			}
4103		}
4104		for shutdown_result in shutdown_results.drain(..) {
4105			self.finish_close_channel(shutdown_result);
4106		}
4107	}
4108
4109	/// `peer_msg` should be set when we receive a message from a peer, but not set when the
4110	/// user closes, which will be re-exposed as the `ChannelClosed` reason.
4111	fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
4112	-> Result<PublicKey, APIError> {
4113		let per_peer_state = self.per_peer_state.read().unwrap();
4114		let peer_state_mutex = per_peer_state.get(peer_node_id)
4115			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
4116		let (update_opt, counterparty_node_id) = {
4117			let mut peer_state = peer_state_mutex.lock().unwrap();
4118			let closure_reason = if let Some(peer_msg) = peer_msg {
4119				ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
4120			} else {
4121				ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
4122			};
4123			let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
4124			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
4125				log_error!(logger, "Force-closing channel {}", channel_id);
4126				let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
4127					ChannelPhase::Funded(ref mut chan) => {
4128						(
4129							chan.context.force_shutdown(broadcast, closure_reason),
4130							self.get_channel_update_for_broadcast(&chan).ok(),
4131						)
4132					},
4133					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
4134					ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
4135						// Unfunded channel has no update
4136						(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
4137					},
4138				};
4139				let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
4140				mem::drop(peer_state);
4141				mem::drop(per_peer_state);
4142				self.finish_close_channel(shutdown_res);
4143				(update_opt, chan_phase.context().get_counterparty_node_id())
4144			} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
4145				log_error!(logger, "Force-closing channel {}", &channel_id);
4146				// N.B. that we don't send any channel close event here: we
4147				// don't have a user_channel_id, and we never sent any opening
4148				// events anyway.
4149				(None, *peer_node_id)
4150			} else {
4151				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
4152			}
4153		};
4154		if let Some(update) = update_opt {
4155			// If we have some Channel Update to broadcast, we cache it and broadcast it later.
4156			let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
4157			pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
4158				msg: update
4159			});
4160		}
4161
4162		Ok(counterparty_node_id)
4163	}
4164
4165	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
4166	-> Result<(), APIError> {
4167		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4168		log_debug!(self.logger,
4169			"Force-closing channel. The error message sent to the peer: {}", error_message);
4170		match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) {
4171			Ok(counterparty_node_id) => {
4172				let per_peer_state = self.per_peer_state.read().unwrap();
4173				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
4174					let mut peer_state = peer_state_mutex.lock().unwrap();
4175					peer_state.pending_msg_events.push(
4176						events::MessageSendEvent::HandleError {
4177							node_id: counterparty_node_id,
4178							action: msgs::ErrorAction::SendErrorMessage {
4179								msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
4180							},
4181						}
4182					);
4183				}
4184				Ok(())
4185			},
4186			Err(e) => Err(e)
4187		}
4188	}
4189
4190	/// Force closes a channel, immediately broadcasting the latest local transaction(s),
4191	/// rejecting new HTLCs.
4192	///
4193	/// The provided `error_message` is sent to connected peers for closing
4194	/// channels and should be a human-readable description of what went wrong.
4195	///
4196	/// Fails if `channel_id` is unknown to the manager, or if the `counterparty_node_id`
4197	/// isn't the counterparty of the corresponding channel.
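	///
	/// Sketch (the handles and the error message are illustrative assumptions):
	///
	/// ```ignore
	/// channel_manager.force_close_broadcasting_latest_txn(
	/// 	&channel_id, &counterparty_node_id, "peer unresponsive".to_string(),
	/// )?;
	/// ```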
4198	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
4199	-> Result<(), APIError> {
4200		self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
4201	}
4202
4203	/// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
4204	/// the latest local transaction(s).
4205	///
4206	/// The provided `error_message` is sent to connected peers for closing channels and should
4207	/// be a human-readable description of what went wrong.
4208	///
4209	/// Fails if `channel_id` is unknown to the manager, or if the
4210	/// `counterparty_node_id` isn't the counterparty of the corresponding channel.
4211	/// You can always broadcast the latest local transaction(s) via
4212	/// [`ChannelMonitor::broadcast_latest_holder_commitment_txn`].
4213	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
4214	-> Result<(), APIError> {
4215		self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
4216	}
4217
4218	/// Force close all channels, immediately broadcasting the latest local commitment transaction
4219	/// for each to the chain and rejecting new HTLCs on each.
4220	///
4221	/// The provided `error_message` is sent to connected peers for closing channels and should
4222	/// be a human-readable description of what went wrong.
4223	pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
4224		for chan in self.list_channels() {
4225			let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4226		}
4227	}
4228
4229	/// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
4230	/// local transaction(s).
4231	///
4232	/// The provided `error_message` is sent to connected peers for closing channels and
4233	/// should be a human-readable description of what went wrong.
4234	pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
4235		for chan in self.list_channels() {
4236			let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4237		}
4238	}
4239
4240	fn can_forward_htlc_to_outgoing_channel(
4241		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
4242	) -> Result<(), (&'static str, u16)> {
4243		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
4244			// Note that the behavior here should be identical to the above block - we
4245			// should NOT reveal the existence or non-existence of a private channel if
4246			// we don't allow forwards outbound over them.
4247			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10));
4248		}
4249		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
4250			// `option_scid_alias` (referred to in LDK as `scid_privacy`) means
4251			// "refuse to forward unless the SCID alias was used", so we pretend
4252			// we don't have the channel here.
4253			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10));
4254		}
4255
4256		// Note that we could technically not return an error yet here and just hope
4257		// that the connection is reestablished or monitor updated by the time we get
4258		// around to doing the actual forward, but better to fail early if we can and
4259		// hopefully an attacker trying to path-trace payments cannot make this occur
4260		// on a small/per-node/per-channel scale.
4261		if !chan.context.is_live() {
4262			if !chan.context.is_enabled() {
4263				// channel_disabled
4264				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20));
4265			} else {
4266				// temporary_channel_failure
4267				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7));
4268			}
4269		}
4270		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
4271			return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11));
4272		}
4273		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
4274			return Err((err, code));
4275		}
4276
4277		Ok(())
4278	}
4279
4280	/// Executes a callback `C` that returns some value `X` on the channel found with the given
4281	/// `scid`. `None` is returned when the channel is not found.
4282	fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
4283		&self, scid: u64, callback: C,
4284	) -> Option<X> {
4285		let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
4286			None => return None,
4287			Some((cp_id, id)) => (cp_id, id),
4288		};
4289		let per_peer_state = self.per_peer_state.read().unwrap();
4290		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4291		if peer_state_mutex_opt.is_none() {
4292			return None;
4293		}
4294		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4295		let peer_state = &mut *peer_state_lock;
4296		match peer_state.channel_by_id.get_mut(&channel_id).and_then(
4297			|chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
4298		) {
4299			None => None,
4300			Some(chan) => Some(callback(chan)),
4301		}
4302	}
4303
4304	fn can_forward_htlc(
4305		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
4306	) -> Result<(), (&'static str, u16)> {
4307		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
4308			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
4309		}) {
4310			Some(Ok(())) => {},
4311			Some(Err(e)) => return Err(e),
4312			None => {
4313				// If we couldn't find the channel info for the scid, it may be a phantom or
4314				// intercept forward.
4315				if (self.default_configuration.accept_intercept_htlcs &&
4316					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
4317					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
4318				{} else {
4319					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10));
4320				}
4321			}
4322		}
4323
4324		let cur_height = self.best_block.read().unwrap().height + 1;
4325		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
4326			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
4327		) {
4328			return Err((err_msg, err_code));
4329		}
4330
4331		Ok(())
4332	}
4333
4334	fn htlc_failure_from_update_add_err(
4335		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
4336		err_code: u16, is_intro_node_blinded_forward: bool,
4337		shared_secret: &[u8; 32]
4338	) -> HTLCFailureMsg {
4339		// at capacity, we write fields `htlc_msat` and `len`
4340		let mut res = VecWriter(Vec::with_capacity(8 + 2));
4341		if err_code & 0x1000 == 0x1000 {
4342			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
4343				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
4344			}
4345			else if err_code == 0x1000 | 13 {
4346				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
4347			}
4348			else if err_code == 0x1000 | 20 {
4349				// TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
4350				0u16.write(&mut res).expect("Writes cannot fail");
4351			}
4352			// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
4353			(0u16).write(&mut res).expect("Writes cannot fail");
4354		}
4355
4356		log_info!(
4357			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
4358			"Failed to accept/forward incoming HTLC: {}", err_msg
4359		);
4360		// If `msg.blinding_point` is set, we must always fail with malformed.
4361		if msg.blinding_point.is_some() {
4362			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
4363				channel_id: msg.channel_id,
4364				htlc_id: msg.htlc_id,
4365				sha256_of_onion: [0; 32],
4366				failure_code: INVALID_ONION_BLINDING,
4367			});
4368		}
4369
4370		let (err_code, err_data) = if is_intro_node_blinded_forward {
4371			(INVALID_ONION_BLINDING, &[0; 32][..])
4372		} else {
4373			(err_code, &res.0[..])
4374		};
4375		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
4376			channel_id: msg.channel_id,
4377			htlc_id: msg.htlc_id,
4378			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
4379				.get_encrypted_failure_packet(shared_secret, &None),
4380		})
4381	}
4382
4383	fn decode_update_add_htlc_onion(
4384		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
4385	) -> Result<
4386		(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
4387	> {
4388		let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
4389			msg, &*self.node_signer, &*self.logger, &self.secp_ctx
4390		)?;
4391
4392		let next_packet_details = match next_packet_details_opt {
4393			Some(next_packet_details) => next_packet_details,
4394			// it is a receive, so no need for outbound checks
4395			None => return Ok((next_hop, shared_secret, None)),
4396		};
4397
4398		// Perform outbound checks here instead of in [`Self::construct_pending_htlc_info`] because we
4399		// can't hold the outbound peer state lock at the same time as the inbound peer state lock.
4400		self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
4401			let (err_msg, err_code) = e;
4402			self.htlc_failure_from_update_add_err(
4403				msg, counterparty_node_id, err_msg, err_code,
4404				next_hop.is_intro_node_blinded_forward(), &shared_secret
4405			)
4406		})?;
4407
4408		Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
4409	}
4410
4411	fn construct_pending_htlc_status<'a>(
4412		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
4413		decoded_hop: onion_utils::Hop, allow_underpay: bool,
4414		next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
4415	) -> PendingHTLCStatus {
4416		macro_rules! return_err {
4417			($msg: expr, $err_code: expr, $data: expr) => {
4418				{
4419					let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
4420					log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
4421					if msg.blinding_point.is_some() {
4422						return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
4423							msgs::UpdateFailMalformedHTLC {
4424								channel_id: msg.channel_id,
4425								htlc_id: msg.htlc_id,
4426								sha256_of_onion: [0; 32],
4427								failure_code: INVALID_ONION_BLINDING,
4428							}
4429						))
4430					}
4431					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
4432						channel_id: msg.channel_id,
4433						htlc_id: msg.htlc_id,
4434						reason: HTLCFailReason::reason($err_code, $data.to_vec())
4435							.get_encrypted_failure_packet(&shared_secret, &None),
4436					}));
4437				}
4438			}
4439		}
4440		match decoded_hop {
4441			onion_utils::Hop::Receive(next_hop_data) => {
4442				// OUR PAYMENT!
4443				let current_height: u32 = self.best_block.read().unwrap().height;
4444				match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
4445					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
4446					current_height)
4447				{
4448					Ok(info) => {
4449						// Note that we could obviously respond immediately with an update_fulfill_htlc
4450						// message, however that would leak that we are the recipient of this payment, so
4451						// instead we stay symmetric with the forwarding case, only responding (after a
4452						// delay) once they've sent us a commitment_signed!
4453						PendingHTLCStatus::Forward(info)
4454					},
4455					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
4456				}
4457			},
4458			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
4459				match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
4460					new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
4461					Ok(info) => PendingHTLCStatus::Forward(info),
4462					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
4463				}
4464			}
4465		}
4466	}
4467
4468	/// Gets the current [`channel_update`] for the given channel. This first checks if the channel is
4469	/// public, and thus should be called whenever the result is going to be passed out in a
4470	/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
4471	///
4472	/// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
4473	/// corresponding to the channel's counterparty locked, as the channel has been removed from the
4474	/// storage and the `peer_state` lock has been dropped.
4475	///
4476	/// [`channel_update`]: msgs::ChannelUpdate
4477	/// [`internal_closing_signed`]: Self::internal_closing_signed
4478	fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4479		if !chan.context.should_announce() {
4480			return Err(LightningError {
4481				err: "Cannot broadcast a channel_update for a private channel".to_owned(),
4482				action: msgs::ErrorAction::IgnoreError
4483			});
4484		}
4485		if chan.context.get_short_channel_id().is_none() {
4486			return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
4487		}
4488		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4489		log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
4490		self.get_channel_update_for_unicast(chan)
4491	}
4492
4493	/// Gets the current [`channel_update`] for the given channel. This does not check if the channel
4494	/// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
4495	/// and thus MUST NOT be called unless the recipient of the resulting message has already
4496	/// provided evidence that they know about the existence of the channel.
4497	///
4498	/// Note that through [`internal_closing_signed`], this function is called without the
4499	/// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
4500	/// removed from the storage and the `peer_state` lock has been dropped.
4501	///
4502	/// [`channel_update`]: msgs::ChannelUpdate
4503	/// [`internal_closing_signed`]: Self::internal_closing_signed
4504	fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4505		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4506		log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
4507		let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
4508			None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
4509			Some(id) => id,
4510		};
4511
4512		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4513		log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
4514		let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
4515		let enabled = chan.context.is_enabled();
4516
4517		let unsigned = msgs::UnsignedChannelUpdate {
4518			chain_hash: self.chain_hash,
4519			short_channel_id,
4520			timestamp: chan.context.get_update_time_counter(),
4521			message_flags: 1, // Only must_be_one
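			// Per BOLT 7, bit 0 of `channel_flags` encodes the direction (set when we are the
			// lexicographically-greater "node 2") and bit 1 the disable flag.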
4522			channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
4523			cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
4524			htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
4525			htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
4526			fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
4527			fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
4528			excess_data: Vec::new(),
4529		};
4530		// Panic on failure to signal that LDK should be restarted to retry signing the `ChannelUpdate`.
4531		// If we returned an error and the `node_signer` cannot provide a signature for whatever
4532		// reason, we wouldn't be able to receive inbound payments through the corresponding
4533		// channel.
4534		let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
4535
4536		Ok(msgs::ChannelUpdate {
4537			signature: sig,
4538			contents: unsigned
4539		})
4540	}
4541
4542	#[cfg(test)]
4543	pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
4544		let _lck = self.total_consistency_lock.read().unwrap();
4545		self.send_payment_along_path(SendAlongPathArgs {
4546			path, payment_hash, recipient_onion: &recipient_onion, total_value,
4547			cur_height, payment_id, keysend_preimage, invoice_request: None, session_priv_bytes
4548		})
4549	}
4550
4551	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
4552		let SendAlongPathArgs {
4553			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
4554			invoice_request, session_priv_bytes
4555		} = args;
4556		// The top-level caller should hold the total_consistency_lock read lock.
4557		debug_assert!(self.total_consistency_lock.try_write().is_err());
4558		let prng_seed = self.entropy_source.get_secure_random_bytes();
4559		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
4560
4561		let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
4562			&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
4563			payment_hash, keysend_preimage, invoice_request, prng_seed
4564		).map_err(|e| {
4565			let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
4566			log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
4567			e
4568		})?;
4569
4570		let err: Result<(), _> = loop {
4571			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
4572				None => {
4573					let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
4574					log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
4575					return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
4576				},
4577				Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
4578			};
4579
4580			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
4581			log_trace!(logger,
4582				"Attempting to send payment with payment hash {} along path with next hop {}",
4583				payment_hash, path.hops.first().unwrap().short_channel_id);
4584
4585			let per_peer_state = self.per_peer_state.read().unwrap();
4586			let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
4587				.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
4588			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4589			let peer_state = &mut *peer_state_lock;
4590			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
4591				match chan_phase_entry.get_mut() {
4592					ChannelPhase::Funded(chan) => {
4593						if !chan.context.is_live() {
4594							return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
4595						}
4596						let funding_txo = chan.context.get_funding_txo().unwrap();
4597						let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
4598						let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
4599							htlc_cltv, HTLCSource::OutboundRoute {
4600								path: path.clone(),
4601								session_priv: session_priv.clone(),
4602								first_hop_htlc_msat: htlc_msat,
4603								payment_id,
4604							}, onion_packet, None, &self.fee_estimator, &&logger);
4605						match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) {
4606							Some(monitor_update) => {
4607								match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
4608									false => {
4609										// Note that MonitorUpdateInProgress here indicates (per function
4610										// docs) that we will resend the commitment update once monitor
4611										// updating completes. Therefore, we must return an error
4612										// indicating that it is unsafe to retry the payment wholesale,
4613										// which we do in the send_payment check for
4614										// MonitorUpdateInProgress, below.
4615										return Err(APIError::MonitorUpdateInProgress);
4616									},
4617									true => {},
4618								}
4619							},
4620							None => {},
4621						}
4622					},
4623					_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
4624				};
4625			} else {
4626				// The channel was likely removed after we fetched the id from the
4627				// `short_to_chan_info` map, but before we successfully locked the
4628				// `channel_by_id` map.
4629				// This can occur as no consistency guarantees exists between the two maps.
4630				return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
4631			}
4632			return Ok(());
4633		};
4634		match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
4635			Ok(_) => unreachable!(),
4636			Err(e) => {
4637				Err(APIError::ChannelUnavailable { err: e.err })
4638			},
4639		}
4640	}
4641
4642	/// Sends a payment along a given route. See [`Self::send_payment`] for more info.
4643	///
4644	/// LDK will not automatically retry this payment, though it may be manually re-sent after an
4645	/// [`Event::PaymentFailed`] is generated.
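	///
	/// A minimal usage sketch (not compiled; `channel_manager`, `route`, `payment_hash`,
	/// `payment_secret`, and `payment_id` are placeholders for values obtained elsewhere, e.g. a
	/// route from your own [`Router`] call and values taken from the recipient's invoice):
	///
	/// ```ignore
	/// // Pin the exact route and let LDK send along it once, with no automatic retries.
	/// channel_manager.send_payment_with_route(
	/// 	route, payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id,
	/// )?;
	/// ```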
4646	pub fn send_payment_with_route(
4647		&self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
4648		payment_id: PaymentId
4649	) -> Result<(), RetryableSendFailure> {
4650		let best_block_height = self.best_block.read().unwrap().height;
4651		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4652		let route_params = route.route_params.clone().unwrap_or_else(|| {
4653			// Create dummy route params since they're a required parameter but unused in this case
4654			let (payee_node_id, cltv_delta) = route.paths.first()
4655				.and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
4656				.unwrap_or_else(|| (PublicKey::from_slice(&[2; 33]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
4657			let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
4658			RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
4659		});
4660		if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
4661		let router = FixedRouter::new(route);
4662		self.pending_outbound_payments
4663			.send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
4664				route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4665				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4666				&self.pending_events, |args| self.send_payment_along_path(args))
4667	}
4668
4669	/// Sends a payment to the route found using the provided [`RouteParameters`], retrying failed
4670	/// payment paths based on the provided `Retry`.
4671	///
4672	/// May generate [`UpdateHTLCs`] message(s) event on success, which should be relayed (e.g. via
4673	/// [`PeerManager::process_events`]).
4674	///
4675	/// # Avoiding Duplicate Payments
4676	///
4677	/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
4678	/// method will error with [`RetryableSendFailure::DuplicatePayment`]. Note, however, that once a
4679	/// payment is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of
4680	/// an [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
4681	/// second payment with the same [`PaymentId`].
4682	///
4683	/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
4684	/// tracking of payments, including state to indicate once a payment has completed. Because you
4685	/// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
4686	/// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
4687	/// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
4688	///
4689	/// Additionally, in the scenario where we begin the process of sending a payment, but crash
4690	/// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're
4691	/// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See
4692	/// [`ChannelManager::list_recent_payments`] for more information.
4693	///
4694	/// Routes are automatically found using the [`Router`] provided on startup. To fix a route for a
4695	/// particular payment, use [`Self::send_payment_with_route`] or match the [`PaymentId`] passed to
4696	/// [`Router::find_route_with_id`].
4697	///
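	/// A hedged usage sketch (not compiled; `channel_manager`, `payee_pubkey`, `amount_msat`,
	/// `final_cltv_expiry_delta`, `payment_hash`, and `payment_secret` are placeholders taken from
	/// the recipient's invoice or your own bookkeeping; reusing the payment hash bytes as the
	/// [`PaymentId`] follows the deduplication advice above):
	///
	/// ```ignore
	/// let payment_id = PaymentId(payment_hash.0);
	/// let route_params = RouteParameters::from_payment_params_and_value(
	/// 	PaymentParameters::from_node_id(payee_pubkey, final_cltv_expiry_delta),
	/// 	amount_msat,
	/// );
	/// channel_manager.send_payment(
	/// 	payment_hash, RecipientOnionFields::secret_only(payment_secret), payment_id,
	/// 	route_params, Retry::Attempts(3),
	/// )?;
	/// ```
	///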
4698	/// [`Event::PaymentSent`]: events::Event::PaymentSent
4699	/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
4700	/// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
4701	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
4702	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4703	pub fn send_payment(
4704		&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId,
4705		route_params: RouteParameters, retry_strategy: Retry
4706	) -> Result<(), RetryableSendFailure> {
4707		let best_block_height = self.best_block.read().unwrap().height;
4708		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4709		self.pending_outbound_payments
4710			.send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
4711				&self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4712				&self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4713				&self.pending_events, |args| self.send_payment_along_path(args))
4714	}
4715
4716	#[cfg(test)]
4717	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
4718		let best_block_height = self.best_block.read().unwrap().height;
4719		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4720		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
4721			keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
4722			best_block_height, |args| self.send_payment_along_path(args))
4723	}
4724
4725	#[cfg(test)]
4726	pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
4727		let best_block_height = self.best_block.read().unwrap().height;
4728		self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
4729	}
4730
4731	#[cfg(test)]
4732	pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
4733		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
4734	}
4735
4736	/// Pays the [`Bolt12Invoice`] associated with the `payment_id` encoded in its `payer_metadata`.
4737	///
4738	/// The invoice's `payer_metadata` is used to authenticate that the invoice was indeed requested
4739	/// before attempting a payment. [`Bolt12PaymentError::UnexpectedInvoice`] is returned if this
4740	/// fails or if the encoded `payment_id` is not recognized. The latter may happen once the
4741	/// payment is no longer tracked because the payment was attempted after:
4742	/// - an invoice for the `payment_id` was already paid,
4743	/// - one full [timer tick] has elapsed since initially requesting the invoice when paying an
4744	///   offer, or
4745	/// - the refund corresponding to the invoice has already expired.
4746	///
4747	/// To retry the payment, request another invoice using a new `payment_id`.
4748	///
4749	/// Attempting to pay the same invoice twice while the first payment is still pending will
4750	/// result in a [`Bolt12PaymentError::DuplicateInvoice`].
4751	///
4752	/// Otherwise, either [`Event::PaymentSent`] or [`Event::PaymentFailed`] are used to indicate
4753	/// whether or not the payment was successful.
4754	///
4755	/// [timer tick]: Self::timer_tick_occurred
4756	pub fn send_payment_for_bolt12_invoice(
4757		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4758	) -> Result<(), Bolt12PaymentError> {
4759		match self.verify_bolt12_invoice(invoice, context) {
4760			Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
4761			Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
4762		}
4763	}
4764
4765	fn verify_bolt12_invoice(
4766		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4767	) -> Result<PaymentId, ()> {
4768		let secp_ctx = &self.secp_ctx;
4769		let expanded_key = &self.inbound_payment_key;
4770
4771		match context {
4772			None if invoice.is_for_refund_without_paths() => {
4773				invoice.verify_using_metadata(expanded_key, secp_ctx)
4774			},
4775			Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
4776				invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
4777			},
4778			_ => Err(()),
4779		}
4780	}
4781
4782	fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
4783		let best_block_height = self.best_block.read().unwrap().height;
4784		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4785		let features = self.bolt12_invoice_features();
4786		self.pending_outbound_payments
4787			.send_payment_for_bolt12_invoice(
4788				invoice, payment_id, &self.router, self.list_usable_channels(), features,
4789				|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
4790				&self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
4791				|args| self.send_payment_along_path(args)
4792			)
4793	}
4794
4795	#[cfg(async_payments)]
4796	fn initiate_async_payment(
4797		&self, invoice: &StaticInvoice, payment_id: PaymentId
4798	) -> Result<(), Bolt12PaymentError> {
4799		let mut res = Ok(());
4800		PersistenceNotifierGuard::optionally_notify(self, || {
4801			let best_block_height = self.best_block.read().unwrap().height;
4802			let features = self.bolt12_invoice_features();
4803			let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
4804				invoice, payment_id, features, best_block_height, &*self.entropy_source,
4805				&self.pending_events
4806			);
4807			match outbound_pmts_res {
4808				Ok(()) => {},
4809				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
4810					res = outbound_pmts_res.map(|_| ());
4811					return NotifyOption::SkipPersistNoEvents
4812				},
4813				Err(e) => {
4814					res = Err(e);
4815					return NotifyOption::DoPersist
4816				}
4817			};
4818
4819			let nonce = Nonce::from_entropy_source(&*self.entropy_source);
4820			let hmac = payment_id.hmac_for_async_payment(nonce, &self.inbound_payment_key);
4821			let reply_paths = match self.create_blinded_paths(
4822				MessageContext::AsyncPayments(
4823					AsyncPaymentsContext::OutboundPayment { payment_id, nonce, hmac }
4824				)
4825			) {
4826				Ok(paths) => paths,
4827				Err(()) => {
4828					self.abandon_payment_with_reason(payment_id, PaymentFailureReason::BlindedPathCreationFailed);
4829					res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
4830					return NotifyOption::DoPersist
4831				}
4832			};
4833
4834			let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap();
4835			const HTLC_AVAILABLE_LIMIT: usize = 10;
4836			reply_paths
4837				.iter()
4838				.flat_map(|reply_path| invoice.message_paths().iter().map(move |invoice_path| (invoice_path, reply_path)))
4839				.take(HTLC_AVAILABLE_LIMIT)
4840				.for_each(|(invoice_path, reply_path)| {
4841					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
4842						destination: Destination::BlindedPath(invoice_path.clone()),
4843						reply_path: reply_path.clone(),
4844					};
4845					let message = AsyncPaymentsMessage::HeldHtlcAvailable(HeldHtlcAvailable {});
4846					pending_async_payments_messages.push((message, instructions));
4847				});
4848
4849			NotifyOption::DoPersist
4850		});
4851
4852		res
4853	}
4854
4855	#[cfg(async_payments)]
4856	fn send_payment_for_static_invoice(
4857		&self, payment_id: PaymentId
4858	) -> Result<(), Bolt12PaymentError> {
4859		let best_block_height = self.best_block.read().unwrap().height;
4860		let mut res = Ok(());
4861		PersistenceNotifierGuard::optionally_notify(self, || {
4862			let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice(
4863				payment_id, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4864				&self.entropy_source, &self.node_signer, &self, &self.secp_ctx, best_block_height,
4865				&self.logger, &self.pending_events, |args| self.send_payment_along_path(args)
4866			);
4867			match outbound_pmts_res {
4868				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
4869					res = outbound_pmts_res.map(|_| ());
4870					NotifyOption::SkipPersistNoEvents
4871				},
4872				other_res => {
4873					res = other_res;
4874					NotifyOption::DoPersist
4875				}
4876			}
4877		});
4878		res
4879	}
4880
4881	/// Signals that no further attempts for the given payment should occur. Useful if you have a
4882	/// pending outbound payment with retries remaining, but wish to stop retrying the payment before
4883	/// retries are exhausted.
4884	///
4885	/// # Event Generation
4886	///
4887	/// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
4888	/// as there are no remaining pending HTLCs for this payment.
4889	///
4890	/// Note that calling this method does *not* prevent a payment from succeeding. You must still
4891	/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
4892	/// determine the ultimate status of a payment.
4893	///
4894	/// # Requested Invoices
4895	///
4896	/// In the case of paying a [`Bolt12Invoice`] via [`ChannelManager::pay_for_offer`], abandoning
4897	/// the payment prior to receiving the invoice will result in an [`Event::PaymentFailed`] and
4898	/// prevent any attempts at paying it once received.
4899	///
4900	/// # Restart Behavior
4901	///
4902	/// If an [`Event::PaymentFailed`] is generated and we restart without first persisting the
4903	/// [`ChannelManager`], another [`Event::PaymentFailed`] may be generated.
4904	///
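	/// A brief sketch (not compiled; `channel_manager` and `payment_id` are placeholders). Keep
	/// the payment marked as pending in your own tracking until LDK surfaces the terminal event:
	///
	/// ```ignore
	/// channel_manager.abandon_payment(payment_id);
	/// // Still wait for Event::PaymentFailed (or Event::PaymentSent) before treating the
	/// // payment as resolved.
	/// ```
	///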
4905	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
4906	pub fn abandon_payment(&self, payment_id: PaymentId) {
4907		self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
4908	}
4909
4910	fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
4911		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4912		self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
4913	}
4914
4915	/// Send a spontaneous payment, which is a payment that does not require the recipient to have
4916	/// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
4917	/// the preimage, it must be a cryptographically secure random value that no intermediate node
4918	/// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
4919	/// never reach the recipient.
4920	///
4921	/// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
4922	/// [`send_payment`] for more information about the risks of duplicate preimage usage.
4923	///
4924	/// See [`send_payment`] documentation for more details on the idempotency guarantees provided by
4925	/// the [`PaymentId`] key.
4926	///
4927	/// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
4928	/// payments.
4929	///
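	/// A hedged sketch (not compiled; `payee_pubkey`, `amount_msat`, and `unique_id_bytes` are
	/// placeholders, the final CLTV expiry delta of `40` is arbitrary, and passing `None` lets LDK
	/// pick a fresh preimage):
	///
	/// ```ignore
	/// let payment_params = PaymentParameters::for_keysend(payee_pubkey, 40, false);
	/// let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
	/// let payment_hash = channel_manager.send_spontaneous_payment(
	/// 	None, RecipientOnionFields::spontaneous_empty(), PaymentId(unique_id_bytes),
	/// 	route_params, Retry::Attempts(3),
	/// )?;
	/// ```
	///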
4930	/// [`send_payment`]: Self::send_payment
4931	/// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
4932	pub fn send_spontaneous_payment(
4933		&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
4934		payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
4935	) -> Result<PaymentHash, RetryableSendFailure> {
4936		let best_block_height = self.best_block.read().unwrap().height;
4937		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4938		self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
4939			payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
4940			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
4941			&self.logger, &self.pending_events, |args| self.send_payment_along_path(args))
4942	}
4943
4944	/// Send a payment that is probing the given route for liquidity. We calculate the
4945	/// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
4946	/// us to easily discern them from real payments.
4947	pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
4948		let best_block_height = self.best_block.read().unwrap().height;
4949		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4950		self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
4951			&self.entropy_source, &self.node_signer, best_block_height,
4952			|args| self.send_payment_along_path(args))
4953	}
4954
4955	/// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
4956	/// payment probe.
4957	#[cfg(test)]
4958	pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
4959		outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
4960	}
4961
4962	/// Sends payment probes over all paths of a route that would be used to pay the given
4963	/// amount to the given `node_id`.
4964	///
4965	/// See [`ChannelManager::send_preflight_probes`] for more information.
4966	pub fn send_spontaneous_preflight_probes(
4967		&self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
4968		liquidity_limit_multiplier: Option<u64>,
4969	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4970		let payment_params =
4971			PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
4972
4973		let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
4974
4975		self.send_preflight_probes(route_params, liquidity_limit_multiplier)
4976	}
4977
4978	/// Sends payment probes over all paths of a route that would be used to pay a route found
4979	/// according to the given [`RouteParameters`].
4980	///
4981	/// This may be used to send "pre-flight" probes, i.e., to train our scorer before conducting
4982	/// the actual payment. Note this is only useful if there likely is sufficient time for the
4983	/// probe to settle before sending out the actual payment, e.g., when waiting for user
4984	/// confirmation in a wallet UI.
4985	///
4986	/// Otherwise, there is a chance the probe could take up some liquidity needed to complete the
4987	/// actual payment. Users should therefore be cautious and might avoid sending probes if
4988	/// liquidity is scarce and/or they don't expect the probe to return before they send the
4989	/// payment. To mitigate this issue, channels with available liquidity less than the required
4990	/// amount times the given `liquidity_limit_multiplier` won't be used to send pre-flight
4991	/// probes. If `None` is given as `liquidity_limit_multiplier`, it defaults to `3`.
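	///
	/// For example, with the default multiplier of `3`, a path carrying 100,000 msat (including
	/// fees) is only probed through a first hop whose `next_outbound_htlc_limit_msat` is at least
	/// 300,000 msat, counting liquidity already reserved by earlier probes in the same call.
	///
	/// A hedged usage sketch (not compiled; `channel_manager` and `route_params` are placeholders):
	///
	/// ```ignore
	/// // Train the scorer before the real payment, e.g. while the user reviews the invoice.
	/// let probes = channel_manager.send_preflight_probes(route_params, None)?;
	/// // Each entry identifies one probe; results arrive as Event::ProbeSuccessful or
	/// // Event::ProbeFailed, which can then be fed to your scorer.
	/// ```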
4992	pub fn send_preflight_probes(
4993		&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
4994	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4995		let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);
4996
4997		let payer = self.get_our_node_id();
4998		let usable_channels = self.list_usable_channels();
4999		let first_hops = usable_channels.iter().collect::<Vec<_>>();
5000		let inflight_htlcs = self.compute_inflight_htlcs();
5001
5002		let route = self
5003			.router
5004			.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
5005			.map_err(|e| {
5006				log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
5007				ProbeSendFailure::RouteNotFound
5008			})?;
5009
5010		let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());
5011
5012		let mut res = Vec::new();
5013
5014		for mut path in route.paths {
5015			// If the last hop is probably an unannounced channel we refrain from probing all the
5016			// way through to the end and instead probe up to the second-to-last channel.
5017			while let Some(last_path_hop) = path.hops.last() {
5018				if last_path_hop.maybe_announced_channel {
5019					// We found a potentially announced last hop.
5020					break;
5021				} else {
5022					// Drop the last hop, as it's likely unannounced.
5023					log_debug!(
5024						self.logger,
5025						"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
5026						last_path_hop.short_channel_id
5027					);
5028					let final_value_msat = path.final_value_msat();
5029					path.hops.pop();
5030					if let Some(new_last) = path.hops.last_mut() {
5031						new_last.fee_msat += final_value_msat;
5032					}
5033				}
5034			}
5035
5036			if path.hops.len() < 2 {
5037				log_debug!(
5038					self.logger,
5039					"Skipped sending payment probe over path with less than two hops."
5040				);
5041				continue;
5042			}
5043
5044			if let Some(first_path_hop) = path.hops.first() {
5045				if let Some(first_hop) = first_hops.iter().find(|h| {
5046					h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
5047				}) {
5048					let path_value = path.final_value_msat() + path.fee_msat();
5049					let used_liquidity =
5050						used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);
5051
5052					if first_hop.next_outbound_htlc_limit_msat
5053						< (*used_liquidity + path_value) * liquidity_limit_multiplier
5054					{
5055						log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
5056						continue;
5057					} else {
5058						*used_liquidity += path_value;
5059					}
5060				}
5061			}
5062
5063			res.push(self.send_probe(path).map_err(|e| {
5064				log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
5065				e
5066			})?);
5067		}
5068
5069		Ok(res)
5070	}
5071
5072	/// Handles the generation of a funding transaction, optionally (for tests) with a function
5073	/// which checks the correctness of the funding transaction given the associated channel.
5074	fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
5075		&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
5076		mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
5077	) -> Result<(), APIError> {
5078		let per_peer_state = self.per_peer_state.read().unwrap();
5079		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
5080			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
5081
5082		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5083		let peer_state = &mut *peer_state_lock;
5084		let funding_txo;
5085		let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) {
5086			Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
5087				macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
5088					let counterparty;
5089					let err = if let ChannelError::Close((msg, reason)) = $err {
5090						let channel_id = $chan.context.channel_id();
5091						counterparty = chan.context.get_counterparty_node_id();
5092						let shutdown_res = $chan.context.force_shutdown(false, reason);
5093						MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
5094					} else { unreachable!(); };
5095
5096					mem::drop(peer_state_lock);
5097					mem::drop(per_peer_state);
5098					let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
5099					Err($api_err)
5100				} } }
5101				match find_funding_output(&chan) {
5102					Ok(found_funding_txo) => funding_txo = found_funding_txo,
5103					Err(err) => {
5104						let chan_err = ChannelError::close(err.to_owned());
5105						let api_err = APIError::APIMisuseError { err: err.to_owned() };
5106						return close_chan!(chan_err, api_err, chan);
5107					},
5108				}
5109
5110				let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5111				let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
5112				match funding_res {
5113					Ok(funding_msg) => (chan, funding_msg),
5114					Err((mut chan, chan_err)) => {
5115						let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
5116						return close_chan!(chan_err, api_err, chan);
5117					}
5118				}
5119			},
5120			Some(phase) => {
5121				peer_state.channel_by_id.insert(temporary_channel_id, phase);
5122				return Err(APIError::APIMisuseError {
5123					err: format!(
5124						"Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
5125						temporary_channel_id, counterparty_node_id),
5126				})
5127			},
5128			None => return Err(APIError::ChannelUnavailable {err: format!(
5129				"Channel with id {} not found for the passed counterparty node_id {}",
5130				temporary_channel_id, counterparty_node_id),
5131				}),
5132		};
5133
5134		if let Some(msg) = msg_opt {
5135			peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
5136				node_id: chan.context.get_counterparty_node_id(),
5137				msg,
5138			});
5139		}
5140		if is_manual_broadcast {
5141			chan.context.set_manual_broadcast();
5142		}
5143		match peer_state.channel_by_id.entry(chan.context.channel_id()) {
5144			hash_map::Entry::Occupied(_) => {
5145				panic!("Generated duplicate funding txid?");
5146			},
5147			hash_map::Entry::Vacant(e) => {
5148				let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
5149				match outpoint_to_peer.entry(funding_txo) {
5150					hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
5151					hash_map::Entry::Occupied(o) => {
5152						let err = format!(
5153							"An existing channel using outpoint {} is open with peer {}",
5154							funding_txo, o.get()
5155						);
5156						mem::drop(outpoint_to_peer);
5157						mem::drop(peer_state_lock);
5158						mem::drop(per_peer_state);
5159						let reason = ClosureReason::ProcessingError { err: err.clone() };
5160						self.finish_close_channel(chan.context.force_shutdown(true, reason));
5161						return Err(APIError::ChannelUnavailable { err });
5162					}
5163				}
5164				e.insert(ChannelPhase::UnfundedOutboundV1(chan));
5165			}
5166		}
5167		Ok(())
5168	}
5169
5170	#[cfg(test)]
5171	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
5172		let txid = funding_transaction.compute_txid();
5173		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_| {
5174			Ok(OutPoint { txid, index: output_index })
5175		}, false)
5176	}
5177
5178	/// Call this upon creation of a funding transaction for the given channel.
5179	///
5180	/// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
5181	/// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
5182	///
5183	/// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
5184	/// across the p2p network.
5185	///
5186	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
5187	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
5188	///
5189	/// May panic if the output found in the funding transaction is duplicative with some other
5190	/// channel (note that this should be trivially prevented by using unique funding transaction
5191	/// keys per-channel).
5192	///
5193	/// Do NOT broadcast the funding transaction yourself. When we have safely received our
5194	/// counterparty's signature the funding transaction will automatically be broadcast via the
5195	/// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
5196	///
5197	/// Note that this includes RBF or similar transaction replacement strategies - lightning does
5198	/// not currently support replacing a funding transaction on an existing channel. Instead,
5199	/// create a new channel with a conflicting funding transaction.
5200	///
5201	/// Note that to keep miner incentives aligned in moving the blockchain forward, we recommend
5202	/// that the wallet software generating the funding transaction apply anti-fee sniping as
5203	/// implemented by the Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
5204	/// for more details.
5205	///
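	/// A hedged sketch of the intended flow (not compiled; `channel_manager` and `wallet` are
	/// placeholders, and `build_funding_tx` stands in for whatever your wallet uses to build a
	/// transaction paying `channel_value_satoshis` to `output_script`):
	///
	/// ```ignore
	/// // In your event handler:
	/// if let Event::FundingGenerationReady {
	/// 	temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, ..
	/// } = event {
	/// 	let funding_tx = wallet.build_funding_tx(output_script, channel_value_satoshis);
	/// 	channel_manager.funding_transaction_generated(
	/// 		temporary_channel_id, counterparty_node_id, funding_tx,
	/// 	)?;
	/// 	// Do not broadcast funding_tx yourself; LDK broadcasts it once it is safe to do so.
	/// }
	/// ```
	///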
5206	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
5207	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
5208	pub fn funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
5209		self.batch_funding_transaction_generated(&[(&temporary_channel_id, &counterparty_node_id)], funding_transaction)
5210	}
5211
5212
5213	/// **Unsafe**: This method does not validate the spent output. It is the caller's
5214	/// responsibility to ensure the spent outputs are SegWit, as well as to ensure the funding
5215	/// transaction has a final absolute locktime, i.e., its locktime is lower than the next block height.
5216	///
5217	/// For a safer method, please refer to [`ChannelManager::funding_transaction_generated`].
5218	///
5219	/// Call this in response to a [`Event::FundingGenerationReady`] event.
5220	///
5221	/// Note that if this method is called successfully, the funding transaction won't be
5222	/// broadcasted and you are expected to broadcast it manually when receiving the
5223	/// [`Event::FundingTxBroadcastSafe`] event.
5224	///
5225	/// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
5226	/// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
5227	///
5228	/// May panic if the funding output is duplicative with some other channel (note that this
5229	/// should be trivially prevented by using unique funding transaction keys per-channel).
5230	///
5231	/// Note that to keep miner incentives aligned in moving the blockchain forward, we recommend
5232	/// that the wallet software generating the funding transaction apply anti-fee sniping as
5233	/// implemented by the Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/> for
5234	/// more details.
5235	///
5236	/// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
5237	/// [`Event::FundingTxBroadcastSafe`]: crate::events::Event::FundingTxBroadcastSafe
5238	/// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
5239	/// [`ChannelManager::funding_transaction_generated`]: crate::ln::channelmanager::ChannelManager::funding_transaction_generated
5240	pub fn unsafe_manual_funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint) -> Result<(), APIError> {
5241		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5242
5243		let temporary_channels = &[(&temporary_channel_id, &counterparty_node_id)];
5244		return self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Unchecked(funding));
5245
5246	}
5247
5248	/// Call this upon creation of a batch funding transaction for the given channels.
5249	///
5250	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
5251	/// each individual channel and transaction output.
5252	///
5253	/// Do NOT broadcast the funding transaction yourself. This batch funding transaction
5254	/// will only be broadcast when we have safely received and persisted the counterparty's
5255	/// signature for each channel.
5256	///
5257	/// If there is an error, all channels in the batch are to be considered closed.
5258	pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
5259		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5260		self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Checked(funding_transaction))
5261	}
5262
5263	fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
5264		let mut result = Ok(());
5265		if let FundingType::Checked(funding_transaction) = &funding {
5266			if !funding_transaction.is_coinbase() {
5267				for inp in funding_transaction.input.iter() {
5268					if inp.witness.is_empty() {
5269						result = result.and(Err(APIError::APIMisuseError {
5270							err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
5271						}));
5272					}
5273				}
5274			}
5275
5276			if funding_transaction.output.len() > u16::max_value() as usize {
5277				result = result.and(Err(APIError::APIMisuseError {
5278					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
5279				}));
5280			}
5281			let height = self.best_block.read().unwrap().height;
5282			// Transactions are evaluated as final by network mempools if their locktime is strictly
5283			// lower than the next block height. However, the modules constituting our Lightning
5284			// node might not have perfect sync about their blockchain views. Thus, if the wallet
5285			// module is ahead of LDK, only allow one more block of headroom.
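			// For example, with our best height at H, a locktime of H + 1 is still accepted here
			// (the wallet may have already processed block H + 1); anything later is rejected.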
5286			if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
5287				funding_transaction.lock_time.is_block_height() &&
5288					funding_transaction.lock_time.to_consensus_u32() > height + 1
5289			{
5290				result = result.and(Err(APIError::APIMisuseError {
5291					err: "Funding transaction absolute timelock is non-final".to_owned()
5292				}));
5293			}
5294		}
5295
5296		let txid = funding.txid();
5297		let is_batch_funding = temporary_channels.len() > 1;
5298		let mut funding_batch_states = if is_batch_funding {
5299			Some(self.funding_batch_states.lock().unwrap())
5300		} else {
5301			None
5302		};
5303		let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
5304			match states.entry(txid) {
5305				btree_map::Entry::Occupied(_) => {
5306					result = result.clone().and(Err(APIError::APIMisuseError {
5307						err: "Batch funding transaction with the same txid already exists".to_owned()
5308					}));
5309					None
5310				},
5311				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
5312			}
5313		});
5314		let is_manual_broadcast = funding.is_manual_broadcast();
5315		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
5316			result = result.and_then(|_| self.funding_transaction_generated_intern(
5317				*temporary_channel_id,
5318				*counterparty_node_id,
5319				funding.transaction_or_dummy(),
5320				is_batch_funding,
5321				|chan| {
5322					let mut output_index = None;
5323					let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
5324					let outpoint = match &funding {
5325						FundingType::Checked(tx) => {
5326							for (idx, outp) in tx.output.iter().enumerate() {
5327								if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
5328									if output_index.is_some() {
5329										return Err("Multiple outputs matched the expected script and value");
5330									}
5331									output_index = Some(idx as u16);
5332								}
5333							}
5334							if output_index.is_none() {
5335								return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
5336							}
5337							OutPoint { txid, index: output_index.unwrap() }
5338						},
5339						FundingType::Unchecked(outpoint) => outpoint.clone(),
5340					};
5341					if let Some(funding_batch_state) = funding_batch_state.as_mut() {
5342						// TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably
5343						// need to fix this somehow to not rely on using the outpoint for the channel ID if we
5344						// want to support V2 batching here as well.
5345						funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
5346					}
5347					Ok(outpoint)
5348				},
5349				is_manual_broadcast)
5350			);
5351		}
5352		if let Err(ref e) = result {
5353			// Remaining channels need to be removed on any error.
5354			let e = format!("Error in transaction funding: {:?}", e);
5355			let mut channels_to_remove = Vec::new();
5356			channels_to_remove.extend(funding_batch_states.as_mut()
5357				.and_then(|states| states.remove(&txid))
5358				.into_iter().flatten()
5359				.map(|(chan_id, node_id, _state)| (chan_id, node_id))
5360			);
5361			channels_to_remove.extend(temporary_channels.iter()
5362				.map(|(&chan_id, &node_id)| (chan_id, node_id))
5363			);
5364			let mut shutdown_results = Vec::new();
5365			{
5366				let per_peer_state = self.per_peer_state.read().unwrap();
5367				for (channel_id, counterparty_node_id) in channels_to_remove {
5368					per_peer_state.get(&counterparty_node_id)
5369						.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
5370						.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
5371						.map(|(mut chan, mut peer_state)| {
5372							let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
5373							let mut close_res = chan.context_mut().force_shutdown(false, closure_reason);
5374							locked_close_channel!(self, peer_state, chan.context(), close_res);
5375							shutdown_results.push(close_res);
5376							peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
5377								node_id: counterparty_node_id,
5378								action: msgs::ErrorAction::SendErrorMessage {
5379									msg: msgs::ErrorMessage {
5380										channel_id,
5381										data: "Failed to fund channel".to_owned(),
5382									}
5383								},
5384							});
5385						});
5386				}
5387			}
5388			mem::drop(funding_batch_states);
5389			for shutdown_result in shutdown_results.drain(..) {
5390				self.finish_close_channel(shutdown_result);
5391			}
5392		}
5393		result
5394	}
5395
5396	/// Atomically applies partial updates to the [`ChannelConfig`] of the given channels.
5397	///
5398	/// Once the updates are applied, each eligible channel (advertised with a known short channel
5399	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
5400	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
5401	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
5402	///
5403	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
5404	/// `counterparty_node_id` is provided.
5405	///
5406	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
5407	/// below [`MIN_CLTV_EXPIRY_DELTA`].
5408	///
5409	/// If an error is returned, none of the updates should be considered applied.
5410	///
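	/// A hedged sketch (not compiled; `channel_manager`, `counterparty_node_id`, and `channel_id`
	/// are placeholders, and `config_update` is assumed to be a [`ChannelConfigUpdate`] with only
	/// the fields you want to change set, e.g. a new [`forwarding_fee_base_msat`]):
	///
	/// ```ignore
	/// channel_manager.update_partial_channel_config(
	/// 	&counterparty_node_id, &[channel_id], &config_update,
	/// )?;
	/// // For eligible announced channels, a BroadcastChannelUpdate message event follows.
	/// ```
	///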
5411	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
5412	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
5413	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
5414	/// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
5415	/// [`ChannelUpdate`]: msgs::ChannelUpdate
5416	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
5417	/// [`APIMisuseError`]: APIError::APIMisuseError
5418	pub fn update_partial_channel_config(
5419		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
5420	) -> Result<(), APIError> {
5421		if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
5422			return Err(APIError::APIMisuseError {
5423				err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
5424			});
5425		}
5426
5427		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5428		let per_peer_state = self.per_peer_state.read().unwrap();
5429		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5430			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
5431		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5432		let peer_state = &mut *peer_state_lock;
5433
5434		for channel_id in channel_ids {
5435			if !peer_state.has_channel(channel_id) {
5436				return Err(APIError::ChannelUnavailable {
5437					err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
5438				});
5439			};
5440		}
5441		for channel_id in channel_ids {
5442			if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
5443				let mut config = channel_phase.context().config();
5444				config.apply(config_update);
5445				if !channel_phase.context_mut().update_config(&config) {
5446					continue;
5447				}
5448				if let ChannelPhase::Funded(channel) = channel_phase {
5449					if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
5450						let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
5451						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
5452					} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
5453						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
5454							node_id: channel.context.get_counterparty_node_id(),
5455							msg,
5456						});
5457					}
5458				}
5459				continue;
5460			} else {
5461				// This should not be reachable as we've already checked for non-existence in the previous channel_id loop.
5462				debug_assert!(false);
5463				return Err(APIError::ChannelUnavailable {
5464					err: format!(
5465						"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
5466						channel_id, counterparty_node_id),
5467				});
5468			};
5469		}
5470		Ok(())
5471	}
5472
5473	/// Atomically updates the [`ChannelConfig`] for the given channels.
5474	///
5475	/// Once the updates are applied, each eligible channel (advertised with a known short channel
5476	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
5477	/// or [`cltv_expiry_delta`]) has a [`BroadcastChannelUpdate`] event message generated
5478	/// containing the new [`ChannelUpdate`] message which should be broadcast to the network.
5479	///
5480	/// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
5481	/// `counterparty_node_id` is provided.
5482	///
5483	/// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
5484	/// below [`MIN_CLTV_EXPIRY_DELTA`].
5485	///
5486	/// If an error is returned, none of the updates should be considered applied.
5487	///
5488	/// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
5489	/// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
5490	/// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
5491	/// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
5492	/// [`ChannelUpdate`]: msgs::ChannelUpdate
5493	/// [`ChannelUnavailable`]: APIError::ChannelUnavailable
5494	/// [`APIMisuseError`]: APIError::APIMisuseError
5495	pub fn update_channel_config(
5496		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
5497	) -> Result<(), APIError> {
5498		return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
5499	}
5500
5501	/// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
5502	/// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
5503	///
5504	/// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time
5505	/// channel to a receiving node if the node lacks sufficient inbound liquidity.
5506	///
5507	/// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use
5508	/// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the
5509	/// receiver's invoice route hints. These route hints will signal to LDK to generate an
5510	/// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or
5511	/// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
5512	///
5513	/// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
5514	/// you from forwarding more than you received. See
5515	/// [`HTLCIntercepted::expected_outbound_amount_msat`] for more on forwarding a different amount
5516	/// than expected.
5517	///
5518	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
5519	/// backwards.
5520	///
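	/// A hedged sketch of an LSP-style handler (not compiled; `channel_manager`, `jit_channel_id`,
	/// and `next_node_id` are placeholders identifying the channel you want to forward over):
	///
	/// ```ignore
	/// if let Event::HTLCIntercepted {
	/// 	intercept_id, requested_next_hop_scid, expected_outbound_amount_msat, ..
	/// } = event {
	/// 	// `requested_next_hop_scid` tells you which intercept SCID (and thus which client)
	/// 	// the HTLC was destined for. Forward the expected amount, or less to take a fee.
	/// 	channel_manager.forward_intercepted_htlc(
	/// 		intercept_id, &jit_channel_id, next_node_id, expected_outbound_amount_msat,
	/// 	)?;
	/// }
	/// ```
	///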
5521	/// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
5522	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
5523	/// [`HTLCIntercepted::expected_outbound_amount_msat`]: events::Event::HTLCIntercepted::expected_outbound_amount_msat
5524	// TODO: when we move to deciding the best outbound channel at forward time, only take
5525	// `next_node_id` and not `next_hop_channel_id`
5526	pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
5527		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5528
5529		let next_hop_scid = {
5530			let peer_state_lock = self.per_peer_state.read().unwrap();
5531			let peer_state_mutex = peer_state_lock.get(&next_node_id)
5532				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
5533			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5534			let peer_state = &mut *peer_state_lock;
5535			match peer_state.channel_by_id.get(next_hop_channel_id) {
5536				Some(ChannelPhase::Funded(chan)) => {
5537					if !chan.context.is_usable() {
5538						return Err(APIError::ChannelUnavailable {
5539							err: format!("Channel with id {} not fully established", next_hop_channel_id)
5540						})
5541					}
5542					chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
5543				},
5544				Some(_) => return Err(APIError::ChannelUnavailable {
5545					err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
5546						next_hop_channel_id, next_node_id)
5547				}),
5548				None => {
5549					let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
5550						next_hop_channel_id, next_node_id);
5551					let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
5552					log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
5553					return Err(APIError::ChannelUnavailable {
5554						err: error
5555					})
5556				}
5557			}
5558		};
5559
5560		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
5561			.ok_or_else(|| APIError::APIMisuseError {
5562				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
5563			})?;
5564
5565		let routing = match payment.forward_info.routing {
5566			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
5567				PendingHTLCRouting::Forward {
5568					onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
5569				}
5570			},
5571			_ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
5572		};
5573		let skimmed_fee_msat =
5574			payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
5575		let pending_htlc_info = PendingHTLCInfo {
5576			skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
5577			outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
5578		};
5579
5580		let mut per_source_pending_forward = [(
5581			payment.prev_short_channel_id,
5582			payment.prev_counterparty_node_id,
5583			payment.prev_funding_outpoint,
5584			payment.prev_channel_id,
5585			payment.prev_user_channel_id,
5586			vec![(pending_htlc_info, payment.prev_htlc_id)]
5587		)];
5588		self.forward_htlcs(&mut per_source_pending_forward);
5589		Ok(())
5590	}
5591
5592	/// Fails the intercepted HTLC indicated by intercept_id. Should only be called in response to
5593	/// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`].
5594	///
5595	/// Errors if the event was not handled in time, in which case the HTLC was automatically failed
5596	/// backwards.
5597	///
5598	/// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
5599	pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
5600		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5601
5602		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
5603			.ok_or_else(|| APIError::APIMisuseError {
5604				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
5605			})?;
5606
5607		if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
5608			let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5609				short_channel_id: payment.prev_short_channel_id,
5610				user_channel_id: Some(payment.prev_user_channel_id),
5611				outpoint: payment.prev_funding_outpoint,
5612				channel_id: payment.prev_channel_id,
5613				counterparty_node_id: payment.prev_counterparty_node_id,
5614				htlc_id: payment.prev_htlc_id,
5615				incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
5616				phantom_shared_secret: None,
5617				blinded_failure: payment.forward_info.routing.blinded_failure(),
5618				cltv_expiry: incoming_cltv_expiry,
5619			});
5620
5621			let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
5622			let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
5623			self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
5624		} else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted
5625
5626		Ok(())
5627	}
5628
5629	fn process_pending_update_add_htlcs(&self) {
5630		let mut decode_update_add_htlcs = new_hash_map();
5631		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());
5632
5633		let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
5634			if let Some(outgoing_scid) = outgoing_scid_opt {
5635				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
5636					Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
5637						HTLCDestination::NextHopChannel {
5638							node_id: Some(*outgoing_counterparty_node_id),
5639							channel_id: *outgoing_channel_id,
5640						},
5641					None => HTLCDestination::UnknownNextHop {
5642						requested_forward_scid: outgoing_scid,
5643					},
5644				}
5645			} else {
5646				HTLCDestination::FailedPayment { payment_hash }
5647			}
5648		};
5649
5650		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
5651			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
5652				let counterparty_node_id = chan.context.get_counterparty_node_id();
5653				let channel_id = chan.context.channel_id();
5654				let funding_txo = chan.context.get_funding_txo().unwrap();
5655				let user_channel_id = chan.context.get_user_id();
5656				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
5657				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
5658			});
5659			let (
5660				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
5661				incoming_user_channel_id, incoming_accept_underpaying_htlcs
5662			 ) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
5663				incoming_channel_details
5664			} else {
5665				// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
5666				continue;
5667			};
5668
5669			let mut htlc_forwards = Vec::new();
5670			let mut htlc_fails = Vec::new();
5671			for update_add_htlc in &update_add_htlcs {
5672				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
5673					&update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx
5674				) {
5675					Ok(decoded_onion) => decoded_onion,
5676					Err(htlc_fail) => {
5677						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
5678						continue;
5679					},
5680				};
5681
5682				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
5683				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);
5684
5685				// Process the HTLC on the incoming channel.
5686				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
5687					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
5688					chan.can_accept_incoming_htlc(
5689						update_add_htlc, &self.fee_estimator, &logger,
5690					)
5691				}) {
5692					Some(Ok(_)) => {},
5693					Some(Err((err, code))) => {
5694						let htlc_fail = self.htlc_failure_from_update_add_err(
5695							&update_add_htlc, &incoming_counterparty_node_id, err, code,
5696							is_intro_node_blinded_forward, &shared_secret,
5697						);
5698						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5699						htlc_fails.push((htlc_fail, htlc_destination));
5700						continue;
5701					},
5702					// The incoming channel no longer exists, HTLCs should be resolved onchain instead.
5703					None => continue 'outer_loop,
5704				}
5705
5706				// Now process the HTLC on the outgoing channel if it's a forward.
5707				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
5708					if let Err((err, code)) = self.can_forward_htlc(
5709						&update_add_htlc, next_packet_details
5710					) {
5711						let htlc_fail = self.htlc_failure_from_update_add_err(
5712							&update_add_htlc, &incoming_counterparty_node_id, err, code,
5713							is_intro_node_blinded_forward, &shared_secret,
5714						);
5715						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5716						htlc_fails.push((htlc_fail, htlc_destination));
5717						continue;
5718					}
5719				}
5720
5721				match self.construct_pending_htlc_status(
5722					&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
5723					incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
5724				) {
5725					PendingHTLCStatus::Forward(htlc_forward) => {
5726						htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
5727					},
5728					PendingHTLCStatus::Fail(htlc_fail) => {
5729						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
5730						htlc_fails.push((htlc_fail, htlc_destination));
5731					},
5732				}
5733			}
5734
5735			// Process all of the forwards and failures for the channel in which the HTLCs were
5736			// proposed to as a batch.
5737			let pending_forwards = (
5738				incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo,
5739				incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect()
5740			);
5741			self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
5742			for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
5743				let failure = match htlc_fail {
5744					HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
5745						htlc_id: fail_htlc.htlc_id,
5746						err_packet: fail_htlc.reason,
5747					},
5748					HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
5749						htlc_id: fail_malformed_htlc.htlc_id,
5750						sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
5751						failure_code: fail_malformed_htlc.failure_code,
5752					},
5753				};
5754				self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
5755				self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
5756					prev_channel_id: incoming_channel_id,
5757					failed_next_destination: htlc_destination,
5758				}, None));
5759			}
5760		}
5761	}
5762
5763	/// Processes HTLCs which are pending, waiting on a random forward delay.
5764	///
5765	/// Should only really ever be called in response to a PendingHTLCsForwardable event.
5766	/// Will likely generate further events.
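	///
	/// A minimal usage sketch (the `channel_manager` handle and the surrounding event handling
	/// are assumptions of this example, not part of the API):
	///
	/// ```ignore
	/// // On `Event::PendingHTLCsForwardable { time_forwardable }`, wait at least
	/// // `time_forwardable` (batching forwards helps privacy) before forwarding:
	/// std::thread::sleep(time_forwardable);
	/// channel_manager.process_pending_htlc_forwards();
	/// ```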
5767	pub fn process_pending_htlc_forwards(&self) {
5768		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5769
5770		self.process_pending_update_add_htlcs();
5771
5772		let mut new_events = VecDeque::new();
5773		let mut failed_forwards = Vec::new();
5774		let mut phantom_receives: Vec<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
5775		{
5776			let mut forward_htlcs = new_hash_map();
5777			mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
5778
5779			for (short_chan_id, mut pending_forwards) in forward_htlcs {
5780				if short_chan_id != 0 {
5781					let mut forwarding_counterparty = None;
5782					macro_rules! forwarding_channel_not_found {
5783						($forward_infos: expr) => {
5784							for forward_info in $forward_infos {
5785								match forward_info {
5786									HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5787										prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5788										prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5789											routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
5790											outgoing_cltv_value, ..
5791										}
5792									}) => {
5793										let cltv_expiry = routing.incoming_cltv_expiry();
5794										macro_rules! failure_handler {
5795											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
5796												let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
5797												log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
5798
5799												let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5800													short_channel_id: prev_short_channel_id,
5801													user_channel_id: Some(prev_user_channel_id),
5802													channel_id: prev_channel_id,
5803													outpoint: prev_funding_outpoint,
5804													counterparty_node_id: prev_counterparty_node_id,
5805													htlc_id: prev_htlc_id,
5806													incoming_packet_shared_secret: incoming_shared_secret,
5807													phantom_shared_secret: $phantom_ss,
5808													blinded_failure: routing.blinded_failure(),
5809													cltv_expiry,
5810												});
5811
5812												let reason = if $next_hop_unknown {
5813													HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
5814												} else {
5815													HTLCDestination::FailedPayment{ payment_hash }
5816												};
5817
5818												failed_forwards.push((htlc_source, payment_hash,
5819													HTLCFailReason::reason($err_code, $err_data),
5820													reason
5821												));
5822												continue;
5823											}
5824										}
5825										macro_rules! fail_forward {
5826											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5827												{
5828													failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
5829												}
5830											}
5831										}
5832										macro_rules! failed_payment {
5833											($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5834												{
5835													failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
5836												}
5837											}
5838										}
5839										if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
5840											let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
5841											if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
5842												let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
5843												let next_hop = match onion_utils::decode_next_payment_hop(
5844													phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
5845													payment_hash, None, &*self.node_signer
5846												) {
5847													Ok(res) => res,
5848													Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
5849														let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
5850														// In this scenario, the phantom would have sent us an
5851														// `update_fail_malformed_htlc`, meaning here we encrypt the error as
5852														// if it came from us (the second-to-last hop) but contains the sha256
5853														// of the onion.
5854														failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
5855													},
5856													Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
5857														failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
5858													},
5859												};
5860												match next_hop {
5861													onion_utils::Hop::Receive(hop_data) => {
5862														let current_height: u32 = self.best_block.read().unwrap().height;
5863														match create_recv_pending_htlc_info(hop_data,
5864															incoming_shared_secret, payment_hash, outgoing_amt_msat,
5865															outgoing_cltv_value, Some(phantom_shared_secret), false, None,
5866															current_height)
5867														{
5868															Ok(info) => phantom_receives.push((
5869																prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
5870																prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)]
5871															)),
5872															Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
5873														}
5874													},
5875													_ => panic!(),
5876												}
5877											} else {
5878												fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5879											}
5880										} else {
5881											fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5882										}
5883									},
5884									HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
5885										// Channel went away before we could fail it. This implies
5886										// the channel is now on chain and our counterparty is
5887										// trying to broadcast the HTLC-Timeout, but that's their
5888										// problem, not ours.
5889									}
5890								}
5891							}
5892						}
5893					}
5894					let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
5895					let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
5896						Some((cp_id, chan_id)) => (cp_id, chan_id),
5897						None => {
5898							forwarding_channel_not_found!(pending_forwards.drain(..));
5899							continue;
5900						}
5901					};
5902					forwarding_counterparty = Some(counterparty_node_id);
5903					let per_peer_state = self.per_peer_state.read().unwrap();
5904					let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
5905					if peer_state_mutex_opt.is_none() {
5906						forwarding_channel_not_found!(pending_forwards.drain(..));
5907						continue;
5908					}
5909					let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
5910					let peer_state = &mut *peer_state_lock;
5911					let mut draining_pending_forwards = pending_forwards.drain(..);
5912					while let Some(forward_info) = draining_pending_forwards.next() {
5913						let queue_fail_htlc_res = match forward_info {
5914							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5915								prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5916								prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5917									incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
5918									routing: PendingHTLCRouting::Forward {
5919										ref onion_packet, blinded, incoming_cltv_expiry, ..
5920									}, skimmed_fee_msat, ..
5921								},
5922							}) => {
5923								let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5924									short_channel_id: prev_short_channel_id,
5925									user_channel_id: Some(prev_user_channel_id),
5926									counterparty_node_id: prev_counterparty_node_id,
5927									channel_id: prev_channel_id,
5928									outpoint: prev_funding_outpoint,
5929									htlc_id: prev_htlc_id,
5930									incoming_packet_shared_secret: incoming_shared_secret,
5931									// Phantom payments are only PendingHTLCRouting::Receive.
5932									phantom_shared_secret: None,
5933									blinded_failure: blinded.map(|b| b.failure),
5934									cltv_expiry: incoming_cltv_expiry,
5935								});
5936								let next_blinding_point = blinded.and_then(|b| {
5937									b.next_blinding_override.or_else(|| {
5938										let encrypted_tlvs_ss = self.node_signer.ecdh(
5939											Recipient::Node, &b.inbound_blinding_point, None
5940										).unwrap().secret_bytes();
5941										onion_utils::next_hop_pubkey(
5942											&self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
5943										).ok()
5944									})
5945								});
5946
5947								// Forward the HTLC over the most appropriate channel with the corresponding peer,
5948								// applying non-strict forwarding.
5949								// The channel with the least amount of outbound liquidity will be used to maximize the
5950								// probability of being able to successfully forward a subsequent HTLC.
5951								let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
5952									ChannelPhase::Funded(chan) => {
5953										let balances = chan.context.get_available_balances(&self.fee_estimator);
5954										if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
5955											outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
5956											chan.context.is_usable() {
5957											Some((chan, balances))
5958										} else {
5959											None
5960										}
5961									},
5962									_ => None,
5963								}).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
5964								let optimal_channel = match maybe_optimal_channel {
5965									Some(chan) => chan,
5966									None => {
5967										// Fall back to the specified channel to return an appropriate error.
5968										if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5969											chan
5970										} else {
5971											forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
5972											break;
5973										}
5974									}
5975								};
5976
5977								let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
5978								let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
5979									"specified"
5980								} else {
5981									"alternate"
5982								};
5983								log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
5984									prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
5985								if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
5986										payment_hash, outgoing_cltv_value, htlc_source.clone(),
5987										onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
5988										&&logger)
5989								{
5990									if let ChannelError::Ignore(msg) = e {
5991										log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
5992									} else {
5993										panic!("Stated return value requirements in send_htlc() were not met");
5994									}
5995
5996									if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5997										let failure_code = 0x1000|7;
5998										let data = self.get_htlc_inbound_temp_fail_data(failure_code);
5999										failed_forwards.push((htlc_source, payment_hash,
6000											HTLCFailReason::reason(failure_code, data),
6001											HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
6002										));
6003									} else {
6004										forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6005										break;
6006									}
6007								}
6008								None
6009							},
6010							HTLCForwardInfo::AddHTLC { .. } => {
6011								panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
6012							},
6013							HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
6014								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6015									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6016									log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6017									Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
6018								} else {
6019									forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6020									break;
6021								}
6022							},
6023							HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
6024								if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6025									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6026									log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6027									let res = chan.queue_fail_malformed_htlc(
6028										htlc_id, failure_code, sha256_of_onion, &&logger
6029									);
6030									Some((res, htlc_id))
6031								} else {
6032									forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6033									break;
6034								}
6035							},
6036						};
6037						if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
6038							if let Err(e) = queue_fail_htlc_res {
6039								if let ChannelError::Ignore(msg) = e {
6040									if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6041										let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6042										log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
6043									}
6044								} else {
6045									panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
6046								}
6047								// fail-backs are best-effort; we probably already have one pending,
6048								// and if not that's OK: the channel is on the chain and sending
6049								// the HTLC-Timeout is their problem.
6050							}
6051						}
6052					}
6053				} else {
6054					'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
6055						match forward_info {
6056							HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
6057								prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
6058								prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
6059									routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
6060									skimmed_fee_msat, ..
6061								}
6062							}) => {
6063								let blinded_failure = routing.blinded_failure();
6064								let (
6065									cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret,
6066									mut onion_fields, has_recipient_created_payment_secret
6067								) = match routing {
6068									PendingHTLCRouting::Receive {
6069										payment_data, payment_metadata, payment_context,
6070										incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
6071										requires_blinded_error: _
6072									} => {
6073										let _legacy_hop_data = Some(payment_data.clone());
6074										let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
6075												payment_metadata, custom_tlvs };
6076										(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
6077											Some(payment_data), payment_context, phantom_shared_secret, onion_fields,
6078											true)
6079									},
6080									PendingHTLCRouting::ReceiveKeysend {
6081										payment_data, payment_preimage, payment_metadata,
6082										incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _,
6083										has_recipient_created_payment_secret,
6084									} => {
6085										let onion_fields = RecipientOnionFields {
6086											payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
6087											payment_metadata,
6088											custom_tlvs,
6089										};
6090										(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
6091											payment_data, None, None, onion_fields, has_recipient_created_payment_secret)
6092									},
6093									_ => {
6094										panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
6095									}
6096								};
6097								let claimable_htlc = ClaimableHTLC {
6098									prev_hop: HTLCPreviousHopData {
6099										short_channel_id: prev_short_channel_id,
6100										user_channel_id: Some(prev_user_channel_id),
6101										counterparty_node_id: prev_counterparty_node_id,
6102										channel_id: prev_channel_id,
6103										outpoint: prev_funding_outpoint,
6104										htlc_id: prev_htlc_id,
6105										incoming_packet_shared_secret: incoming_shared_secret,
6106										phantom_shared_secret,
6107										blinded_failure,
6108										cltv_expiry: Some(cltv_expiry),
6109									},
6110									// We differentiate the received value from the sender intended value
6111									// if possible so that we don't prematurely mark MPP payments complete
6112									// if routing nodes overpay
6113									value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
6114									sender_intended_value: outgoing_amt_msat,
6115									timer_ticks: 0,
6116									total_value_received: None,
6117									total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
6118									cltv_expiry,
6119									onion_payload,
6120									counterparty_skimmed_fee_msat: skimmed_fee_msat,
6121								};
6122
6123								let mut committed_to_claimable = false;
6124
6125								macro_rules! fail_htlc {
6126									($htlc: expr, $payment_hash: expr) => {
6127										debug_assert!(!committed_to_claimable);
6128										let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
6129										htlc_msat_height_data.extend_from_slice(
6130											&self.best_block.read().unwrap().height.to_be_bytes(),
6131										);
6132										failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
6133												short_channel_id: $htlc.prev_hop.short_channel_id,
6134												user_channel_id: $htlc.prev_hop.user_channel_id,
6135												counterparty_node_id: $htlc.prev_hop.counterparty_node_id,
6136												channel_id: prev_channel_id,
6137												outpoint: prev_funding_outpoint,
6138												htlc_id: $htlc.prev_hop.htlc_id,
6139												incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
6140												phantom_shared_secret,
6141												blinded_failure,
6142												cltv_expiry: Some(cltv_expiry),
6143											}), payment_hash,
6144											HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
6145											HTLCDestination::FailedPayment { payment_hash: $payment_hash },
6146										));
6147										continue 'next_forwardable_htlc;
6148									}
6149								}
6150								let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
6151								let mut receiver_node_id = self.our_network_pubkey;
6152								if phantom_shared_secret.is_some() {
6153									receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
6154										.expect("Failed to get node_id for phantom node recipient");
6155								}
6156
6157								macro_rules! check_total_value {
6158									($purpose: expr) => {{
6159										let mut payment_claimable_generated = false;
6160										let is_keysend = $purpose.is_keysend();
6161										let mut claimable_payments = self.claimable_payments.lock().unwrap();
6162										if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
6163											fail_htlc!(claimable_htlc, payment_hash);
6164										}
6165										let ref mut claimable_payment = claimable_payments.claimable_payments
6166											.entry(payment_hash)
6167											// Note that if we insert here we MUST NOT fail_htlc!()
6168											.or_insert_with(|| {
6169												committed_to_claimable = true;
6170												ClaimablePayment {
6171													purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
6172												}
6173											});
6174										if $purpose != claimable_payment.purpose {
6175											let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
6176											log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
6177											fail_htlc!(claimable_htlc, payment_hash);
6178										}
6179										if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
6180											if earlier_fields.check_merge(&mut onion_fields).is_err() {
6181												fail_htlc!(claimable_htlc, payment_hash);
6182											}
6183										} else {
6184											claimable_payment.onion_fields = Some(onion_fields);
6185										}
6186										let mut total_value = claimable_htlc.sender_intended_value;
6187										let mut earliest_expiry = claimable_htlc.cltv_expiry;
6188										for htlc in claimable_payment.htlcs.iter() {
6189											total_value += htlc.sender_intended_value;
6190											earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
6191											if htlc.total_msat != claimable_htlc.total_msat {
6192												log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
6193													&payment_hash, claimable_htlc.total_msat, htlc.total_msat);
6194												total_value = msgs::MAX_VALUE_MSAT;
6195											}
6196											if total_value >= msgs::MAX_VALUE_MSAT { break; }
6197										}
6198										// The condition determining whether an MPP is complete must
6199										// match exactly the condition used in `timer_tick_occurred`
6200										if total_value >= msgs::MAX_VALUE_MSAT {
6201											fail_htlc!(claimable_htlc, payment_hash);
6202										} else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
6203											log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
6204												&payment_hash);
6205											fail_htlc!(claimable_htlc, payment_hash);
6206										} else if total_value >= claimable_htlc.total_msat {
6207											#[allow(unused_assignments)] {
6208												committed_to_claimable = true;
6209											}
6210											claimable_payment.htlcs.push(claimable_htlc);
6211											let amount_msat =
6212												claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
6213											claimable_payment.htlcs.iter_mut()
6214												.for_each(|htlc| htlc.total_value_received = Some(amount_msat));
6215											let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
6216												.map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
6217											debug_assert!(total_value.saturating_sub(amount_msat) <=
6218												counterparty_skimmed_fee_msat);
6219											claimable_payment.htlcs.sort();
6220											let payment_id =
6221												claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
6222											new_events.push_back((events::Event::PaymentClaimable {
6223												receiver_node_id: Some(receiver_node_id),
6224												payment_hash,
6225												purpose: $purpose,
6226												amount_msat,
6227												counterparty_skimmed_fee_msat,
6228												via_channel_id: Some(prev_channel_id),
6229												via_user_channel_id: Some(prev_user_channel_id),
6230												claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
6231												onion_fields: claimable_payment.onion_fields.clone(),
6232												payment_id: Some(payment_id),
6233											}, None));
6234											payment_claimable_generated = true;
6235										} else {
6236											// Nothing to do - we haven't reached the total
6237											// payment value yet, wait until we receive more
6238											// MPP parts.
6239											claimable_payment.htlcs.push(claimable_htlc);
6240											#[allow(unused_assignments)] {
6241												committed_to_claimable = true;
6242											}
6243										}
6244										payment_claimable_generated
6245									}}
6246								}
6247
6248								// Check that the payment hash and secret are known. Note that we
6249								// MUST take care to handle the "unknown payment hash" and
6250								// "incorrect payment secret" cases here identically or we'd expose
6251								// that we are the ultimate recipient of the given payment hash.
6252								// Further, we must not expose whether we have any other HTLCs
6253								// associated with the same payment_hash pending or not.
6254								let payment_preimage = if has_recipient_created_payment_secret {
6255									if let Some(ref payment_data) = payment_data {
6256										let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
6257											Ok(result) => result,
6258											Err(()) => {
6259												log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
6260												fail_htlc!(claimable_htlc, payment_hash);
6261											}
6262										};
6263										if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
6264											let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
6265											if (cltv_expiry as u64) < expected_min_expiry_height {
6266												log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
6267												&payment_hash, cltv_expiry, expected_min_expiry_height);
6268												fail_htlc!(claimable_htlc, payment_hash);
6269											}
6270										}
6271										payment_preimage
6272									} else { fail_htlc!(claimable_htlc, payment_hash); }
6273								} else { None };
6274								match claimable_htlc.onion_payload {
6275									OnionPayload::Invoice { .. } => {
6276										let payment_data = payment_data.unwrap();
6277										let purpose = events::PaymentPurpose::from_parts(
6278											payment_preimage,
6279											payment_data.payment_secret,
6280											payment_context,
6281										);
6282										check_total_value!(purpose);
6283									},
6284									OnionPayload::Spontaneous(preimage) => {
6285										let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
6286										check_total_value!(purpose);
6287									}
6288								}
6289							},
6290							HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
6291								panic!("Got pending fail of our own HTLC");
6292							}
6293						}
6294					}
6295				}
6296			}
6297		}
6298
6299		let best_block_height = self.best_block.read().unwrap().height;
6300		self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
6301			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
6302			&self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
6303
6304		for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
6305			self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
6306		}
6307		self.forward_htlcs(&mut phantom_receives);
6308
6309		// Freeing the holding cell here is relatively redundant - in practice we'll do it when we
6310		// next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
6311		// nice to do the work now if we can rather than while we're trying to get messages in the
6312		// network stack.
6313		self.check_free_holding_cells();
6314
6315		if new_events.is_empty() { return }
6316		let mut events = self.pending_events.lock().unwrap();
6317		events.append(&mut new_events);
6318	}
6319
6320	/// Free the background events, generally called from [`PersistenceNotifierGuard`] constructors.
6321	///
6322	/// Expects the caller to have a total_consistency_lock read lock.
6323	fn process_background_events(&self) -> NotifyOption {
6324		debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);
6325
6326		self.background_events_processed_since_startup.store(true, Ordering::Release);
6327
6328		let mut background_events = Vec::new();
6329		mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
6330		if background_events.is_empty() {
6331			return NotifyOption::SkipPersistNoEvents;
6332		}
6333
6334		for event in background_events.drain(..) {
6335			match event {
6336				BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
6337					// The channel has already been closed, so there's no use caring whether the
6338					// monitor update completes.
6339					let _ = self.chain_monitor.update_channel(funding_txo, &update);
6340				},
6341				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
6342					self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
6343				},
6344				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
6345					let per_peer_state = self.per_peer_state.read().unwrap();
6346					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
6347						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6348						let peer_state = &mut *peer_state_lock;
6349						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
6350							if chan.blocked_monitor_updates_pending() == 0 {
6351								handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
6352							}
6353						} else {
6354							let update_actions = peer_state.monitor_update_blocked_actions
6355								.remove(&channel_id).unwrap_or(Vec::new());
6356							mem::drop(peer_state_lock);
6357							mem::drop(per_peer_state);
6358							self.handle_monitor_update_completion_actions(update_actions);
6359						}
6360					}
6361				},
6362			}
6363		}
6364		NotifyOption::DoPersist
6365	}
6366
6367	#[cfg(any(test, feature = "_test_utils"))]
6368	/// Process background events, for functional testing
6369	pub fn test_process_background_events(&self) {
6370		let _lck = self.total_consistency_lock.read().unwrap();
6371		let _ = self.process_background_events();
6372	}
6373
6374	fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
6375		if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }
6376
6377		let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6378
6379		// If the feerate has decreased by less than half, don't bother
6380		if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
6381			return NotifyOption::SkipPersistNoEvents;
6382		}
6383		if !chan.context.is_live() {
6384			log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
6385				chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6386			return NotifyOption::SkipPersistNoEvents;
6387		}
6388		log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
6389			&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
6390
6391		chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
6392		NotifyOption::DoPersist
6393	}
6394
6395	#[cfg(fuzzing)]
6396	/// In chanmon_consistency we want to sometimes do the channel fee updates done in
6397	/// timer_tick_occurred, but we can't generate the disabled channel updates as it considers
6398	/// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what
6399	/// it wants to detect). Thus, we have a variant exposed here for its benefit.
6400	pub fn maybe_update_chan_fees(&self) {
6401		PersistenceNotifierGuard::optionally_notify(self, || {
6402			let mut should_persist = NotifyOption::SkipPersistNoEvents;
6403
6404			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6405			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
6406
6407			let per_peer_state = self.per_peer_state.read().unwrap();
6408			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
6409				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6410				let peer_state = &mut *peer_state_lock;
6411				for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
6412					|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
6413				) {
6414					let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6415						anchor_feerate
6416					} else {
6417						non_anchor_feerate
6418					};
6419					let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
6420					if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
6421				}
6422			}
6423
6424			should_persist
6425		});
6426	}
6427
6428	/// Performs actions which should happen on startup and roughly once per minute thereafter.
6429	///
6430	/// This currently includes:
6431	///  * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
6432	///  * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
6433	///    than a minute, informing the network that they should no longer attempt to route over
6434	///    the channel.
6435	///  * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
6436	///    with the current [`ChannelConfig`].
6437	///  * Removing peers which have disconnected and no longer have any channels.
6438	///  * Force-closing and removing channels which have not completed establishment in a timely manner.
6439	///  * Forgetting about stale outbound payments, either those that have already been fulfilled
6440	///    or those awaiting an invoice that hasn't been delivered in the necessary amount of time.
6441	///    The latter is determined using the system clock in `std` and the highest seen block time
6442	///    minus two hours in non-`std`.
6443	///
6444	/// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
6445	/// estimate fetches.
6446	///
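	/// As a rough sketch, a caller not using a background processor (which would typically drive
	/// this) might run a dedicated timer thread; `channel_manager` here is an assumed handle:
	///
	/// ```ignore
	/// loop {
	///     std::thread::sleep(std::time::Duration::from_secs(60));
	///     channel_manager.timer_tick_occurred();
	/// }
	/// ```
	///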
6447	/// [`ChannelUpdate`]: msgs::ChannelUpdate
6448	/// [`ChannelConfig`]: crate::util::config::ChannelConfig
6449	pub fn timer_tick_occurred(&self) {
6450		PersistenceNotifierGuard::optionally_notify(self, || {
6451			let mut should_persist = NotifyOption::SkipPersistNoEvents;
6452
6453			let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6454			let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);
6455
6456			let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
6457			let mut timed_out_mpp_htlcs = Vec::new();
6458			let mut pending_peers_awaiting_removal = Vec::new();
6459			let mut shutdown_channels = Vec::new();
6460
6461			macro_rules! process_unfunded_channel_tick {
6462				($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { {
6463					let context = &mut $chan.context;
6464					context.maybe_expire_prev_config();
6465					if $chan.unfunded_context.should_expire_unfunded_channel() {
6466						let logger = WithChannelContext::from(&self.logger, context, None);
6467						log_error!(logger,
6468							"Force-closing pending channel with ID {} for not establishing in a timely manner",
6469							context.channel_id());
6470						let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
6471						locked_close_channel!(self, $peer_state, context, close_res);
6472						shutdown_channels.push(close_res);
6473						$pending_msg_events.push(MessageSendEvent::HandleError {
6474							node_id: context.get_counterparty_node_id(),
6475							action: msgs::ErrorAction::SendErrorMessage {
6476								msg: msgs::ErrorMessage {
6477									channel_id: context.channel_id(),
6478									data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
6479								},
6480							},
6481						});
6482						false
6483					} else {
6484						true
6485					}
6486				} }
6487			}
6488
6489			{
6490				let per_peer_state = self.per_peer_state.read().unwrap();
6491				for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
6492					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6493					let peer_state = &mut *peer_state_lock;
6494					let pending_msg_events = &mut peer_state.pending_msg_events;
6495					let counterparty_node_id = *counterparty_node_id;
6496					peer_state.channel_by_id.retain(|chan_id, phase| {
6497						match phase {
6498							ChannelPhase::Funded(chan) => {
6499								let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6500									anchor_feerate
6501								} else {
6502									non_anchor_feerate
6503								};
6504								let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
6505								if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
6506
6507								if let Err(e) = chan.timer_check_closing_negotiation_progress() {
6508									let (needs_close, err) = convert_chan_phase_err!(self, peer_state, e, chan, chan_id, FUNDED_CHANNEL);
6509									handle_errors.push((Err(err), counterparty_node_id));
6510									if needs_close { return false; }
6511								}
6512
6513								match chan.channel_update_status() {
6514									ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
6515									ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
6516									ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
6517										=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
6518									ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
6519										=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
6520									ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
6521										n += 1;
6522										if n >= DISABLE_GOSSIP_TICKS {
6523											chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
6524											if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
6525												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6526												pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
6527													msg: update
6528												});
6529											}
6530											should_persist = NotifyOption::DoPersist;
6531										} else {
6532											chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
6533										}
6534									},
6535									ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
6536										n += 1;
6537										if n >= ENABLE_GOSSIP_TICKS {
6538											chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
6539											if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
6540												let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
6541												pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
6542													msg: update
6543												});
6544											}
6545											should_persist = NotifyOption::DoPersist;
6546										} else {
6547											chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
6548										}
6549									},
6550									_ => {},
6551								}
6552
6553								chan.context.maybe_expire_prev_config();
6554
6555								if chan.should_disconnect_peer_awaiting_response() {
6556									let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6557									log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
6558											counterparty_node_id, chan_id);
6559									pending_msg_events.push(MessageSendEvent::HandleError {
6560										node_id: counterparty_node_id,
6561										action: msgs::ErrorAction::DisconnectPeerWithWarning {
6562											msg: msgs::WarningMessage {
6563												channel_id: *chan_id,
6564												data: "Disconnecting due to timeout awaiting response".to_owned(),
6565											},
6566										},
6567									});
6568								}
6569
6570								true
6571							},
6572							ChannelPhase::UnfundedInboundV1(chan) => {
6573								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6574							},
6575							ChannelPhase::UnfundedOutboundV1(chan) => {
6576								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6577							},
6578							ChannelPhase::UnfundedInboundV2(chan) => {
6579								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6580							},
6581							ChannelPhase::UnfundedOutboundV2(chan) => {
6582								process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
6583							},
6584						}
6585					});
6586
6587					for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
6588						if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
6589							let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
6590							log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
6591							peer_state.pending_msg_events.push(
6592								events::MessageSendEvent::HandleError {
6593									node_id: counterparty_node_id,
6594									action: msgs::ErrorAction::SendErrorMessage {
6595										msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
6596									},
6597								}
6598							);
6599						}
6600					}
6601					peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);
6602
6603					if peer_state.ok_to_remove(true) {
6604						pending_peers_awaiting_removal.push(counterparty_node_id);
6605					}
6606				}
6607			}
6608
6609			// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
6610			// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
6611			// to that peer are later closed while the peer is still disconnected (i.e. force closed),
6612			// we therefore need to remove the peer from `peer_state` separately.
6613			// To avoid having to take the `per_peer_state` `write` lock once the channels are
6614			// closed, we instead remove such peers awaiting removal here on a timer, to limit the
6615			// negative effects on parallelism as much as possible.
6616			if pending_peers_awaiting_removal.len() > 0 {
6617				let mut per_peer_state = self.per_peer_state.write().unwrap();
6618				for counterparty_node_id in pending_peers_awaiting_removal {
6619					match per_peer_state.entry(counterparty_node_id) {
6620						hash_map::Entry::Occupied(entry) => {
6621							// Remove the entry if the peer is still disconnected and we still
6622							// have no channels to the peer.
6623							let remove_entry = {
6624								let peer_state = entry.get().lock().unwrap();
6625								peer_state.ok_to_remove(true)
6626							};
6627							if remove_entry {
6628								entry.remove_entry();
6629							}
6630						},
6631						hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
6632					}
6633				}
6634			}
6635
6636			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
6637				if payment.htlcs.is_empty() {
6638					// This should be unreachable
6639					debug_assert!(false);
6640					return false;
6641				}
6642				if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
6643					// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
6644					// In this case we're not going to handle any timeouts of the parts here.
6645					// This condition determining whether the MPP is complete here must match
6646					// exactly the condition used in `process_pending_htlc_forwards`.
6647					if payment.htlcs[0].total_msat <= payment.htlcs.iter()
6648						.fold(0, |total, htlc| total + htlc.sender_intended_value)
6649					{
6650						return true;
6651					} else if payment.htlcs.iter_mut().any(|htlc| {
6652						htlc.timer_ticks += 1;
6653						return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
6654					}) {
6655						timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
6656							.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
6657						return false;
6658					}
6659				}
6660				true
6661			});
6662
6663			for htlc_source in timed_out_mpp_htlcs.drain(..) {
6664				let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
6665				let reason = HTLCFailReason::from_failure_code(23);
6666				let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
6667				self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
6668			}
6669
6670			for (err, counterparty_node_id) in handle_errors.drain(..) {
6671				let _ = handle_error!(self, err, counterparty_node_id);
6672			}
6673
6674			for shutdown_res in shutdown_channels {
6675				self.finish_close_channel(shutdown_res);
6676			}
6677
6678			#[cfg(feature = "std")]
6679			let duration_since_epoch = std::time::SystemTime::now()
6680				.duration_since(std::time::SystemTime::UNIX_EPOCH)
6681				.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
6682			#[cfg(not(feature = "std"))]
6683			let duration_since_epoch = Duration::from_secs(
6684				self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
6685			);
6686
6687			self.pending_outbound_payments.remove_stale_payments(
6688				duration_since_epoch, &self.pending_events
6689			);
6690
6691			// Technically we don't need to do this here, but if we have holding cell entries in a
6692			// channel that need freeing, it's better to do that here and block a background task
6693			// than block the message queueing pipeline.
6694			if self.check_free_holding_cells() {
6695				should_persist = NotifyOption::DoPersist;
6696			}
6697
6698			should_persist
6699		});
6700	}
6701
6702	/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
6703	/// after a PaymentClaimable event, failing the HTLC back to its origin and freeing resources
6704	/// along the path (including in our own channel on which we received it).
6705	///
6706	/// Note that in some cases around unclean shutdown, it is possible the payment may have
6707	/// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
6708	/// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment
6709	/// may have already been failed automatically by LDK if it was nearing its expiration time.
6710	///
6711	/// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
6712	/// [`ChannelManager::claim_funds`]), you should still monitor for
6713	/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
6714	/// startup during which time claims that were in-progress at shutdown may be replayed.
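	///
	/// A sketch of rejecting an incoming payment from an event handler (`channel_manager` and the
	/// surrounding event dispatch are assumptions of this example):
	///
	/// ```ignore
	/// if let Event::PaymentClaimable { payment_hash, .. } = event {
	///     // We don't want this payment, so fail it back instead of claiming it.
	///     channel_manager.fail_htlc_backwards(&payment_hash);
	/// }
	/// ```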
6715	pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
6716		self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
6717	}
6718
6719	/// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
6720	/// reason for the failure.
6721	///
6722	/// See [`FailureCode`] for valid failure codes.
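	///
	/// For example (a sketch; `channel_manager` and `payment_hash` are assumed to be in scope):
	///
	/// ```ignore
	/// channel_manager.fail_htlc_backwards_with_reason(
	///     &payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails,
	/// );
	/// ```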
6723	pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
6724		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6725
6726		let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
6727		if let Some(payment) = removed_source {
6728			for htlc in payment.htlcs {
6729				let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
6730				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6731				let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
6732				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6733			}
6734		}
6735	}
6736
6737	/// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
6738	fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
6739		match failure_code {
6740			FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()),
6741			FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
6742			FailureCode::IncorrectOrUnknownPaymentDetails => {
6743				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
6744				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
6745				HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
6746			},
6747			FailureCode::InvalidOnionPayload(data) => {
6748				let fail_data = match data {
6749					Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
6750					None => Vec::new(),
6751				};
6752				HTLCFailReason::reason(failure_code.into(), fail_data)
6753			}
6754		}
6755	}
6756
6757	/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
6758	/// that we want to return.
6759	///
6760	/// This is for failures on the channel on which the HTLC was *received*, not failures
6761	/// forwarding it onward.
6762	fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec<u8> {
6763		debug_assert_eq!(err_code & 0x1000, 0x1000);
6764		debug_assert_ne!(err_code, 0x1000|11);
6765		debug_assert_ne!(err_code, 0x1000|12);
6766		debug_assert_ne!(err_code, 0x1000|13);
6767		// At most we write the two-byte `disabled_flags` and two-byte `len` fields (4 bytes total).
6768		let mut enc = VecWriter(Vec::with_capacity(4));
6769		if err_code == 0x1000 | 20 {
6770			// No flags for `disabled_flags` are currently defined so they're always two zero bytes.
6771			// See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
6772			0u16.write(&mut enc).expect("Writes cannot fail");
6773		}
6774		// See https://github.com/lightning/bolts/blob/247e83d/04-onion-routing.md?plain=1#L1414-L1415
6775		(0u16).write(&mut enc).expect("Writes cannot fail");
6776		enc.0
6777	}
6778
6779	// Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
6780	// failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
6781	// be surfaced to the user.
6782	fn fail_holding_cell_htlcs(
6783		&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
6784		counterparty_node_id: &PublicKey
6785	) {
6786		let (failure_code, onion_failure_data) = {
6787			let per_peer_state = self.per_peer_state.read().unwrap();
6788			if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
6789				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6790				let peer_state = &mut *peer_state_lock;
6791				match peer_state.channel_by_id.entry(channel_id) {
6792					hash_map::Entry::Occupied(chan_phase_entry) => {
6793						if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
6794							let failure_code = 0x1000|7;
6795							let data = self.get_htlc_inbound_temp_fail_data(failure_code);
6796							(failure_code, data)
6797						} else {
6798							// We shouldn't be trying to fail holding cell HTLCs on an unfunded channel.
6799							debug_assert!(false);
6800							(0x4000|10, Vec::new())
6801						}
6802					},
6803					hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
6804				}
6805			} else { (0x4000|10, Vec::new()) }
6806		};
6807
6808		for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
6809			let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
6810			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
6811			self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
6812		}
6813	}
6814
6815	fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
6816		let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
6817		if push_forward_event { self.push_pending_forwards_ev(); }
6818	}
6819
6820	/// Fails an HTLC backwards to the sender of it to us.
6821	/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
6822	fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
6823		// Ensure that no peer state channel storage lock is held when calling this function.
6824		// This ensures that future code doesn't introduce a lock-order requirement for
6825		// `forward_htlcs` to be locked after the `per_peer_state` peer locks, which would happen if
6826		// this function were called with any `per_peer_state` peer lock acquired.
6827		#[cfg(debug_assertions)]
6828		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
6829			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
6830		}
6831
6832		//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
6833		//identify whether we sent it or not based on the (I presume) very different runtime
6834		//between the branches here. We should make this async and move it into the forward HTLCs
6835		//timer handling.
6836
6837		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
6838		// from block_connected which may run during initialization prior to the chain_monitor
6839		// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
6840		let mut push_forward_event;
6841		match source {
6842			HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
6843				push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
6844					session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
6845					&self.pending_events, &self.logger);
6846			},
6847			HTLCSource::PreviousHopData(HTLCPreviousHopData {
6848				ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
6849				ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
6850			}) => {
6851				log_trace!(
6852					WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
6853					"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
6854					if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
6855				);
6856				let failure = match blinded_failure {
6857					Some(BlindedFailure::FromIntroductionNode) => {
6858						let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
6859						let err_packet = blinded_onion_error.get_encrypted_failure_packet(
6860							incoming_packet_shared_secret, phantom_shared_secret
6861						);
6862						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
6863					},
6864					Some(BlindedFailure::FromBlindedNode) => {
6865						HTLCForwardInfo::FailMalformedHTLC {
6866							htlc_id: *htlc_id,
6867							failure_code: INVALID_ONION_BLINDING,
6868							sha256_of_onion: [0; 32]
6869						}
6870					},
6871					None => {
6872						let err_packet = onion_error.get_encrypted_failure_packet(
6873							incoming_packet_shared_secret, phantom_shared_secret
6874						);
6875						HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
6876					}
6877				};
6878
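				// Only request a new forwards-processing event if both queues were empty before we
				// queue this failure; otherwise one is already pending.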
6879				push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
6880				let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
6881				push_forward_event &= forward_htlcs.is_empty();
6882				match forward_htlcs.entry(*short_channel_id) {
6883					hash_map::Entry::Occupied(mut entry) => {
6884						entry.get_mut().push(failure);
6885					},
6886					hash_map::Entry::Vacant(entry) => {
6887						entry.insert(vec!(failure));
6888					}
6889				}
6890				mem::drop(forward_htlcs);
6891				let mut pending_events = self.pending_events.lock().unwrap();
6892				pending_events.push_back((events::Event::HTLCHandlingFailed {
6893					prev_channel_id: *channel_id,
6894					failed_next_destination: destination,
6895				}, None));
6896			},
6897		}
6898		push_forward_event
6899	}
6900
6901	/// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
6902	/// [`MessageSendEvent`]s needed to claim the payment.
6903	///
6904	/// This method is guaranteed to claim the payment, but only if the current height is strictly
6905	/// below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
6906	/// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
6907	/// successful. It will generally be available in the next [`process_pending_events`] call.
6908	///
6909	/// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
6910	/// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
6911	/// event matches your expectation. If you fail to do so and call this method, you may provide
6912	/// the sender "proof-of-payment" when they did not fulfill the full expected payment.
6913	///
6914	/// This function will fail the payment if it has custom TLVs with even type numbers, as we
6915	/// will assume they are unknown. If you intend to accept even custom TLVs, you should use
6916	/// [`claim_funds_with_known_custom_tlvs`].
6917	///
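	/// # Example
	///
	/// A minimal sketch (not part of the official documentation) of claiming from an event
	/// handler. It assumes `channel_manager` is a running [`ChannelManager`] and that
	/// `events::PaymentPurpose::preimage` yields the preimage LDK generated for the payment:
	///
	/// ```ignore
	/// use lightning::events::Event;
	///
	/// match event {
	/// 	Event::PaymentClaimable { purpose, amount_msat, .. } => {
	/// 		// Verify `amount_msat` matches what you expect to be paid before claiming.
	/// 		if let Some(payment_preimage) = purpose.preimage() {
	/// 			channel_manager.claim_funds(payment_preimage);
	/// 		}
	/// 		// If the payment was registered via `create_inbound_payment_for_hash`, claim with
	/// 		// the preimage you hold instead.
	/// 	},
	/// 	_ => {},
	/// }
	/// ```
	///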
6918	/// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
6919	/// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
6920	/// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
6921	/// [`process_pending_events`]: EventsProvider::process_pending_events
6922	/// [`create_inbound_payment`]: Self::create_inbound_payment
6923	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
6924	/// [`claim_funds_with_known_custom_tlvs`]: Self::claim_funds_with_known_custom_tlvs
6925	pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
6926		self.claim_payment_internal(payment_preimage, false);
6927	}
6928
6929	/// This is a variant of [`claim_funds`] that allows accepting a payment with custom TLVs with
6930	/// even type numbers.
6931	///
6932	/// # Note
6933	///
6934	/// You MUST check you've understood all even TLVs before using this to
6935	/// claim, otherwise you may unintentionally agree to some protocol you do not understand.
6936	///
6937	/// [`claim_funds`]: Self::claim_funds
6938	pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
6939		self.claim_payment_internal(payment_preimage, true);
6940	}
6941
6942	fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
6943		let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());
6944
6945		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6946
6947		let (sources, claiming_payment) = {
6948			let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
6949				payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
6950				custom_tlvs_known,
6951			);
6952
6953			match res {
6954				Ok((htlcs, payment_info)) => (htlcs, payment_info),
6955				Err(htlcs) => {
6956					for htlc in htlcs {
6957						let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
6958						let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6959						let receiver = HTLCDestination::FailedPayment { payment_hash };
6960						self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6961					}
6962					return;
6963				}
6964			}
6965		};
6966		debug_assert!(!sources.is_empty());
6967
6968		// Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
6969		// and when we got here we need to check that the amount we're about to claim matches the
6970		// amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
6971		// the MPP parts all have the same `total_msat`.
6972		let mut claimable_amt_msat = 0;
6973		let mut prev_total_msat = None;
6974		let mut expected_amt_msat = None;
6975		let mut valid_mpp = true;
6976		let mut errs = Vec::new();
6977		let per_peer_state = self.per_peer_state.read().unwrap();
6978		for htlc in sources.iter() {
6979			if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
6980				log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
6981				debug_assert!(false);
6982				valid_mpp = false;
6983				break;
6984			}
6985			prev_total_msat = Some(htlc.total_msat);
6986
6987			if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
6988				log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
6989				debug_assert!(false);
6990				valid_mpp = false;
6991				break;
6992			}
6993			expected_amt_msat = htlc.total_value_received;
6994			claimable_amt_msat += htlc.value;
6995		}
6996		mem::drop(per_peer_state);
6997		if sources.is_empty() || expected_amt_msat.is_none() {
6998			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
6999			log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
7000			return;
7001		}
7002		if claimable_amt_msat != expected_amt_msat.unwrap() {
7003			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7004			log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
7005				expected_amt_msat.unwrap(), claimable_amt_msat);
7006			return;
7007		}
7008		if valid_mpp {
7009			let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
7010				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
7011					Some(MPPClaimHTLCSource {
7012						counterparty_node_id: cp_id,
7013						funding_txo: htlc.prev_hop.outpoint,
7014						channel_id: htlc.prev_hop.channel_id,
7015						htlc_id: htlc.prev_hop.htlc_id,
7016					})
7017				} else {
7018					None
7019				}
7020			}).collect();
7021			let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
7022				let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len());
7023				for part in mpp_parts.iter() {
7024					let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id);
7025					if !channels_without_preimage.contains(&chan) {
7026						channels_without_preimage.push(chan);
7027					}
7028				}
7029				Some(Arc::new(Mutex::new(PendingMPPClaim {
7030					channels_without_preimage,
7031					channels_with_preimage: Vec::new(),
7032				})))
7033			} else {
7034				None
7035			};
7036			let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
7037			for htlc in sources {
7038				let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
7039					if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
7040						let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
7041						Some((cp_id, htlc.prev_hop.channel_id, claim_ptr))
7042					} else {
7043						None
7044					}
7045				);
7046				let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
7047					RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
7048						pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
7049					}
7050				});
7051				self.claim_funds_from_hop(
7052					htlc.prev_hop, payment_preimage, payment_info.clone(),
7053					|_, definitely_duplicate| {
7054						debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
7055						(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
7056					}
7057				);
7058			}
7059		} else {
7060			for htlc in sources {
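				// Fail back with incorrect_or_unknown_payment_details (PERM|15); per BOLT 4 its
				// failure data is the received HTLC amount (u64, big-endian) followed by the
				// current block height (u32, big-endian).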
7061				let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
7062				htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
7063				let source = HTLCSource::PreviousHopData(htlc.prev_hop);
7064				let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
7065				let receiver = HTLCDestination::FailedPayment { payment_hash };
7066				self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
7067			}
7068			self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7069		}
7070
7071		// Now we can handle any errors which were generated.
7072		for (counterparty_node_id, err) in errs.drain(..) {
7073			let res: Result<(), _> = Err(err);
7074			let _ = handle_error!(self, res, counterparty_node_id);
7075		}
7076	}
7077
7078	fn claim_funds_from_hop<
7079		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7080	>(
7081		&self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
7082		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7083	) {
7084		let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
7085			let short_to_chan_info = self.short_to_chan_info.read().unwrap();
7086			short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
7087		});
7088
7089		let htlc_source = HTLCClaimSource {
7090			counterparty_node_id,
7091			funding_txo: prev_hop.outpoint,
7092			channel_id: prev_hop.channel_id,
7093			htlc_id: prev_hop.htlc_id,
7094		};
7095		self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
7096	}
7097
7098	fn claim_mpp_part<
7099		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7100	>(
7101		&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
7102		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7103	) {
7104		//TODO: Delay the claimed_funds relaying just like we do outbound relay!
7105
7106		// If we haven't yet run background events assume we're still deserializing and shouldn't
7107		// actually pass `ChannelMonitorUpdate`s to users yet. Instead, queue them up as
7108		// `BackgroundEvent`s.
7109		let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);
7110
7111		// As we may call handle_monitor_update_completion_actions in rather rare cases, check that
7112		// the required mutexes are not held before we start.
7113		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
7114		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
7115
7116		let per_peer_state = self.per_peer_state.read().unwrap();
7117		let chan_id = prev_hop.channel_id;
7118
7119		const MISSING_MON_ERROR: &'static str =
7120			"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";
7121
7122		// Note here that `peer_state_opt` is always `Some` if `prev_hop.counterparty_node_id` is
7123		// `Some`. This is relied on in the closed-channel case below.
7124		let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
7125			|counterparty_node_id| per_peer_state.get(counterparty_node_id)
7126				.map(|peer_mutex| peer_mutex.lock().unwrap())
7127				.expect(MISSING_MON_ERROR)
7128		);
7129
7130		if let Some(peer_state_lock) = peer_state_opt.as_mut() {
7131			let peer_state = &mut **peer_state_lock;
7132			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
7133				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
7134					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
7135					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);
7136
7137					match fulfill_res {
7138						UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
7139							let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
7140							if let Some(action) = action_opt {
7141								log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
7142									chan_id, action);
7143								peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
7144							}
7145							if let Some(raa_blocker) = raa_blocker_opt {
7146								peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
7147							}
7148							handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
7149								peer_state, per_peer_state, chan);
7150						}
7151						UpdateFulfillCommitFetch::DuplicateClaim {} => {
7152							let (action_opt, raa_blocker_opt) = completion_action(None, true);
7153							if let Some(raa_blocker) = raa_blocker_opt {
7154								// If we're making a claim during startup, it's a replay of a
7155								// payment claim from a `ChannelMonitor`. In some cases (MPP or
7156								// if the HTLC was only recently removed) we make such claims
7157								// after an HTLC has been removed from a channel entirely, and
7158								// thus the RAA blocker has long since completed.
7159								//
7160								// In any other case, the RAA blocker must still be present and
7161								// blocking RAAs.
7162								debug_assert!(during_init ||
7163									peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
7164							}
7165							let action = if let Some(action) = action_opt {
7166								action
7167							} else {
7168								return;
7169							};
7170
7171							mem::drop(peer_state_opt);
7172
7173							log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
7174								chan_id, action);
7175							if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7176								downstream_counterparty_node_id: node_id,
7177								downstream_funding_outpoint: _,
7178								blocking_action: blocker, downstream_channel_id: channel_id,
7179							} = action {
7180								if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
7181									let mut peer_state = peer_state_mtx.lock().unwrap();
7182									if let Some(blockers) = peer_state
7183										.actions_blocking_raa_monitor_updates
7184										.get_mut(&channel_id)
7185									{
7186										let mut found_blocker = false;
7187										blockers.retain(|iter| {
7188											// Note that we could actually be blocked, in
7189											// which case we need to only remove the one
7190											// blocker which was added duplicatively.
7191											let first_blocker = !found_blocker;
7192											if *iter == blocker { found_blocker = true; }
7193											*iter != blocker || !first_blocker
7194										});
7195										debug_assert!(found_blocker);
7196									}
7197								} else {
7198									debug_assert!(false);
7199								}
7200							} else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
7201								debug_assert!(during_init,
7202									"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
7203								mem::drop(per_peer_state);
7204								self.handle_monitor_update_completion_actions([action]);
7205							} else {
7206								debug_assert!(false,
7207									"Duplicate claims should always either be for forwarded payments (freeing another channel immediately) or during init (for claim replay)");
7208								return;
7209							};
7210						}
7211					}
7212				}
7213				return;
7214			}
7215		}
7216
7217		if prev_hop.counterparty_node_id.is_none() {
7218			let payment_hash: PaymentHash = payment_preimage.into();
7219			panic!(
7220				"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
7221				payment_hash,
7222				payment_preimage,
7223			);
7224		}
7225		let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
7226		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");
7227
7228		let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
7229			*latest_update_id = latest_update_id.saturating_add(1);
7230			*latest_update_id
7231		} else {
7232			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
7233This should have been checked for availability on startup but somehow it is no longer available.
7234This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
7235			log_error!(self.logger, "{}", err);
7236			panic!("{}", err);
7237		};
7238
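		// The channel is closed, so build a standalone `ChannelMonitorUpdate` carrying only the
		// preimage and apply it to the closed channel's `ChannelMonitor`.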
7239		let preimage_update = ChannelMonitorUpdate {
7240			update_id,
7241			counterparty_node_id: Some(counterparty_node_id),
7242			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
7243				payment_preimage,
7244				payment_info,
7245			}],
7246			channel_id: Some(prev_hop.channel_id),
7247		};
7248
7249		// Note that we do process the completion action here. This totally could be a
7250		// duplicate claim, but we have no way of knowing without interrogating the
7251		// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
7252		// generally always allowed to be duplicative (and it's specifically noted in
7253		// `PaymentForwarded`).
7254		let (action_opt, raa_blocker_opt) = completion_action(None, false);
7255
7256		if let Some(raa_blocker) = raa_blocker_opt {
7257			peer_state.actions_blocking_raa_monitor_updates
7258				.entry(prev_hop.channel_id)
7259				.or_default()
7260				.push(raa_blocker);
7261		}
7262
7263		// Given that we're in a bit of a weird edge case, it's worth hashing the preimage
7264		// to include the `payment_hash` in the log metadata here.
7265		let payment_hash = payment_preimage.into();
7266		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));
7267
7268		if let Some(action) = action_opt {
7269			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
7270				chan_id, action);
7271			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
7272		}
7273
7274		handle_new_monitor_update!(
7275			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
7276			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
7277		);
7278	}
7279
7280	fn finalize_claims(&self, sources: Vec<HTLCSource>) {
7281		self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
7282	}
7283
7284	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
7285		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
7286		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
7287		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
7288	) {
7289		match source {
7290			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
7291				debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
7292					"We don't support claim_htlc claims during startup - monitors may not be available yet");
7293				if let Some(pubkey) = next_channel_counterparty_node_id {
7294					debug_assert_eq!(pubkey, path.hops[0].pubkey);
7295				}
7296				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
7297					channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
7298					counterparty_node_id: path.hops[0].pubkey,
7299				};
7300				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
7301					session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
7302					&self.logger);
7303			},
7304			HTLCSource::PreviousHopData(hop_data) => {
7305				let prev_channel_id = hop_data.channel_id;
7306				let prev_user_channel_id = hop_data.user_channel_id;
7307				let prev_node_id = hop_data.counterparty_node_id;
7308				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
7309				self.claim_funds_from_hop(hop_data, payment_preimage, None,
7310					|htlc_claim_value_msat, definitely_duplicate| {
7311						let chan_to_release =
7312							if let Some(node_id) = next_channel_counterparty_node_id {
7313								Some(EventUnblockedChannel {
7314									counterparty_node_id: node_id,
7315									funding_txo: next_channel_outpoint,
7316									channel_id: next_channel_id,
7317									blocking_action: completed_blocker
7318								})
7319							} else {
7320								// We can only get `None` here if we are processing a
7321								// `ChannelMonitor`-originated event, in which case we
7322								// don't care about ensuring we wake the downstream
7323								// channel's monitor updating - the channel is already
7324								// closed.
7325								None
7326							};
7327
7328						if definitely_duplicate && startup_replay {
7329							// On startup we may get redundant claims which are related to
7330							// monitor updates still in flight. In that case, we shouldn't
7331							// immediately free, but instead let that monitor update complete
7332							// in the background.
7333							(None, None)
7334						} else if definitely_duplicate {
7335							if let Some(other_chan) = chan_to_release {
7336								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7337									downstream_counterparty_node_id: other_chan.counterparty_node_id,
7338									downstream_funding_outpoint: other_chan.funding_txo,
7339									downstream_channel_id: other_chan.channel_id,
7340									blocking_action: other_chan.blocking_action,
7341								}), None)
7342							} else { (None, None) }
7343						} else {
7344							let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
7345								if let Some(claimed_htlc_value) = htlc_claim_value_msat {
7346									Some(claimed_htlc_value - forwarded_htlc_value)
7347								} else { None }
7348							} else { None };
7349							debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
7350								"skimmed_fee_msat must always be included in total_fee_earned_msat");
7351							(Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
7352								event: events::Event::PaymentForwarded {
7353									prev_channel_id: Some(prev_channel_id),
7354									next_channel_id: Some(next_channel_id),
7355									prev_user_channel_id,
7356									next_user_channel_id,
7357									prev_node_id,
7358									next_node_id: next_channel_counterparty_node_id,
7359									total_fee_earned_msat,
7360									skimmed_fee_msat,
7361									claim_from_onchain_tx: from_onchain,
7362									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
7363								},
7364								downstream_counterparty_and_funding_outpoint: chan_to_release,
7365							}), None)
7366						}
7367					});
7368			},
7369		}
7370	}
7371
7372	/// Gets the node_id held by this ChannelManager
7373	pub fn get_our_node_id(&self) -> PublicKey {
7374		self.our_network_pubkey
7375	}
7376
7377	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
7378		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
7379		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
7380		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
7381
7382		let mut freed_channels = Vec::new();
7383
7384		for action in actions.into_iter() {
7385			match action {
7386				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
7387					if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim {
7388						let per_peer_state = self.per_peer_state.read().unwrap();
7389						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
7390							let mut peer_state = peer_state_mutex.lock().unwrap();
7391							let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
7392							if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
7393								blockers.get_mut().retain(|blocker|
7394									if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
7395										if *pending_claim == claim_ptr {
7396											let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
7397											let pending_claim_state = &mut *pending_claim_state_lock;
7398											pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| {
7399												let this_claim =
7400													*cp == counterparty_node_id && *cid == chan_id;
7401												if this_claim {
7402													pending_claim_state.channels_with_preimage.push((*cp, *op, *cid));
7403													false
7404												} else { true }
7405											});
7406											if pending_claim_state.channels_without_preimage.is_empty() {
7407												for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() {
7408													let freed_chan = (*cp, *op, *cid, blocker.clone());
7409													freed_channels.push(freed_chan);
7410												}
7411											}
7412											!pending_claim_state.channels_without_preimage.is_empty()
7413										} else { true }
7414									} else { true }
7415								);
7416								if blockers.get().is_empty() {
7417									blockers.remove();
7418								}
7419							}
7420						});
7421					}
7422
7423					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
7424					if let Some(ClaimingPayment {
7425						amount_msat,
7426						payment_purpose: purpose,
7427						receiver_node_id,
7428						htlcs,
7429						sender_intended_value: sender_intended_total_msat,
7430						onion_fields,
7431						payment_id,
7432					}) = payment {
7433						let event = events::Event::PaymentClaimed {
7434							payment_hash,
7435							purpose,
7436							amount_msat,
7437							receiver_node_id: Some(receiver_node_id),
7438							htlcs,
7439							sender_intended_total_msat,
7440							onion_fields,
7441							payment_id,
7442						};
7443						let event_action = (event, None);
7444						let mut pending_events = self.pending_events.lock().unwrap();
7445						// If we're replaying a claim on startup we may end up duplicating an event
7446						// that's already in our queue, so check before we push another one. The
7447						// `payment_id` should suffice to ensure we never spuriously drop a second
7448						// event for a duplicate payment.
7449						if !pending_events.contains(&event_action) {
7450							pending_events.push_back(event_action);
7451						}
7452					}
7453				},
7454				MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
7455					event, downstream_counterparty_and_funding_outpoint
7456				} => {
7457					self.pending_events.lock().unwrap().push_back((event, None));
7458					if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
7459						self.handle_monitor_update_release(
7460							unblocked.counterparty_node_id, unblocked.funding_txo,
7461							unblocked.channel_id, Some(unblocked.blocking_action),
7462						);
7463					}
7464				},
7465				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
7466					downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
7467				} => {
7468					self.handle_monitor_update_release(
7469						downstream_counterparty_node_id,
7470						downstream_funding_outpoint,
7471						downstream_channel_id,
7472						Some(blocking_action),
7473					);
7474				},
7475			}
7476		}
7477
7478		for (node_id, funding_outpoint, channel_id, blocker) in freed_channels {
7479			self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
7480		}
7481	}
7482
7483	/// Handles a channel reentering a functional state, either due to reconnect or a monitor
7484	/// update completion.
7485	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
7486		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
7487		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
7488		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
7489		funding_broadcastable: Option<Transaction>,
7490		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
7491		tx_signatures: Option<msgs::TxSignatures>
7492	) -> (Option<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
7493		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
7494		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures",
7495			&channel.context.channel_id(),
7496			if raa.is_some() { "an" } else { "no" },
7497			if commitment_update.is_some() { "a" } else { "no" },
7498			pending_forwards.len(), pending_update_adds.len(),
7499			if funding_broadcastable.is_some() { "" } else { "not " },
7500			if channel_ready.is_some() { "sending" } else { "without" },
7501			if announcement_sigs.is_some() { "sending" } else { "without" },
7502			if tx_signatures.is_some() { "sending" } else { "without" },
7503		);
7504
7505		let counterparty_node_id = channel.context.get_counterparty_node_id();
7506		let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());
7507
7508		let mut htlc_forwards = None;
7509		if !pending_forwards.is_empty() {
7510			htlc_forwards = Some((
7511				short_channel_id, Some(channel.context.get_counterparty_node_id()),
7512				channel.context.get_funding_txo().unwrap(), channel.context.channel_id(),
7513				channel.context.get_user_id(), pending_forwards
7514			));
7515		}
7516		let mut decode_update_add_htlcs = None;
7517		if !pending_update_adds.is_empty() {
7518			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
7519		}
7520
7521		if let Some(msg) = channel_ready {
7522			send_channel_ready!(self, pending_msg_events, channel, msg);
7523		}
7524		if let Some(msg) = announcement_sigs {
7525			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
7526				node_id: counterparty_node_id,
7527				msg,
7528			});
7529		}
7530		if let Some(msg) = tx_signatures {
7531			pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
7532				node_id: counterparty_node_id,
7533				msg,
7534			});
7535		}
7536
7537		macro_rules! handle_cs { () => {
7538			if let Some(update) = commitment_update {
7539				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
7540					node_id: counterparty_node_id,
7541					updates: update,
7542				});
7543			}
7544		} }
7545		macro_rules! handle_raa { () => {
7546			if let Some(revoke_and_ack) = raa {
7547				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
7548					node_id: counterparty_node_id,
7549					msg: revoke_and_ack,
7550				});
7551			}
7552		} }
7553		match order {
7554			RAACommitmentOrder::CommitmentFirst => {
7555				handle_cs!();
7556				handle_raa!();
7557			},
7558			RAACommitmentOrder::RevokeAndACKFirst => {
7559				handle_raa!();
7560				handle_cs!();
7561			},
7562		}
7563
7564		if let Some(tx) = funding_broadcastable {
7565			if channel.context.is_manual_broadcast() {
7566				log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
7567				let mut pending_events = self.pending_events.lock().unwrap();
7568				match channel.context.get_funding_txo() {
7569					Some(funding_txo) => {
7570						emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
7571					},
7572					None => {
7573						debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
7574						return (htlc_forwards, decode_update_add_htlcs);
7575					}
7576				};
7577			} else {
7578				log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
7579				self.tx_broadcaster.broadcast_transactions(&[&tx]);
7580			}
7581		}
7582
7583		{
7584			let mut pending_events = self.pending_events.lock().unwrap();
7585			emit_channel_pending_event!(pending_events, channel);
7586			emit_channel_ready_event!(pending_events, channel);
7587		}
7588
7589		(htlc_forwards, decode_update_add_htlcs)
7590	}
7591
7592	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
7593		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
7594
7595		let counterparty_node_id = match counterparty_node_id {
7596			Some(cp_id) => cp_id.clone(),
7597			None => {
7598				// TODO: Once we can rely on the counterparty_node_id from the
7599				// monitor event, this and the outpoint_to_peer map should be removed.
7600				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
7601				match outpoint_to_peer.get(funding_txo) {
7602					Some(cp_id) => cp_id.clone(),
7603					None => return,
7604				}
7605			}
7606		};
7607		let per_peer_state = self.per_peer_state.read().unwrap();
7608		let mut peer_state_lock;
7609		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
7610		if peer_state_mutex_opt.is_none() { return }
7611		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
7612		let peer_state = &mut *peer_state_lock;
7613
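		// Drop any in-flight updates that the `ChannelMonitor` has now applied; we only resume
		// channel operation once no updates remain in flight.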
7614		let remaining_in_flight =
7615			if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
7616				pending.retain(|upd| upd.update_id > highest_applied_update_id);
7617				pending.len()
7618			} else { 0 };
7619
7620		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
7621		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
7622			highest_applied_update_id, remaining_in_flight);
7623
7624		if remaining_in_flight != 0 {
7625			return;
7626		}
7627
7628		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
7629			if chan.is_awaiting_monitor_update() {
7630				if chan.blocked_monitor_updates_pending() == 0 {
7631					log_trace!(logger, "Channel is open and awaiting update, resuming it");
7632					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
7633				} else {
7634					log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
7635				}
7636			} else {
7637				log_trace!(logger, "Channel is open but not awaiting update");
7638			}
7639		} else {
7640			let update_actions = peer_state.monitor_update_blocked_actions
7641				.remove(channel_id).unwrap_or(Vec::new());
7642			log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
7643			mem::drop(peer_state_lock);
7644			mem::drop(per_peer_state);
7645			self.handle_monitor_update_completion_actions(update_actions);
7646		}
7647	}
7648
7649	/// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
7650	///
7651	/// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
7652	/// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
7653	/// the channel.
7654	///
7655	/// The `user_channel_id` parameter will be provided back in
7656	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
7657	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
7658	///
7659	/// Note that this method will return an error and reject the channel if it requires support
7660	/// for zero confirmations. Instead, `accept_inbound_channel_from_trusted_peer_0conf` must be
7661	/// used to accept such channels.
7662	///
7663	/// NOTE: LDK makes no attempt to prevent the counterparty from using non-standard inputs, which
7664	/// would prevent the funding transaction from being relayed on the Bitcoin network and hence from
7665	/// being confirmed.
7666	///
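	/// # Example
	///
	/// A minimal sketch (not part of the official documentation) of accepting a channel from an
	/// event handler. It assumes `UserConfig::manually_accept_inbound_channels` is set (otherwise
	/// no [`Event::OpenChannelRequest`] is generated) and that `channel_manager` is a running
	/// [`ChannelManager`]:
	///
	/// ```ignore
	/// use lightning::events::Event;
	///
	/// match event {
	/// 	Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
	/// 		// Apply your own policy (funding amount, channel type, peer reputation, ...) here.
	/// 		// `42` is an arbitrary example `user_channel_id`.
	/// 		if let Err(e) = channel_manager.accept_inbound_channel(
	/// 			&temporary_channel_id, &counterparty_node_id, 42
	/// 		) {
	/// 			eprintln!("Failed to accept inbound channel: {:?}", e);
	/// 		}
	/// 	},
	/// 	_ => {},
	/// }
	/// ```
	///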
7667	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
7668	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
7669	pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7670		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id, vec![], Weight::from_wu(0))
7671	}
7672
7673	/// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating
7674	/// it as confirmed immediately.
7675	///
7676	/// The `user_channel_id` parameter will be provided back in
7677	/// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
7678	/// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
7679	///
7680	/// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
7681	/// and (if the counterparty agrees), enables forwarding of payments immediately.
7682	///
7683	/// This fully trusts that the counterparty has honestly and correctly constructed the funding
7684	/// transaction and blindly assumes that it will eventually confirm.
7685	///
7686	/// If it does not confirm before we decide to close the channel, or if the funding transaction
7687	/// does not pay the correct amount to the correct script, *you will lose funds*.
7688	///
7689	/// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
7690	/// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
7691	pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7692		self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id, vec![], Weight::from_wu(0))
7693	}
7694
7695	fn do_accept_inbound_channel(
7696		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
7697		user_channel_id: u128, _funding_inputs: Vec<(TxIn, TransactionU16LenLimited)>,
7698		_total_witness_weight: Weight,
7699	) -> Result<(), APIError> {
7700		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
7701		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
7702
7703		let peers_without_funded_channels =
7704			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
7705		let per_peer_state = self.per_peer_state.read().unwrap();
7706		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
7707		.ok_or_else(|| {
7708			let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
7709			log_error!(logger, "{}", err_str);
7710
7711			APIError::ChannelUnavailable { err: err_str }
7712		})?;
7713		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7714		let peer_state = &mut *peer_state_lock;
7715		let is_only_peer_channel = peer_state.total_channel_count() == 1;
7716
7717		// Find (and remove) the channel in the unaccepted table. If it's not there, something weird is
7718		// happening, so return an error. N.B. that we create the channel with an outbound SCID of zero so
7719		// that we can delay allocating the SCID until after we're sure that the checks below will
7720		// succeed.
7721		let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
7722			Some(unaccepted_channel) => {
7723				let best_block_height = self.best_block.read().unwrap().height;
7724				match unaccepted_channel.open_channel_msg {
7725					OpenChannelMessage::V1(open_channel_msg) => {
7726						InboundV1Channel::new(
7727							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
7728							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
7729							user_channel_id, &self.default_configuration, best_block_height, &self.logger, accept_0conf
7730						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
7731						).map(|mut channel| {
7732							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
7733							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
7734								events::MessageSendEvent::SendAcceptChannel {
7735									node_id: *counterparty_node_id,
7736									msg,
7737								}
7738							});
7739							(*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event)
7740						})
7741					},
7742					#[cfg(dual_funding)]
7743					OpenChannelMessage::V2(open_channel_msg) => {
7744						InboundV2Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
7745							self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), &peer_state.latest_features,
7746							&open_channel_msg, _funding_inputs, _total_witness_weight, user_channel_id,
7747							&self.default_configuration, best_block_height, &self.logger
7748						).map_err(|_| MsgHandleErrInternal::from_chan_no_close(
7749							ChannelError::Close(
7750								(
7751									"V2 channel rejected due to sender error".into(),
7752									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
7753								)
7754							), *temporary_channel_id)
7755						).map(|channel| {
7756							let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
7757								node_id: channel.context.get_counterparty_node_id(),
7758								msg: channel.accept_inbound_dual_funded_channel()
7759							};
7760							(channel.context.channel_id(), ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
7761						})
7762					},
7763				}
7764			},
7765			None => {
7766				let err_str = "No such channel awaiting to be accepted.".to_owned();
7767				log_error!(logger, "{}", err_str);
7768
7769				return Err(APIError::APIMisuseError { err: err_str });
7770			}
7771		};
7772
7773		// We have to match below instead of using map_err above: inside the map_err closure the borrow
7774		// checker would consider peer_state moved even though we would bail out with the `?` operator.
7775		let (channel_id, mut channel_phase, message_send_event) = match res {
7776			Ok(res) => res,
7777			Err(err) => {
7778				mem::drop(peer_state_lock);
7779				mem::drop(per_peer_state);
7780				// TODO(dunxen): Find/make less icky way to do this.
7781				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
7782					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
7783					Err(e) => {
7784						return Err(APIError::ChannelUnavailable { err: e.err });
7785					},
7786				}
7787			}
7788		};
7789
7790		if accept_0conf {
7791			// This should have been correctly configured by the call to Inbound(V1/V2)Channel::new.
7792			debug_assert!(channel_phase.context().minimum_depth().unwrap() == 0);
7793		} else if channel_phase.context().get_channel_type().requires_zero_conf() {
7794			let send_msg_err_event = events::MessageSendEvent::HandleError {
7795				node_id: channel_phase.context().get_counterparty_node_id(),
7796				action: msgs::ErrorAction::SendErrorMessage{
7797					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
7798				}
7799			};
7800			peer_state.pending_msg_events.push(send_msg_err_event);
7801			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
7802			log_error!(logger, "{}", err_str);
7803
7804			return Err(APIError::APIMisuseError { err: err_str });
7805		} else {
7806			// If this peer already has some channels, a new channel won't increase our number of peers
7807			// with unfunded channels, so as long as we aren't over the maximum number of unfunded
7808			// channels per-peer we can accept channels from a peer with existing ones.
7809			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
7810				let send_msg_err_event = events::MessageSendEvent::HandleError {
7811					node_id: channel_phase.context().get_counterparty_node_id(),
7812					action: msgs::ErrorAction::SendErrorMessage{
7813						msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
7814					}
7815				};
7816				peer_state.pending_msg_events.push(send_msg_err_event);
7817				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
7818				log_error!(logger, "{}", err_str);
7819
7820				return Err(APIError::APIMisuseError { err: err_str });
7821			}
7822		}
7823
7824		// Now that we know we have a channel, assign an outbound SCID alias.
7825		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
7826		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
7827
7828		if let Some(message_send_event) = message_send_event {
7829			peer_state.pending_msg_events.push(message_send_event);
7830		}
7831		peer_state.channel_by_id.insert(channel_id, channel_phase);
7832
7833		Ok(())
7834	}
7835
7836	/// Gets the number of peers which match the given filter and do not have any funded, outbound,
7837	/// or 0-conf channels.
7838	///
7839	/// The filter is called with each peer's state and should return whether that peer should be
7840	/// counted towards the result.
7841	fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
7842	where Filter: Fn(&PeerState<SP>) -> bool {
7843		let mut peers_without_funded_channels = 0;
7844		let best_block_height = self.best_block.read().unwrap().height;
7845		{
7846			let peer_state_lock = self.per_peer_state.read().unwrap();
7847			for (_, peer_mtx) in peer_state_lock.iter() {
7848				let peer = peer_mtx.lock().unwrap();
7849				if !maybe_count_peer(&*peer) { continue; }
7850				let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
7851				if num_unfunded_channels == peer.total_channel_count() {
7852					peers_without_funded_channels += 1;
7853				}
7854			}
7855		}
7856		return peers_without_funded_channels;
7857	}
7858
7859	fn unfunded_channel_count(
7860		peer: &PeerState<SP>, best_block_height: u32
7861	) -> usize {
7862		let mut num_unfunded_channels = 0;
7863		for (_, phase) in peer.channel_by_id.iter() {
7864			match phase {
7865				ChannelPhase::Funded(chan) => {
7866					// This covers non-zero-conf inbound `Channel`s that we are currently monitoring but
7867					// which have not yet had any confirmations on-chain.
7868					if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
7869						chan.context.get_funding_tx_confirmations(best_block_height) == 0
7870					{
7871						num_unfunded_channels += 1;
7872					}
7873				},
7874				ChannelPhase::UnfundedInboundV1(chan) => {
7875					if chan.context.minimum_depth().unwrap_or(1) != 0 {
7876						num_unfunded_channels += 1;
7877					}
7878				},
7879				ChannelPhase::UnfundedInboundV2(chan) => {
7880					// Only inbound V2 channels that are not 0conf and that we do not contribute to will be
7881					// included in the unfunded count.
7882					if chan.context.minimum_depth().unwrap_or(1) != 0 &&
7883						chan.dual_funding_context.our_funding_satoshis == 0 {
7884						num_unfunded_channels += 1;
7885					}
7886				},
7887				ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedOutboundV2(_) => {
7888					// Outbound channels don't contribute to the unfunded count in the DoS context.
7889					continue;
7890				},
7891			}
7892		}
7893		num_unfunded_channels + peer.inbound_channel_request_by_id.len()
7894	}
7895
7896	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
7897		let common_fields = match msg {
7898			OpenChannelMessageRef::V1(msg) => &msg.common_fields,
7899			#[cfg(dual_funding)]
7900			OpenChannelMessageRef::V2(msg) => &msg.common_fields,
7901		};
7902
7903		// Do common open_channel(2) checks
7904
7905		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
7906		// likely to be lost on restart!
7907		if common_fields.chain_hash != self.chain_hash {
7908			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
7909				 common_fields.temporary_channel_id));
7910		}
7911
7912		if !self.default_configuration.accept_inbound_channels {
7913			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
7914				 common_fields.temporary_channel_id));
7915		}
7916
7917		// Get the number of peers with channels, but without funded ones. We don't care too much
7918		// about peers that never open a channel, so we filter by peers that have at least one
7919		// channel, and then limit the number of those with unfunded channels.
7920		let channeled_peers_without_funding =
7921			self.peers_without_funded_channels(|node| node.total_channel_count() > 0);
7922
7923		let per_peer_state = self.per_peer_state.read().unwrap();
7924		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
7925		    .ok_or_else(|| {
7926				debug_assert!(false);
7927				MsgHandleErrInternal::send_err_msg_no_close(
7928					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
7929					common_fields.temporary_channel_id)
7930			})?;
7931		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7932		let peer_state = &mut *peer_state_lock;
7933
7934		// If this peer already has some channels, a new channel won't increase our number of peers
7935		// with unfunded channels, so as long as we aren't over the maximum number of unfunded
7936		// channels per-peer we can accept channels from a peer with existing ones.
7937		if peer_state.total_channel_count() == 0 &&
7938			channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
7939			!self.default_configuration.manually_accept_inbound_channels
7940		{
7941			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7942				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
7943				common_fields.temporary_channel_id));
7944		}
7945
7946		let best_block_height = self.best_block.read().unwrap().height;
7947		if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
7948			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7949				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
7950				common_fields.temporary_channel_id));
7951		}
7952
7953		let channel_id = common_fields.temporary_channel_id;
7954		let channel_exists = peer_state.has_channel(&channel_id);
7955		if channel_exists {
7956			return Err(MsgHandleErrInternal::send_err_msg_no_close(
7957				"temporary_channel_id collision for the same peer!".to_owned(),
7958				common_fields.temporary_channel_id));
7959		}
7960
7961		// We can get the channel type at this point already as we'll need it immediately in both the
7962		// manual and the automatic acceptance cases.
7963		let channel_type = channel::channel_type_from_open_channel(
7964			common_fields, &peer_state.latest_features, &self.channel_type_features()
7965		).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;
7966
7967		// If we're doing manual acceptance checks on the channel, then defer creation until we're sure we want to accept.
7968		if self.default_configuration.manually_accept_inbound_channels {
7969			let mut pending_events = self.pending_events.lock().unwrap();
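			// Bit 0 of `channel_flags` is the `announce_channel` flag (BOLT 2).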
7970			let is_announced = (common_fields.channel_flags & 1) == 1;
7971			pending_events.push_back((events::Event::OpenChannelRequest {
7972				temporary_channel_id: common_fields.temporary_channel_id,
7973				counterparty_node_id: *counterparty_node_id,
7974				funding_satoshis: common_fields.funding_satoshis,
7975				channel_negotiation_type: match msg {
7976					OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
7977					#[cfg(dual_funding)]
7978					OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
7979				},
7980				channel_type,
7981				is_announced,
7982				params: common_fields.channel_parameters(),
7983			}, None));
7984			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
7985				open_channel_msg: match msg {
7986					OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
7987					#[cfg(dual_funding)]
7988					OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
7989				},
7990				ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
7991			});
7992			return Ok(());
7993		}
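		// Illustrative usage sketch (not part of this handler): with
		// `manually_accept_inbound_channels` set, the user is expected to respond to the
		// `Event::OpenChannelRequest` queued above, roughly along these lines (exact method
		// signatures vary between releases, and `user_channel_id` is any identifier the
		// caller picks, so treat the details below as assumptions):
		//
		//   Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
		//       channel_manager.accept_inbound_channel(
		//           &temporary_channel_id, &counterparty_node_id, user_channel_id)?;
		//       // For trusted peers, a 0conf variant such as
		//       // `accept_inbound_channel_from_trusted_peer_0conf` may be used instead, and
		//       // unwanted requests can be rejected by force-closing the pending channel.
		//   }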
7994
7995		// Otherwise create the channel right now.
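		// No user-provided `user_channel_id` exists for channels we accept automatically, so
		// generate a random one to keep inbound channels distinguishable to the user.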
7996		let mut random_bytes = [0u8; 16];
7997		random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
7998		let user_channel_id = u128::from_be_bytes(random_bytes);
7999
8000		if channel_type.requires_zero_conf() {
8001			return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
8002		}
8003		if channel_type.requires_anchors_zero_fee_htlc_tx() {
8004			return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
8005		}
8006
8007		let (mut channel_phase, message_send_event) = match msg {
8008			OpenChannelMessageRef::V1(msg) => {
8009				let mut channel = InboundV1Channel::new(
8010					&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
8011					&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
8012					&self.default_configuration, best_block_height, &self.logger, /*is_0conf=*/false
8013				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
8014				let logger = WithChannelContext::from(&self.logger, &channel.context, None);
8015				let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
8016					events::MessageSendEvent::SendAcceptChannel {
8017						node_id: *counterparty_node_id,
8018						msg,
8019					}
8020				});
8021				(ChannelPhase::UnfundedInboundV1(channel), message_send_event)
8022			},
8023			#[cfg(dual_funding)]
8024			OpenChannelMessageRef::V2(msg) => {
8025				let channel = InboundV2Channel::new(&self.fee_estimator, &self.entropy_source,
8026					&self.signer_provider, self.get_our_node_id(), *counterparty_node_id,
8027					&self.channel_type_features(), &peer_state.latest_features, msg, vec![], Weight::from_wu(0),
8028					user_channel_id, &self.default_configuration, best_block_height, &self.logger
8029				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
8030				let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
8031					node_id: *counterparty_node_id,
8032					msg: channel.accept_inbound_dual_funded_channel(),
8033				};
8034				(ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
8035			},
8036		};
8037
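		// Allocate a random SCID alias for the new channel so it can be referred to before
		// (or instead of) a confirmed short channel id, e.g. while the funding transaction is
		// unconfirmed or for unannounced channels.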
8038		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
8039		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);
8040
8041		if let Some(message_send_event) = message_send_event {
8042			peer_state.pending_msg_events.push(message_send_event);
8043		}
8044		peer_state.channel_by_id.insert(channel_phase.context().channel_id(), channel_phase);
8045
8046		Ok(())
8047	}
8048
8049	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
8050		// Note that the ChannelManager is NOT re-persisted on disk after this, so any changes are
8051		// likely to be lost on restart!
8052		let (value, output_script, user_id) = {
8053			let per_peer_state = self.per_peer_state.read().unwrap();
8054			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8055				.ok_or_else(|| {
8056					debug_assert!(false);
8057					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
8058				})?;
8059			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8060			let peer_state = &mut *peer_state_lock;
8061			match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
8062				hash_map::Entry::Occupied(mut phase) => {
8063					match phase.get_mut() {
8064						ChannelPhase::UnfundedOutboundV1(chan) => {
8065							try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
8066							(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
8067						},
8068						_ => {
8069							return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
8070						}
8071					}
8072				},
8073				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
8074			}
8075		};
8076		let mut pending_events = self.pending_events.lock().unwrap();
8077		pending_events.push_back((events::Event::FundingGenerationReady {
8078			temporary_channel_id: msg.common_fields.temporary_channel_id,
8079			counterparty_node_id: *counterparty_node_id,
8080			channel_value_satoshis: value,
8081			output_script,
8082			user_channel_id: user_id,
8083		}, None));
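		// Illustrative usage sketch (not part of this handler): in response to the
		// `Event::FundingGenerationReady` queued above, the user builds a funding transaction
		// paying `channel_value_satoshis` to `output_script` and hands it back, roughly as
		// follows (exact method signature varies between releases;
		// `build_funding_transaction` is a hypothetical wallet helper):
		//
		//   Event::FundingGenerationReady {
		//       temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, ..
		//   } => {
		//       let funding_tx = build_funding_transaction(channel_value_satoshis, &output_script);
		//       channel_manager.funding_transaction_generated(
		//           temporary_channel_id, counterparty_node_id, funding_tx)?;
		//   }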
8084		Ok(())
8085	}
8086
8087	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
8088		let best_block = *self.best_block.read().unwrap();
8089
8090		let per_peer_state = self.per_peer_state.read().unwrap();
8091		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8092			.ok_or_else(|| {
8093				debug_assert!(false);
8094				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
8095			})?;
8096
8097		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8098		let peer_state = &mut *peer_state_lock;
8099		let (mut chan, funding_msg_opt, monitor) =
8100			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
8101				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
8102					let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
8103					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
8104						Ok(res) => res,
8105						Err((inbound_chan, err)) => {
8106							// We've already removed this inbound channel from the map in `PeerState`
8107							// above so at this point we just need to clean up any lingering entries
8108							// concerning this channel as it is safe to do so.
8109							debug_assert!(matches!(err, ChannelError::Close(_)));
8110							// Really we should be returning the channel_id the peer expects based
8111							// on their funding info here, but they're horribly confused anyway, so
8112							// there's not a lot we can do to save them.
8113							return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
8114						},
8115					}
8116				},
8117				Some(mut phase) => {
8118					let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
8119					let err = ChannelError::close(err_msg);
8120					return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1);
8121				},
8122				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
8123			};
8124
8125		let funded_channel_id = chan.context.channel_id();
8126
8127		macro_rules! fail_chan { ($err: expr) => { {
8128			// Note that at this point we've filled in the funding outpoint on our
8129			// channel, but it's actually in conflict with another channel. Thus, if
8130			// we call `convert_chan_phase_err` immediately (thus calling
8131			// `locked_close_channel`), we'll remove the existing channel from `outpoint_to_peer`.
8132			// To avoid that, we must first unset the funding outpoint on the channel.
8133			let err = ChannelError::close($err.to_owned());
8134			chan.unset_funding_info(msg.temporary_channel_id);
8135			return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
8136		} } }
8137
8138		match peer_state.channel_by_id.entry(funded_channel_id) {
8139			hash_map::Entry::Occupied(_) => {
8140				fail_chan!("Already had channel with the new channel_id");
8141			},
8142			hash_map::Entry::Vacant(e) => {
8143				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
8144				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
8145					hash_map::Entry::Occupied(_) => {
8146						fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
8147					},
8148					hash_map::Entry::Vacant(i_e) => {
8149						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
8150						if let Ok(persist_state) = monitor_res {
8151							i_e.insert(chan.context.get_counterparty_node_id());
8152							mem::drop(outpoint_to_peer_lock);
8153
8154							// There's no problem signing a counterparty's funding transaction if our monitor
8155							// hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
8156							// accepted payment from yet. We do, however, need to wait to send our channel_ready
8157							// until we have persisted our monitor.
8158							if let Some(msg) = funding_msg_opt {
8159								peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
8160									node_id: counterparty_node_id.clone(),
8161									msg,
8162								});
8163							}
8164
8165							if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
8166								handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
8167									per_peer_state, chan, INITIAL_MONITOR);
8168							} else {
8169								unreachable!("This must be a funded channel as we just inserted it.");
8170							}
8171							Ok(())
8172						} else {
8173							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8174							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
8175							fail_chan!("Duplicate funding outpoint");
8176						}
8177					}
8178				}
8179			}
8180		}
8181	}
8182
8183	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
8184		let best_block = *self.best_block.read().unwrap();
8185		let per_peer_state = self.per_peer_state.read().unwrap();
8186		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8187			.ok_or_else(|| {
8188				debug_assert!(false);
8189				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8190			})?;
8191
8192		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8193		let peer_state = &mut *peer_state_lock;
8194		match peer_state.channel_by_id.entry(msg.channel_id) {
8195			hash_map::Entry::Occupied(chan_phase_entry) => {
8196				if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
8197					let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
8198					let logger = WithContext::from(
8199						&self.logger,
8200						Some(chan.context.get_counterparty_node_id()),
8201						Some(chan.context.channel_id()),
8202						None
8203					);
8204					let res =
8205						chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
8206					match res {
8207						Ok((mut chan, monitor)) => {
8208							if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
8209								// We really should be able to insert here without doing a second
8210								// lookup, but sadly rust stdlib doesn't currently allow keeping
8211								// the original Entry around with the value removed.
8212								let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
8213								if let ChannelPhase::Funded(ref mut chan) = &mut chan {
8214									handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
8215								} else { unreachable!(); }
8216								Ok(())
8217							} else {
8218								let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
8219								// We weren't able to watch the channel to begin with, so no
8220								// updates should be made on it. Previously, full_stack_target
8221								// found an (unreachable) panic when the monitor update contained
8222								// within `shutdown_finish` was applied.
8223								chan.unset_funding_info(msg.channel_id);
8224								return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
8225							}
8226						},
8227						Err((chan, e)) => {
8228							debug_assert!(matches!(e, ChannelError::Close(_)),
8229								"We don't have a channel anymore, so the error had better expect a close");
8230							// We've already removed this outbound channel from the map in
8231							// `PeerState` above so at this point we just need to clean up any
8232							// lingering entries concerning this channel as it is safe to do so.
8233							return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
8234						}
8235					}
8236				} else {
8237					return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
8238				}
8239			},
8240			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
8241		}
8242	}
8243
8244	fn internal_tx_msg<HandleTxMsgFn: Fn(&mut ChannelPhase<SP>) -> Result<MessageSendEvent, &'static str>>(
8245		&self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn
8246	) -> Result<(), MsgHandleErrInternal> {
8247		let per_peer_state = self.per_peer_state.read().unwrap();
8248		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8249			.ok_or_else(|| {
8250				debug_assert!(false);
8251				MsgHandleErrInternal::send_err_msg_no_close(
8252					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8253					channel_id)
8254			})?;
8255		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8256		let peer_state = &mut *peer_state_lock;
8257		match peer_state.channel_by_id.entry(channel_id) {
8258			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8259				let channel_phase = chan_phase_entry.get_mut();
8260				let msg_send_event = match tx_msg_handler(channel_phase) {
8261					Ok(msg_send_event) => msg_send_event,
8262				Err(tx_msg_str) => return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8263						format!("Got a {tx_msg_str} message with no interactive transaction construction expected or in-progress")
8264					), channel_id)),
8265				};
8266				peer_state.pending_msg_events.push(msg_send_event);
8267				Ok(())
8268			},
8269			hash_map::Entry::Vacant(_) => {
8270				Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
8271					"Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
8272					counterparty_node_id), channel_id)
8273				)
8274			}
8275		}
8276	}
8277
8278	fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> {
8279		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8280			match channel_phase {
8281				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8282					Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8283				},
8284				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8285					Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8286				},
8287				_ => Err("tx_add_input"),
8288			}
8289		})
8290	}
8291
8292	fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> {
8293		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8294			match channel_phase {
8295				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8296					Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8297				},
8298				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8299					Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8300				},
8301				_ => Err("tx_add_output"),
8302			}
8303		})
8304	}
8305
8306	fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> {
8307		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8308			match channel_phase {
8309				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8310					Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8311				},
8312				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8313					Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8314				},
8315				_ => Err("tx_remove_input"),
8316			}
8317		})
8318	}
8319
8320	fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> {
8321		self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8322			match channel_phase {
8323				ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8324					Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8325				},
8326				ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8327					Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8328				},
8329				_ => Err("tx_remove_output"),
8330			}
8331		})
8332	}
8333
8334	fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> {
8335		let per_peer_state = self.per_peer_state.read().unwrap();
8336		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
8337			.ok_or_else(|| {
8338				debug_assert!(false);
8339				MsgHandleErrInternal::send_err_msg_no_close(
8340					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8341					msg.channel_id)
8342			})?;
8343		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8344		let peer_state = &mut *peer_state_lock;
8345		match peer_state.channel_by_id.entry(msg.channel_id) {
8346			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8347				let channel_phase = chan_phase_entry.get_mut();
8348				let (msg_send_event_opt, signing_session_opt) = match channel_phase {
8349					ChannelPhase::UnfundedInboundV2(channel) => channel.tx_complete(msg)
8350						.into_msg_send_event_or_signing_session(counterparty_node_id),
8351					ChannelPhase::UnfundedOutboundV2(channel) => channel.tx_complete(msg)
8352						.into_msg_send_event_or_signing_session(counterparty_node_id),
8353					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8354						(
8355							"Got a tx_complete message with no interactive transaction construction expected or in-progress".into(),
8356							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8357						))), chan_phase_entry)
8358				};
8359				if let Some(msg_send_event) = msg_send_event_opt {
8360					peer_state.pending_msg_events.push(msg_send_event);
8361				};
8362				if let Some(mut signing_session) = signing_session_opt {
8363					let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() {
8364						ChannelPhase::UnfundedOutboundV2(chan) => {
8365							chan.funding_tx_constructed(&mut signing_session, &self.logger)
8366						},
8367						ChannelPhase::UnfundedInboundV2(chan) => {
8368							chan.funding_tx_constructed(&mut signing_session, &self.logger)
8369						},
8370						_ => Err(ChannelError::Warn(
8371							"Got a tx_complete message with no interactive transaction construction expected or in-progress"
8372							.into())),
8373					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
8374					let (channel_id, channel_phase) = chan_phase_entry.remove_entry();
8375					let channel = match channel_phase {
8376						ChannelPhase::UnfundedOutboundV2(chan) => chan.into_channel(signing_session),
8377						ChannelPhase::UnfundedInboundV2(chan) => chan.into_channel(signing_session),
8378						_ => {
8379							debug_assert!(false); // It cannot be another variant as we are in the `Ok` branch of the above match.
8380							Err(ChannelError::Warn(
8381								"Got a tx_complete message with no interactive transaction construction expected or in-progress"
8382									.into()))
8383						},
8384					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
8385					peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel));
8386					if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt {
8387						let mut pending_events = self.pending_events.lock().unwrap();
8388						pending_events.push_back((funding_ready_for_sig_event, None));
8389					}
8390					peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
8391						node_id: counterparty_node_id,
8392						updates: CommitmentUpdate {
8393							commitment_signed,
8394							update_add_htlcs: vec![],
8395							update_fulfill_htlcs: vec![],
8396							update_fail_htlcs: vec![],
8397							update_fail_malformed_htlcs: vec![],
8398							update_fee: None,
8399						},
8400					});
8401				}
8402				Ok(())
8403			},
8404			hash_map::Entry::Vacant(_) => {
8405				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8406			}
8407		}
8408	}
8409
8410	fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
8411	-> Result<(), MsgHandleErrInternal> {
8412		let per_peer_state = self.per_peer_state.read().unwrap();
8413		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8414			.ok_or_else(|| {
8415				debug_assert!(false);
8416				MsgHandleErrInternal::send_err_msg_no_close(
8417					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8418					msg.channel_id)
8419			})?;
8420		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8421		let peer_state = &mut *peer_state_lock;
8422		match peer_state.channel_by_id.entry(msg.channel_id) {
8423			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8424				let channel_phase = chan_phase_entry.get_mut();
8425				match channel_phase {
8426					ChannelPhase::Funded(chan) => {
8427						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8428						let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
8429						if let Some(tx_signatures) = tx_signatures_opt {
8430							peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
8431								node_id: *counterparty_node_id,
8432								msg: tx_signatures,
8433							});
8434						}
8435						if let Some(ref funding_tx) = funding_tx_opt {
8436							self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
8437							{
8438								let mut pending_events = self.pending_events.lock().unwrap();
8439								emit_channel_pending_event!(pending_events, chan);
8440							}
8441						}
8442					},
8443					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8444						(
8445							"Got an unexpected tx_signatures message".into(),
8446							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8447						))), chan_phase_entry)
8448				}
8449				Ok(())
8450			},
8451			hash_map::Entry::Vacant(_) => {
8452				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8453			}
8454		}
8455	}
8456
8457	fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
8458	-> Result<(), MsgHandleErrInternal> {
8459		let per_peer_state = self.per_peer_state.read().unwrap();
8460		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8461			.ok_or_else(|| {
8462				debug_assert!(false);
8463				MsgHandleErrInternal::send_err_msg_no_close(
8464					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8465					msg.channel_id)
8466			})?;
8467		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8468		let peer_state = &mut *peer_state_lock;
8469		match peer_state.channel_by_id.entry(msg.channel_id) {
8470			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8471				let channel_phase = chan_phase_entry.get_mut();
8472				let tx_constructor = match channel_phase {
8473					ChannelPhase::UnfundedInboundV2(chan) => chan.interactive_tx_constructor_mut(),
8474					ChannelPhase::UnfundedOutboundV2(chan) => chan.interactive_tx_constructor_mut(),
8475					ChannelPhase::Funded(_) => {
8476						// TODO(splicing)/TODO(RBF): We'll also be doing interactive tx construction
8477						// for a "ChannelPhase::Funded" when we want to bump the fee on an interactively
8478						// constructed funding tx or during splicing. For now we send an error, as we
8479						// don't yet ack an RBF attempt or a splice:
8480						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
8481							"Got an unexpected tx_abort message: After initial funding transaction is signed, \
8482							splicing and RBF attempts of interactive funding transactions are not supported yet so \
8483							we don't have any negotiation in progress".into(),
8484						)), chan_phase_entry)
8485					}
8486					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
8487						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
8488							"Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \
8489							establishment".into(),
8490						)), chan_phase_entry)
8491					},
8492				};
8493				// This checks for and resets the interactive negotiation state by `take()`ing it from the channel.
8494				// The existence of the `tx_constructor` indicates that we have not moved into the signing
8495				// phase for this interactively constructed transaction and hence we have not exchanged
8496				// `tx_signatures`. Either way, we never close the channel upon receiving a `tx_abort`:
8497				//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L574-L576
8498				if tx_constructor.take().is_some() {
8499					let msg = msgs::TxAbort {
8500						channel_id: msg.channel_id,
8501						data: "Acknowledged tx_abort".to_string().into_bytes(),
8502					};
8503					// NOTE: Since at this point we have not sent a `tx_abort` message for this negotiation
8504					// previously (tx_constructor was `Some`), we need to echo back a tx_abort message according
8505					// to the spec:
8506					//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L560-L561
8507					// For rationale why we echo back `tx_abort`:
8508					//   https://github.com/lightning/bolts/blob/247e83d/02-peer-protocol.md?plain=1#L578-L580
8509					peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort {
8510						node_id: *counterparty_node_id,
8511						msg,
8512					});
8513				}
8514				Ok(())
8515			},
8516			hash_map::Entry::Vacant(_) => {
8517				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8518			}
8519		}
8520	}
8521
8522	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
8523		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8524		// closing a channel), so any changes are likely to be lost on restart!
8525		let per_peer_state = self.per_peer_state.read().unwrap();
8526		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8527			.ok_or_else(|| {
8528				debug_assert!(false);
8529				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8530			})?;
8531		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8532		let peer_state = &mut *peer_state_lock;
8533		match peer_state.channel_by_id.entry(msg.channel_id) {
8534			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8535				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8536					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8537					let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
8538						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
8539					if let Some(announcement_sigs) = announcement_sigs_opt {
8540						log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
8541						peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
8542							node_id: counterparty_node_id.clone(),
8543							msg: announcement_sigs,
8544						});
8545					} else if chan.context.is_usable() {
8546						// If we're sending an announcement_signatures, we'll send the (public)
8547						// channel_update after sending a channel_announcement when we receive our
8548						// counterparty's announcement_signatures. Thus, we only bother to send a
8549						// channel_update here if the channel is not public, i.e. we're not sending an
8550						// announcement_signatures.
8551						log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
8552						if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
8553							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
8554								node_id: counterparty_node_id.clone(),
8555								msg,
8556							});
8557						}
8558					}
8559
8560					{
8561						let mut pending_events = self.pending_events.lock().unwrap();
8562						emit_channel_ready_event!(pending_events, chan);
8563					}
8564
8565					Ok(())
8566				} else {
8567					try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8568						"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
8569				}
8570			},
8571			hash_map::Entry::Vacant(_) => {
8572				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8573			}
8574		}
8575	}
8576
8577	fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
8578		let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
8579		let mut finish_shutdown = None;
8580		{
8581			let per_peer_state = self.per_peer_state.read().unwrap();
8582			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8583				.ok_or_else(|| {
8584					debug_assert!(false);
8585					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8586				})?;
8587			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8588			let peer_state = &mut *peer_state_lock;
8589			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
8590				let phase = chan_phase_entry.get_mut();
8591				match phase {
8592					ChannelPhase::Funded(chan) => {
8593						if !chan.received_shutdown() {
8594							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8595							log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
8596								msg.channel_id,
8597								if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
8598						}
8599
8600						let funding_txo_opt = chan.context.get_funding_txo();
8601						let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state,
8602							chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
8603						dropped_htlcs = htlcs;
8604
8605						if let Some(msg) = shutdown {
8606							// We can send the `shutdown` message before updating the `ChannelMonitor`
8607							// here as we don't need the monitor update to complete until we send a
8608							// `closing_signed`, which we'll delay if we're pending a monitor update.
8609							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
8610								node_id: *counterparty_node_id,
8611								msg,
8612							});
8613						}
8614						// Update the monitor with the shutdown script if necessary.
8615						if let Some(monitor_update) = monitor_update_opt {
8616							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
8617								peer_state_lock, peer_state, per_peer_state, chan);
8618						}
8619					},
8620					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
8621					ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
8622						let context = phase.context_mut();
8623						let logger = WithChannelContext::from(&self.logger, context, None);
8624						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
8625						let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
8626						remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
8627						finish_shutdown = Some(close_res);
8628					},
8629				}
8630			} else {
8631				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8632			}
8633		}
8634		for htlc_source in dropped_htlcs.drain(..) {
8635			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
8636			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
8637			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
8638		}
8639		if let Some(shutdown_res) = finish_shutdown {
8640			self.finish_close_channel(shutdown_res);
8641		}
8642
8643		Ok(())
8644	}
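	// Illustrative usage note (not part of the handlers above or below): the cooperative close
	// flow handled by `internal_shutdown` and `internal_closing_signed` is typically initiated
	// locally via the public API, roughly (exact method signatures vary between releases):
	//
	//   channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
	//   // or, to bound the closing fee and/or supply a shutdown script:
	//   // channel_manager.close_channel_with_feerate_and_script(
	//   //     &channel_id, &counterparty_node_id, Some(target_feerate_sat_per_1000_weight), None)?;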
8645
8646	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
8647		let per_peer_state = self.per_peer_state.read().unwrap();
8648		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8649			.ok_or_else(|| {
8650				debug_assert!(false);
8651				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8652			})?;
8653		let (tx, chan_option, shutdown_result) = {
8654			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8655			let peer_state = &mut *peer_state_lock;
8656			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
8657				hash_map::Entry::Occupied(mut chan_phase_entry) => {
8658					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8659						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8660						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
8661						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
8662						if let Some(msg) = closing_signed {
8663							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
8664								node_id: counterparty_node_id.clone(),
8665								msg,
8666							});
8667						}
8668						if let Some(mut close_res) = shutdown_result {
8669							// We're done with this channel, we've got a signed closing transaction and
8670							// will send the closing_signed back to the remote peer upon return. This
8671							// also implies there are no pending HTLCs left on the channel, so we can
8672							// fully delete it from tracking (the channel monitor is still around to
8673							// watch for old state broadcasts)!
8674							debug_assert!(tx.is_some());
8675							let channel_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
8676							(tx, Some(channel_phase), Some(close_res))
8677						} else {
8678							debug_assert!(tx.is_none());
8679							(tx, None, None)
8680						}
8681					} else {
8682						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8683							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
8684					}
8685				},
8686				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8687			}
8688		};
8689		if let Some(broadcast_tx) = tx {
8690			let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
8691			log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
8692			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
8693		}
8694		if let Some(ChannelPhase::Funded(chan)) = chan_option {
8695			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
8696				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
8697				pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
8698					msg: update
8699				});
8700			}
8701		}
8702		mem::drop(per_peer_state);
8703		if let Some(shutdown_result) = shutdown_result {
8704			self.finish_close_channel(shutdown_result);
8705		}
8706		Ok(())
8707	}
8708
8709	fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
8710		//TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
8711		//determine the state of the payment based on our response/if we forward anything/the time
8712		//we take to respond. We should take care to avoid allowing such an attack.
8713		//
8714		//TODO: There exists a further attack where a node may garble the onion data, forward it to
8715		//us repeatedly garbled in different ways, and compare our error messages, which are
8716		//encrypted with the same key. It's not immediately obvious how to usefully exploit that,
8717		//but we should prevent it anyway.
8718
8719		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8720		// closing a channel), so any changes are likely to be lost on restart!
8721
8722		let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
8723		let per_peer_state = self.per_peer_state.read().unwrap();
8724		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8725			.ok_or_else(|| {
8726				debug_assert!(false);
8727				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8728			})?;
8729		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8730		let peer_state = &mut *peer_state_lock;
8731		match peer_state.channel_by_id.entry(msg.channel_id) {
8732			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8733				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8734					let mut pending_forward_info = match decoded_hop_res {
8735						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
8736							self.construct_pending_htlc_status(
8737								msg, counterparty_node_id, shared_secret, next_hop,
8738								chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
8739							),
8740						Err(e) => PendingHTLCStatus::Fail(e)
8741					};
8742					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
8743					// If the update_add is completely bogus, the call will Err and we will close,
8744					// but if we've sent a shutdown and they haven't acknowledged it yet, we just
8745					// want to reject the new HTLC and fail it backwards instead of forwarding.
8746					if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
8747						if msg.blinding_point.is_some() {
8748							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
8749								msgs::UpdateFailMalformedHTLC {
8750									channel_id: msg.channel_id,
8751									htlc_id: msg.htlc_id,
8752									sha256_of_onion: [0; 32],
8753									failure_code: INVALID_ONION_BLINDING,
8754								}
8755							))
8756						} else {
8757							match pending_forward_info {
8758								PendingHTLCStatus::Forward(PendingHTLCInfo {
8759									ref incoming_shared_secret, ref routing, ..
8760								}) => {
8761									let reason = if routing.blinded_failure().is_some() {
8762										HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
8763									} else if (error_code & 0x1000) != 0 {
8764										let error_data = self.get_htlc_inbound_temp_fail_data(error_code);
8765										HTLCFailReason::reason(error_code, error_data)
8766									} else {
8767										HTLCFailReason::from_failure_code(error_code)
8768									}.get_encrypted_failure_packet(incoming_shared_secret, &None);
8769									let msg = msgs::UpdateFailHTLC {
8770										channel_id: msg.channel_id,
8771										htlc_id: msg.htlc_id,
8772										reason
8773									};
8774									pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
8775								},
8776								_ => {},
8777							}
8778						}
8779					}
8780					try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
8781				} else {
8782					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8783						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
8784				}
8785			},
8786			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8787		}
8788		Ok(())
8789	}
8790
8791	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
8792		let funding_txo;
8793		let next_user_channel_id;
8794		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
8795			let per_peer_state = self.per_peer_state.read().unwrap();
8796			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8797				.ok_or_else(|| {
8798					debug_assert!(false);
8799					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8800				})?;
8801			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8802			let peer_state = &mut *peer_state_lock;
8803			match peer_state.channel_by_id.entry(msg.channel_id) {
8804				hash_map::Entry::Occupied(mut chan_phase_entry) => {
8805					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8806						let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
8807						if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
8808							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8809							log_trace!(logger,
8810								"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
8811								msg.channel_id);
8812							peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
8813								.or_insert_with(Vec::new)
8814								.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
8815						}
8816						// Note that we do not need to push an `actions_blocking_raa_monitor_updates`
8817						// entry here, even though we *do* need to block the next RAA monitor update.
8818						// We do this instead in `claim_funds_internal` by attaching a
8819						// `ReleaseRAAChannelMonitorUpdate` action to the event generated when the
8820						// outbound HTLC is claimed. This is guaranteed to all complete before we
8821						// process the RAA as messages are processed from single peers serially.
8822						funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
8823						next_user_channel_id = chan.context.get_user_id();
8824						res
8825					} else {
8826						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8827							"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
8828					}
8829				},
8830				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8831			}
8832		};
8833		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
8834			Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
8835			funding_txo, msg.channel_id, Some(next_user_channel_id),
8836		);
8837
8838		Ok(())
8839	}
8840
8841	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
8842		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8843		// closing a channel), so any changes are likely to be lost on restart!
8844		let per_peer_state = self.per_peer_state.read().unwrap();
8845		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8846			.ok_or_else(|| {
8847				debug_assert!(false);
8848				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8849			})?;
8850		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8851		let peer_state = &mut *peer_state_lock;
8852		match peer_state.channel_by_id.entry(msg.channel_id) {
8853			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8854				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8855					try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
8856				} else {
8857					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8858						"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
8859				}
8860			},
8861			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8862		}
8863		Ok(())
8864	}
8865
8866	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
8867		// Note that the ChannelManager is NOT re-persisted on disk after this (unless we error
8868		// closing a channel), so any changes are likely to be lost on restart!
8869		let per_peer_state = self.per_peer_state.read().unwrap();
8870		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8871			.ok_or_else(|| {
8872				debug_assert!(false);
8873				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8874			})?;
8875		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8876		let peer_state = &mut *peer_state_lock;
8877		match peer_state.channel_by_id.entry(msg.channel_id) {
8878			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8879				if (msg.failure_code & 0x8000) == 0 {
8880					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
8881					try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
8882				}
8883				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8884					try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
8885				} else {
8886					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8887						"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
8888				}
8889				Ok(())
8890			},
8891			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8892		}
8893	}
8894
8895	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
8896		let best_block = *self.best_block.read().unwrap();
8897		let per_peer_state = self.per_peer_state.read().unwrap();
8898		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8899			.ok_or_else(|| {
8900				debug_assert!(false);
8901				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
8902			})?;
8903		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8904		let peer_state = &mut *peer_state_lock;
8905		match peer_state.channel_by_id.entry(msg.channel_id) {
8906			hash_map::Entry::Occupied(mut chan_phase_entry) => {
8907				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
8908					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8909					let funding_txo = chan.context.get_funding_txo();
8910
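					// Dual-funded (V2) channels exchange their initial `commitment_signed` as part
					// of the interactive funding flow, so the initial `ChannelMonitor` is created
					// and handed to the chain monitor here rather than via the V1
					// `funding_created`/`funding_signed` handlers.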
8911					if chan.interactive_tx_signing_session.is_some() {
8912						let monitor = try_chan_phase_entry!(
8913							self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger),
8914							chan_phase_entry);
8915						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
8916						if let Ok(persist_state) = monitor_res {
8917							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
8918								per_peer_state, chan, INITIAL_MONITOR);
8919						} else {
8920							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
8921							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
8922							try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
8923								(
8924									"Channel funding outpoint was a duplicate".to_owned(),
8925									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
8926								)
8927							)), chan_phase_entry)
8928						}
8929					} else {
8930						let monitor_update_opt = try_chan_phase_entry!(
8931							self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry);
8932						if let Some(monitor_update) = monitor_update_opt {
8933							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
8934								peer_state, per_peer_state, chan);
8935						}
8936					}
8937					Ok(())
8938				} else {
8939					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
8940						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
8941				}
8942			},
8943			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
8944		}
8945	}
8946
8947	fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
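		// Only queue a new `PendingHTLCsForwardable` event if both the forward queue and the
		// decode queue were empty before this call; otherwise an event covering the pending
		// HTLCs has already been generated.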
8948		let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
8949		let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
8950		push_forward_event &= decode_update_add_htlcs.is_empty();
8951		let scid = update_add_htlcs.0;
8952		match decode_update_add_htlcs.entry(scid) {
8953			hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
8954			hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
8955		}
8956		if push_forward_event { self.push_pending_forwards_ev(); }
8957	}
8958
8959	#[inline]
8960	fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
8961		let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
8962		if push_forward_event { self.push_pending_forwards_ev() }
8963	}
8964
8965	#[inline]
8966	fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
8967		let mut push_forward_event = false;
8968		for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
8969			let mut new_intercept_events = VecDeque::new();
8970			let mut failed_intercept_forwards = Vec::new();
8971			if !pending_forwards.is_empty() {
8972				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
8973					let scid = match forward_info.routing {
8974						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
8975						PendingHTLCRouting::Receive { .. } => 0,
8976						PendingHTLCRouting::ReceiveKeysend { .. } => 0,
8977					};
8978					// Pull this now to avoid introducing a lock-order dependency with `forward_htlcs`.
8979					let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
8980
8981					let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
8982					let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
8983					let forward_htlcs_empty = forward_htlcs.is_empty();
8984					match forward_htlcs.entry(scid) {
8985						hash_map::Entry::Occupied(mut entry) => {
8986							entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
8987								prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
8988								prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
8989							}));
8990						},
8991						hash_map::Entry::Vacant(entry) => {
8992							if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
8993							   fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
8994							{
8995								let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
8996								let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
8997								match pending_intercepts.entry(intercept_id) {
8998									hash_map::Entry::Vacant(entry) => {
8999										new_intercept_events.push_back((events::Event::HTLCIntercepted {
9000											requested_next_hop_scid: scid,
9001											payment_hash: forward_info.payment_hash,
9002											inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
9003											expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
9004											intercept_id
9005										}, None));
9006										entry.insert(PendingAddHTLCInfo {
9007											prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9008											prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9009										});
9010									},
9011									hash_map::Entry::Occupied(_) => {
9012										let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
9013										log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
9014										let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
9015											short_channel_id: prev_short_channel_id,
9016											user_channel_id: Some(prev_user_channel_id),
9017											counterparty_node_id: prev_counterparty_node_id,
9018											outpoint: prev_funding_outpoint,
9019											channel_id: prev_channel_id,
9020											htlc_id: prev_htlc_id,
9021											incoming_packet_shared_secret: forward_info.incoming_shared_secret,
9022											phantom_shared_secret: None,
9023											blinded_failure: forward_info.routing.blinded_failure(),
9024											cltv_expiry: forward_info.routing.incoming_cltv_expiry(),
9025										});
9026
9027										failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
9028												HTLCFailReason::from_failure_code(0x4000 | 10),
9029												HTLCDestination::InvalidForward { requested_forward_scid: scid },
9030										));
9031									}
9032								}
9033							} else {
9034								// We don't want to generate a PendingHTLCsForwardable event if only intercepted
9035								// payments are being processed.
9036								push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
9037								entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
9038									prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
9039									prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
9040								})));
9041							}
9042						}
9043					}
9044				}
9045			}
9046
9047			for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
9048				push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
9049			}
9050
9051			if !new_intercept_events.is_empty() {
9052				let mut events = self.pending_events.lock().unwrap();
9053				events.append(&mut new_intercept_events);
9054			}
9055		}
9056		push_forward_event
9057	}
9058
9059	fn push_pending_forwards_ev(&self) {
9060		let mut pending_events = self.pending_events.lock().unwrap();
9061		let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
9062		let num_forward_events = pending_events.iter().filter(|(ev, _)|
9063			matches!(ev, events::Event::PendingHTLCsForwardable { .. })
9064		).count();
9065		// We only want to push a PendingHTLCsForwardable event if no others are queued. Processing
9066		// events is done in batches and they are not removed until we're done processing each
9067		// batch. Since handling a `PendingHTLCsForwardable` event will call back into the
9068		// `ChannelManager`, we'll still see the original forwarding event in the queue while it is
9069		// being handled. Phantom payments require an additional forwarding event before being
9070		// claimed so that the extra delay makes them look like real forwarded payments.
9071		if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
9072			pending_events.push_back((Event::PendingHTLCsForwardable {
9073				time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
9074			}, None));
9075		}
9076	}
9077
9078	/// Checks whether [`ChannelMonitorUpdate`]s generated by the receipt of a remote
9079	/// [`msgs::RevokeAndACK`] should be held for the given channel until some other action
9080	/// completes. Note that this needs to happen in the same [`PeerState`] mutex as any release of
9081	/// the [`ChannelMonitorUpdate`] in question.
9082	fn raa_monitor_updates_held(&self,
9083		actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
9084		channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
9085	) -> bool {
9086		actions_blocking_raa_monitor_updates
9087			.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
9088		|| self.pending_events.lock().unwrap().iter().any(|(_, action)| {
9089			action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9090				channel_funding_outpoint,
9091				channel_id,
9092				counterparty_node_id,
9093			})
9094		})
9095	}
9096
9097	#[cfg(any(test, feature = "_test_utils"))]
9098	pub(crate) fn test_raa_monitor_updates_held(&self,
9099		counterparty_node_id: PublicKey, channel_id: ChannelId
9100	) -> bool {
9101		let per_peer_state = self.per_peer_state.read().unwrap();
9102		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
9103			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
9104			let peer_state = &mut *peer_state_lck;
9105
9106			if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
9107				return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
9108					chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
9109			}
9110		}
9111		false
9112	}
9113
9114	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
9115		let htlcs_to_fail = {
9116			let per_peer_state = self.per_peer_state.read().unwrap();
9117			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
9118				.ok_or_else(|| {
9119					debug_assert!(false);
9120					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9121				}).map(|mtx| mtx.lock().unwrap())?;
9122			let peer_state = &mut *peer_state_lock;
9123			match peer_state.channel_by_id.entry(msg.channel_id) {
9124				hash_map::Entry::Occupied(mut chan_phase_entry) => {
9125					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9126						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9127						let funding_txo_opt = chan.context.get_funding_txo();
9128						let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
9129							self.raa_monitor_updates_held(
9130								&peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
9131								*counterparty_node_id)
9132						} else { false };
9133						let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state,
9134							chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
9135						if let Some(monitor_update) = monitor_update_opt {
9136							let funding_txo = funding_txo_opt
9137								.expect("Funding outpoint must have been set for RAA handling to succeed");
9138							handle_new_monitor_update!(self, funding_txo, monitor_update,
9139								peer_state_lock, peer_state, per_peer_state, chan);
9140						}
9141						htlcs_to_fail
9142					} else {
9143						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9144							"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
9145					}
9146				},
9147				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9148			}
9149		};
9150		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
9151		Ok(())
9152	}
9153
9154	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
9155		let per_peer_state = self.per_peer_state.read().unwrap();
9156		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9157			.ok_or_else(|| {
9158				debug_assert!(false);
9159				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9160			})?;
9161		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9162		let peer_state = &mut *peer_state_lock;
9163		match peer_state.channel_by_id.entry(msg.channel_id) {
9164			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9165				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9166					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9167					try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
9168				} else {
9169					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9170						"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
9171				}
9172			},
9173			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9174		}
9175		Ok(())
9176	}
9177
9178	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
9179		let per_peer_state = self.per_peer_state.read().unwrap();
9180		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9181			.ok_or_else(|| {
9182				debug_assert!(false);
9183				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
9184			})?;
9185		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9186		let peer_state = &mut *peer_state_lock;
9187		match peer_state.channel_by_id.entry(msg.channel_id) {
9188			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9189				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9190					if !chan.context.is_usable() {
9191						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
9192					}
9193
9194					peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
9195						msg: try_chan_phase_entry!(self, peer_state, chan.announcement_signatures(
9196							&self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
9197							msg, &self.default_configuration
9198						), chan_phase_entry),
9199						// Note that announcement_signatures fails if the channel cannot be announced,
9200						// so get_channel_update_for_broadcast will never fail by the time we get here.
9201						update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
9202					});
9203				} else {
9204					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9205						"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
9206				}
9207			},
9208			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
9209		}
9210		Ok(())
9211	}
9212
9213	/// Returns `DoPersist` if anything changed, otherwise either `SkipPersistNoEvents` or an `Err`.
9214	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
9215		let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
9216			Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
9217			None => {
9218				// It's not a local channel
9219				return Ok(NotifyOption::SkipPersistNoEvents)
9220			}
9221		};
9222		let per_peer_state = self.per_peer_state.read().unwrap();
9223		let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
9224		if peer_state_mutex_opt.is_none() {
9225			return Ok(NotifyOption::SkipPersistNoEvents)
9226		}
9227		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
9228		let peer_state = &mut *peer_state_lock;
9229		match peer_state.channel_by_id.entry(chan_id) {
9230			hash_map::Entry::Occupied(mut chan_phase_entry) => {
9231				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9232					if chan.context.get_counterparty_node_id() != *counterparty_node_id {
9233						if chan.context.should_announce() {
9234							// If the announcement is about a channel of ours which is public, some
9235							// other peer may simply be forwarding all its gossip to us. Don't provide
9236							// a scary-looking error message and return Ok instead.
9237							return Ok(NotifyOption::SkipPersistNoEvents);
9238						}
9239						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
9240					}
9241					let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
9242					let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
9243					if were_node_one == msg_from_node_one {
9244						return Ok(NotifyOption::SkipPersistNoEvents);
9245					} else {
9246						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9247						log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
9248						let did_change = try_chan_phase_entry!(self, peer_state, chan.channel_update(&msg), chan_phase_entry);
9249						// If nothing changed after applying their update, we don't need to bother
9250						// persisting.
9251						if !did_change {
9252							return Ok(NotifyOption::SkipPersistNoEvents);
9253						}
9254					}
9255				} else {
9256					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9257						"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
9258				}
9259			},
9260			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
9261		}
9262		Ok(NotifyOption::DoPersist)
9263	}
9264
9265	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
9266		let need_lnd_workaround = {
9267			let per_peer_state = self.per_peer_state.read().unwrap();
9268
9269			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
9270				.ok_or_else(|| {
9271					debug_assert!(false);
9272					MsgHandleErrInternal::send_err_msg_no_close(
9273						format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
9274						msg.channel_id
9275					)
9276				})?;
9277			let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
9278			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9279			let peer_state = &mut *peer_state_lock;
9280			match peer_state.channel_by_id.entry(msg.channel_id) {
9281				hash_map::Entry::Occupied(mut chan_phase_entry) => {
9282					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
9283						// Currently, we expect all holding cell update_adds to be dropped on peer
9284						// disconnect, so Channel's reestablish will never hand us any holding cell
9285						// freed HTLCs to fail backwards. If in the future we no longer drop pending
9286						// add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
9287						let responses = try_chan_phase_entry!(self, peer_state, chan.channel_reestablish(
9288							msg, &&logger, &self.node_signer, self.chain_hash,
9289							&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
9290						let mut channel_update = None;
9291						if let Some(msg) = responses.shutdown_msg {
9292							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
9293								node_id: counterparty_node_id.clone(),
9294								msg,
9295							});
9296						} else if chan.context.is_usable() {
9297							// If the channel is in a usable state (i.e. the channel is not being shut
9298							// down), send a unicast channel_update to our counterparty to make sure
9299							// they have the latest channel parameters.
9300							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
9301								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
9302									node_id: chan.context.get_counterparty_node_id(),
9303									msg,
9304								});
9305							}
9306						}
9307						let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
9308						let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
9309							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
9310							Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, None);
9311						debug_assert!(htlc_forwards.is_none());
9312						debug_assert!(decode_update_add_htlcs.is_none());
9313						if let Some(upd) = channel_update {
9314							peer_state.pending_msg_events.push(upd);
9315						}
9316						need_lnd_workaround
9317					} else {
9318						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
9319							"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
9320					}
9321				},
9322				hash_map::Entry::Vacant(_) => {
9323					log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
9324						msg.channel_id);
9325					// Unfortunately, lnd doesn't force close on errors
9326					// (https://github.com/lightningnetwork/lnd/blob/abb1e3463f3a83bbb843d5c399869dbe930ad94f/htlcswitch/link.go#L2119).
9327					// One of the few ways to get an lnd counterparty to force close is by
9328					// replicating what they do when restoring static channel backups (SCBs). They
9329					// send an invalid `ChannelReestablish` with `0` commitment numbers and an
9330					// invalid `your_last_per_commitment_secret`.
9331					//
9332					// Since we received a `ChannelReestablish` for a channel that doesn't exist, we
9333					// can assume it's likely the channel closed from our point of view, but it
9334					// remains open on the counterparty's side. By sending this bogus
9335					// `ChannelReestablish` message now as a response to theirs, we trigger them to
9336					// force close broadcasting their latest state. If the closing transaction from
9337					// our point of view remains unconfirmed, it'll enter a race with the
9338					// counterparty's to-be-broadcast latest commitment transaction.
9339					peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
9340						node_id: *counterparty_node_id,
9341						msg: msgs::ChannelReestablish {
9342							channel_id: msg.channel_id,
9343							next_local_commitment_number: 0,
9344							next_remote_commitment_number: 0,
9345							your_last_per_commitment_secret: [1u8; 32],
9346							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
9347							next_funding_txid: None,
9348						},
9349					});
9350					return Err(MsgHandleErrInternal::send_err_msg_no_close(
9351						format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
9352							counterparty_node_id), msg.channel_id)
9353					)
9354				}
9355			}
9356		};
9357
9358		if let Some(channel_ready_msg) = need_lnd_workaround {
9359			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
9360		}
9361		Ok(NotifyOption::SkipPersistHandleEvents)
9362	}
9363
9364	/// Process pending events from the [`chain::Watch`], returning whether any events were processed.
9365	fn process_pending_monitor_events(&self) -> bool {
9366		debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
9367
9368		let mut failed_channels = Vec::new();
9369		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
9370		let has_pending_monitor_events = !pending_monitor_events.is_empty();
9371		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
9372			for monitor_event in monitor_events.drain(..) {
9373				match monitor_event {
9374					MonitorEvent::HTLCEvent(htlc_update) => {
9375						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
9376						if let Some(preimage) = htlc_update.payment_preimage {
9377							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
9378							self.claim_funds_internal(htlc_update.source, preimage,
9379								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
9380								false, counterparty_node_id, funding_outpoint, channel_id, None);
9381						} else {
9382							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
9383							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
9384							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
9385							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
9386						}
9387					},
9388					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
9389						let counterparty_node_id_opt = match counterparty_node_id {
9390							Some(cp_id) => Some(cp_id),
9391							None => {
9392								// TODO: Once we can rely on the counterparty_node_id from the
9393								// monitor event, this and the outpoint_to_peer map should be removed.
9394								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
9395								outpoint_to_peer.get(&funding_outpoint).cloned()
9396							}
9397						};
9398						if let Some(counterparty_node_id) = counterparty_node_id_opt {
9399							let per_peer_state = self.per_peer_state.read().unwrap();
9400							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
9401								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9402								let peer_state = &mut *peer_state_lock;
9403								let pending_msg_events = &mut peer_state.pending_msg_events;
9404								if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
9405									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
9406										reason
9407									} else {
9408										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
9409									};
9410									let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
9411									let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
9412									failed_channels.push(shutdown_res);
9413									if let ChannelPhase::Funded(chan) = chan_phase {
9414										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9415											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9416											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9417												msg: update
9418											});
9419										}
9420										pending_msg_events.push(events::MessageSendEvent::HandleError {
9421											node_id: chan.context.get_counterparty_node_id(),
9422											action: msgs::ErrorAction::DisconnectPeer {
9423												msg: Some(msgs::ErrorMessage {
9424													channel_id: chan.context.channel_id(),
9425													data: reason.to_string()
9426												})
9427											},
9428										});
9429									}
9430								}
9431							}
9432						}
9433					},
9434					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
9435						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
9436					},
9437				}
9438			}
9439		}
9440
9441		for failure in failed_channels.drain(..) {
9442			self.finish_close_channel(failure);
9443		}
9444
9445		has_pending_monitor_events
9446	}
9447
9448	/// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
9449	/// handling all pending events (in particular, without handling `PendingHTLCsForwardable`).
9450	/// Thus, we expose monitor update events as a separate process method here.
9451	#[cfg(fuzzing)]
9452	pub fn process_monitor_events(&self) {
9453		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
9454		self.process_pending_monitor_events();
9455	}
9456
9457	/// Check the holding cell in each channel and free any pending HTLCs in them if possible.
9458	/// Returns whether there were any updates, such as pending HTLCs being freed or a monitor
9459	/// update being applied.
9460	fn check_free_holding_cells(&self) -> bool {
9461		let mut has_monitor_update = false;
9462		let mut failed_htlcs = Vec::new();
9463
9464		// Walk our list of channels and find any that need to update. Note that when we do find an
9465		// update, if it includes actions that must be taken afterwards, we have to drop the
9466		// per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
9467		// manage to go through all our peers without finding a single channel to update.
9468		'peer_loop: loop {
9469			let per_peer_state = self.per_peer_state.read().unwrap();
9470			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
9471				'chan_loop: loop {
9472					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9473					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
9474					for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
9475						|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
9476					) {
9477						let counterparty_node_id = chan.context.get_counterparty_node_id();
9478						let funding_txo = chan.context.get_funding_txo();
9479						let (monitor_opt, holding_cell_failed_htlcs) =
9480							chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
9481						if !holding_cell_failed_htlcs.is_empty() {
9482							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
9483						}
9484						if let Some(monitor_update) = monitor_opt {
9485							has_monitor_update = true;
9486
9487							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
9488								peer_state_lock, peer_state, per_peer_state, chan);
9489							continue 'peer_loop;
9490						}
9491					}
9492					break 'chan_loop;
9493				}
9494			}
9495			break 'peer_loop;
9496		}
9497
9498		let has_update = has_monitor_update || !failed_htlcs.is_empty();
9499		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
9500			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
9501		}
9502
9503		has_update
9504	}
9505
9506	/// When a call to a [`ChannelSigner`] method returns an error, this indicates that the signer
9507	/// is (temporarily) unavailable, and the operation should be retried later.
9508	///
9509	/// This method allows for that retry: it checks for signer-pending messages to be attempted
9510	/// either in every channel or only in the specifically provided channel.
9511	///
9512	/// [`ChannelSigner`]: crate::sign::ChannelSigner
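	///
	/// A minimal usage sketch (not a compiled doctest; it assumes `channel_manager` is an
	/// initialized [`ChannelManager`] and `counterparty_node_id`/`channel_id` are placeholder
	/// bindings identifying the channel whose signer has caught up):
	///
	/// ```ignore
	/// // Retry a single channel once its signer has produced the pending material:
	/// channel_manager.signer_unblocked(Some((counterparty_node_id, channel_id)));
	/// // Or retry every channel once the signer is generally available again:
	/// channel_manager.signer_unblocked(None);
	/// ```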
9513	pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
9514		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
9515
9516		// Returns a `ShutdownResult` if the channel has just been closed and should be removed.
9517		let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
9518			let node_id = phase.context().get_counterparty_node_id();
9519			match phase {
9520				ChannelPhase::Funded(chan) => {
9521					let msgs = chan.signer_maybe_unblocked(&self.logger);
9522					let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs {
9523						node_id,
9524						updates,
9525					});
9526					let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK {
9527						node_id,
9528						msg,
9529					});
9530					match (cu_msg, raa_msg) {
9531						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
9532							pending_msg_events.push(cu);
9533							pending_msg_events.push(raa);
9534						},
9535						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
9536							pending_msg_events.push(raa);
9537							pending_msg_events.push(cu);
9538						},
9539						(Some(cu), _) => pending_msg_events.push(cu),
9540						(_, Some(raa)) => pending_msg_events.push(raa),
9541						(_, _) => {},
9542					}
9543					if let Some(msg) = msgs.funding_signed {
9544						pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
9545							node_id,
9546							msg,
9547						});
9548					}
9549					if let Some(msg) = msgs.channel_ready {
9550						send_channel_ready!(self, pending_msg_events, chan, msg);
9551					}
9552					if let Some(msg) = msgs.closing_signed {
9553						pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
9554							node_id,
9555							msg,
9556						});
9557					}
9558					if let Some(broadcast_tx) = msgs.signed_closing_tx {
9559						let channel_id = chan.context.channel_id();
9560						let counterparty_node_id = chan.context.get_counterparty_node_id();
9561						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
9562						log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
9563						self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
9564
9565						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9566							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
9567								msg: update
9568							});
9569						}
9570					}
9571					msgs.shutdown_result
9572				}
9573				ChannelPhase::UnfundedOutboundV1(chan) => {
9574					let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger);
9575					if let Some(msg) = open_channel {
9576						pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
9577							node_id,
9578							msg,
9579						});
9580					}
9581					if let Some(msg) = funding_created {
9582						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
9583							node_id,
9584							msg,
9585						});
9586					}
9587					None
9588				}
9589				ChannelPhase::UnfundedInboundV1(chan) => {
9590					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9591					if let Some(msg) = chan.signer_maybe_unblocked(&&logger) {
9592						pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
9593							node_id,
9594							msg,
9595						});
9596					}
9597					None
9598				},
9599				ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => None,
9600			}
9601		};
9602
9603		let mut shutdown_results = Vec::new();
9604		let per_peer_state = self.per_peer_state.read().unwrap();
9605		let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
9606			if let Some((counterparty_node_id, _)) = channel_opt {
9607				**cp_id == counterparty_node_id
9608			} else { true }
9609		});
9610		for (_cp_id, peer_state_mutex) in per_peer_state_iter {
9611			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9612			let peer_state = &mut *peer_state_lock;
9613			peer_state.channel_by_id.retain(|_, chan| {
9614				let shutdown_result = match channel_opt {
9615					Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
9616					_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
9617				};
9618				if let Some(mut shutdown_result) = shutdown_result {
9619					let context = &chan.context();
9620					let logger = WithChannelContext::from(&self.logger, context, None);
9621					log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id());
9622					locked_close_channel!(self, peer_state, context, shutdown_result);
9623					shutdown_results.push(shutdown_result);
9624					false
9625				} else {
9626					true
9627				}
9628			});
9629		}
9630		drop(per_peer_state);
9631		for shutdown_result in shutdown_results.drain(..) {
9632			self.finish_close_channel(shutdown_result);
9633		}
9634	}
9635
9636	/// Check whether any channels have finished removing all pending updates after a shutdown
9637	/// exchange and can now send a closing_signed.
9638	/// Returns whether any closing_signed messages were generated.
9639	fn maybe_generate_initial_closing_signed(&self) -> bool {
9640		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
9641		let mut has_update = false;
9642		let mut shutdown_results = Vec::new();
9643		{
9644			let per_peer_state = self.per_peer_state.read().unwrap();
9645
9646			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
9647				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
9648				let peer_state = &mut *peer_state_lock;
9649				let pending_msg_events = &mut peer_state.pending_msg_events;
9650				peer_state.channel_by_id.retain(|channel_id, phase| {
9651					match phase {
9652						ChannelPhase::Funded(chan) => {
9653							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
9654							match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
9655								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
9656									if let Some(msg) = msg_opt {
9657										has_update = true;
9658										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
9659											node_id: chan.context.get_counterparty_node_id(), msg,
9660										});
9661									}
9662									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
9663									if let Some(mut shutdown_result) = shutdown_result_opt {
9664										locked_close_channel!(self, peer_state, &chan.context, shutdown_result);
9665										shutdown_results.push(shutdown_result);
9666									}
9667									if let Some(tx) = tx_opt {
9668										// We're done with this channel. We got a closing_signed and sent back
9669										// a closing_signed with a closing transaction to broadcast.
9670										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
9671											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
9672											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
9673												msg: update
9674											});
9675										}
9676
9677										log_info!(logger, "Broadcasting {}", log_tx!(tx));
9678										self.tx_broadcaster.broadcast_transactions(&[&tx]);
9679										false
9680									} else { true }
9681								},
9682								Err(e) => {
9683									has_update = true;
9684									let (close_channel, res) = convert_chan_phase_err!(self, peer_state, e, chan, channel_id, FUNDED_CHANNEL);
9685									handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
9686									!close_channel
9687								}
9688							}
9689						},
9690						_ => true, // Retain unfunded channels if present.
9691					}
9692				});
9693			}
9694		}
9695
9696		for (counterparty_node_id, err) in handle_errors.drain(..) {
9697			let _ = handle_error!(self, err, counterparty_node_id);
9698		}
9699
9700		for shutdown_result in shutdown_results.drain(..) {
9701			self.finish_close_channel(shutdown_result);
9702		}
9703
9704		has_update
9705	}
9706
9707	/// Utility for creating a BOLT11 invoice that can be verified by [`ChannelManager`] without
9708	/// storing any additional state. It achieves this by including a [`PaymentSecret`] in the
9709	/// invoice which it uses to verify that the invoice has not expired and the payment amount is
9710	/// sufficient, reproducing the [`PaymentPreimage`] if applicable.
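	///
	/// A minimal usage sketch (not a compiled doctest; it assumes `channel_manager` is an
	/// initialized [`ChannelManager`]):
	///
	/// ```ignore
	/// let params = Bolt11InvoiceParameters {
	///     amount_msats: Some(10_000_000),
	///     ..Default::default()
	/// };
	/// match channel_manager.create_bolt11_invoice(params) {
	///     Ok(invoice) => println!("Created invoice: {}", invoice),
	///     Err(e) => println!("Failed to create invoice: {:?}", e),
	/// }
	/// ```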
9711	pub fn create_bolt11_invoice(
9712		&self, params: Bolt11InvoiceParameters,
9713	) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
9714		let Bolt11InvoiceParameters {
9715			amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
9716			payment_hash,
9717		} = params;
9718
9719		let currency =
9720			Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);
9721
9722		#[cfg(feature = "std")]
9723		let duration_since_epoch = {
9724			use std::time::SystemTime;
9725			SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
9726				.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
9727		};
9728
9729		// This may be up to 2 hours in the future because of bitcoin's block time rule or about
9730		// 10-30 minutes in the past if a block hasn't been found recently. This should be fine as
9731		// the default invoice expiration is 2 hours, though shorter expirations may be problematic.
9732		#[cfg(not(feature = "std"))]
9733		let duration_since_epoch =
9734			Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);
9735
9736		if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
9737			if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
9738				return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
9739			}
9740		}
9741
9742		let (payment_hash, payment_secret) = match payment_hash {
9743			Some(payment_hash) => {
9744				let payment_secret = self
9745					.create_inbound_payment_for_hash(
9746						payment_hash, amount_msats,
9747						invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
9748						min_final_cltv_expiry_delta,
9749					)
9750					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
9751				(payment_hash, payment_secret)
9752			},
9753			None => {
9754				self
9755					.create_inbound_payment(
9756						amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
9757						min_final_cltv_expiry_delta,
9758					)
9759					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
9760			},
9761		};
9762
9763		log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);
9764
9765		let invoice = Bolt11InvoiceBuilder::new(currency);
9766		let invoice = match description {
9767			Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
9768			Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
9769		};
9770
9771		let mut invoice = invoice
9772			.duration_since_epoch(duration_since_epoch)
9773			.payee_pub_key(self.get_our_node_id())
9774			.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
9775			.payment_secret(payment_secret)
9776			.basic_mpp()
9777			.min_final_cltv_expiry_delta(
9778				// Add a buffer of 3 to the delta if present, otherwise use LDK's minimum.
9779				min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
9780			);
9781
9782		if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs {
9783			invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
9784		}
9785
9786		if let Some(amount_msats) = amount_msats {
9787			invoice = invoice.amount_milli_satoshis(amount_msats);
9788		}
9789
9790		let channels = self.list_channels();
9791		let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
9792		for hint in route_hints {
9793			invoice = invoice.private_route(hint);
9794		}
9795
9796		let raw_invoice = invoice.build_raw().map_err(SignOrCreationError::CreationError)?;
9797		let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);
9798
9799		raw_invoice
9800			.sign(|_| signature)
9801			.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
9802			.map_err(SignOrCreationError::SignError)
9803	}
9804}
9805
9806/// Parameters used with [`create_bolt11_invoice`].
9807///
9808/// [`create_bolt11_invoice`]: ChannelManager::create_bolt11_invoice
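///
/// Fields left unset fall back to their [`Default`] values. A hypothetical construction that
/// overrides only the amount and expiry might look like this (not a compiled doctest):
///
/// ```ignore
/// let params = Bolt11InvoiceParameters {
///     amount_msats: Some(50_000_000),
///     invoice_expiry_delta_secs: Some(3_600),
///     ..Default::default()
/// };
/// ```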
9809pub struct Bolt11InvoiceParameters {
9810	/// The amount for the invoice, if any.
9811	pub amount_msats: Option<u64>,
9812
9813	/// The description for what the invoice is for, or hash of such description.
9814	pub description: Bolt11InvoiceDescription,
9815
9816	/// The invoice expiration relative to its creation time. If not set, the invoice will expire
9817	/// after [`DEFAULT_EXPIRY_TIME`].
9818	///
9819	/// The creation time used is the duration since the Unix epoch for `std` builds. For non-`std`
9820	/// builds, the highest block timestamp seen is used instead. In the latter case, use a long
9821	/// enough expiry to account for the average block time.
9822	pub invoice_expiry_delta_secs: Option<u32>,
9823
9824	/// The minimum `cltv_expiry` for the last HTLC in the route. If not set, will use
9825	/// [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
9826	///
9827	/// If set, must be at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`], and a three-block buffer will be
9828	/// added as well to allow for up to a few new block confirmations during routing.
9829	pub min_final_cltv_expiry_delta: Option<u16>,
9830
9831	/// The payment hash used in the invoice. If not set, a payment hash will be generated using a
9832	/// preimage that can be reproduced by [`ChannelManager`] without storing any state.
9833	///
9834	/// Uses the payment hash if set. This may be useful when building an on-chain swap or any
9835	/// other protocol in which the payment hash is also used outside the scope of
9836	/// lightning.
9837	pub payment_hash: Option<PaymentHash>,
9838}
9839
9840impl Default for Bolt11InvoiceParameters {
9841	fn default() -> Self {
9842		Self {
9843			amount_msats: None,
9844			description: Bolt11InvoiceDescription::Direct(Description::empty()),
9845			invoice_expiry_delta_secs: None,
9846			min_final_cltv_expiry_delta: None,
9847			payment_hash: None,
9848		}
9849	}
9850}
9851
9852macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
9853	/// Creates an [`OfferBuilder`] such that the [`Offer`] it builds is recognized by the
9854	/// [`ChannelManager`] when handling [`InvoiceRequest`] messages for the offer. The offer's
9855	/// expiration will be `absolute_expiry` if `Some`, otherwise it will not expire.
9856	///
9857	/// # Privacy
9858	///
9859	/// Uses [`MessageRouter`] to construct a [`BlindedMessagePath`] for the offer based on the given
9860	/// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
9861	/// privacy implications as well as those of the parameterized [`Router`], which implements
9862	/// [`MessageRouter`].
9863	///
9864	/// Also, uses a derived signing pubkey in the offer for recipient privacy.
9865	///
9866	/// # Limitations
9867	///
9868	/// Requires a direct connection to the introduction node in the responding [`InvoiceRequest`]'s
9869	/// reply path.
9870	///
9871	/// # Errors
9872	///
9873	/// Errors if the parameterized [`Router`] is unable to create a blinded path for the offer.
9874	///
9875	/// [`Offer`]: crate::offers::offer::Offer
9876	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
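	///
	/// A minimal usage sketch (not a compiled doctest; it assumes `channel_manager` is an
	/// initialized [`ChannelManager`] and the non-`c_bindings` builder with chained setters):
	///
	/// ```ignore
	/// let offer = channel_manager
	///     .create_offer_builder(None)?
	///     .description("coffee".to_string())
	///     .amount_msats(10_000_000)
	///     .build()?;
	/// // Offers are typically communicated to payers as a bech32 string or QR code.
	/// let encoded_offer = offer.to_string();
	/// ```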
9877	pub fn create_offer_builder(
9878		&$self, absolute_expiry: Option<Duration>
9879	) -> Result<$builder, Bolt12SemanticError> {
9880		let node_id = $self.get_our_node_id();
9881		let expanded_key = &$self.inbound_payment_key;
9882		let entropy = &*$self.entropy_source;
9883		let secp_ctx = &$self.secp_ctx;
9884
9885		let nonce = Nonce::from_entropy_source(entropy);
9886		let context = OffersContext::InvoiceRequest { nonce };
9887		let path = $self.create_blinded_paths_using_absolute_expiry(context, absolute_expiry)
9888			.and_then(|paths| paths.into_iter().next().ok_or(()))
9889			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
9890		let builder = OfferBuilder::deriving_signing_pubkey(node_id, expanded_key, nonce, secp_ctx)
9891			.chain_hash($self.chain_hash)
9892			.path(path);
9893
9894		let builder = match absolute_expiry {
9895			None => builder,
9896			Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
9897		};
9898
9899		Ok(builder.into())
9900	}
9901} }
9902
9903macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
9904	/// Creates a [`RefundBuilder`] such that the [`Refund`] it builds is recognized by the
9905	/// [`ChannelManager`] when handling [`Bolt12Invoice`] messages for the refund.
9906	///
9907	/// # Payment
9908	///
9909	/// The provided `payment_id` is used to ensure that only one invoice is paid for the refund.
9910	/// See [Avoiding Duplicate Payments] for other requirements once the payment has been sent.
9911	///
9912	/// The builder will have the provided expiration set. Any changes to the expiration on the
9913	/// returned builder will not be honored by [`ChannelManager`]. For non-`std`, the highest seen
9914	/// block time minus two hours is used for the current time when determining if the refund has
9915	/// expired.
9916	///
9917	/// To revoke the refund, use [`ChannelManager::abandon_payment`] prior to receiving the
9918	/// invoice. If abandoned, or an invoice isn't received before expiration, the payment will fail
9919	/// with an [`Event::PaymentFailed`].
9920	///
9921	/// If `max_total_routing_fee_msat` is not specified, the default from
9922	/// [`RouteParameters::from_payment_params_and_value`] is applied.
9923	///
9924	/// # Privacy
9925	///
9926	/// Uses [`MessageRouter`] to construct a [`BlindedMessagePath`] for the refund based on the given
9927	/// `absolute_expiry` according to [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`]. See those docs for
9928	/// privacy implications as well as those of the parameterized [`Router`], which implements
9929	/// [`MessageRouter`].
9930	///
9931	/// Also, uses a derived payer id in the refund for payer privacy.
9932	///
9933	/// # Limitations
9934	///
9935	/// Requires a direct connection to an introduction node in the responding
9936	/// [`Bolt12Invoice::payment_paths`].
9937	///
9938	/// # Errors
9939	///
9940	/// Errors if:
9941	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
9942	/// - `amount_msats` is invalid, or
9943	/// - the parameterized [`Router`] is unable to create a blinded path for the refund.
9944	///
9945	/// [`Refund`]: crate::offers::refund::Refund
9946	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
9947	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
9948	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
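	///
	/// A minimal usage sketch (not a compiled doctest; it assumes `channel_manager` is an
	/// initialized [`ChannelManager`], the non-`c_bindings` builder with chained setters, and
	/// `absolute_expiry` is a placeholder `Duration` since the Unix epoch):
	///
	/// ```ignore
	/// let payment_id = PaymentId([42; 32]);
	/// let refund = channel_manager
	///     .create_refund_builder(
	///         10_000_000, absolute_expiry, payment_id, Retry::Attempts(3), None,
	///     )?
	///     .description("coffee refund".to_string())
	///     .build()?;
	/// // Communicate the refund to the intended payee, e.g. as a bech32 string.
	/// let encoded_refund = refund.to_string();
	/// ```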
9949	pub fn create_refund_builder(
9950		&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
9951		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
9952	) -> Result<$builder, Bolt12SemanticError> {
9953		let node_id = $self.get_our_node_id();
9954		let expanded_key = &$self.inbound_payment_key;
9955		let entropy = &*$self.entropy_source;
9956		let secp_ctx = &$self.secp_ctx;
9957
9958		let nonce = Nonce::from_entropy_source(entropy);
9959		let context = OffersContext::OutboundPayment { payment_id, nonce, hmac: None };
9960		let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry))
9961			.and_then(|paths| paths.into_iter().next().ok_or(()))
9962			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
9963
9964		let builder = RefundBuilder::deriving_signing_pubkey(
9965			node_id, expanded_key, nonce, secp_ctx, amount_msats, payment_id
9966		)?
9967			.chain_hash($self.chain_hash)
9968			.absolute_expiry(absolute_expiry)
9969			.path(path);
9970
9971		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
9972
9973		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
9974		$self.pending_outbound_payments
9975			.add_new_awaiting_invoice(
9976				payment_id, expiration, retry_strategy, max_total_routing_fee_msat, None,
9977			)
9978			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;
9979
9980		Ok(builder.into())
9981	}
9982} }
9983
9984/// Defines the maximum number of [`OffersMessage`]s, each with a different reply path, to be
9985/// sent along different paths.
9986/// Sending multiple requests increases the chances of successful delivery in case some
9987/// paths are unavailable. However, only one invoice for a given [`PaymentId`] will be paid,
9988/// even if multiple invoices are received.
9989const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;
9990
9991impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
9992where
9993	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
9994	T::Target: BroadcasterInterface,
9995	ES::Target: EntropySource,
9996	NS::Target: NodeSigner,
9997	SP::Target: SignerProvider,
9998	F::Target: FeeEstimator,
9999	R::Target: Router,
10000	MR::Target: MessageRouter,
10001	L::Target: Logger,
10002{
10003	#[cfg(not(c_bindings))]
10004	create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
10005	#[cfg(not(c_bindings))]
10006	create_refund_builder!(self, RefundBuilder<secp256k1::All>);
10007
10008	#[cfg(c_bindings)]
10009	create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
10010	#[cfg(c_bindings)]
10011	create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
10012
10013	/// Pays for an [`Offer`] using the given parameters by creating an [`InvoiceRequest`] and
10014	/// enqueuing it to be sent via an onion message. [`ChannelManager`] will pay the actual
10015	/// [`Bolt12Invoice`] once it is received.
10016	///
10017	/// Uses [`InvoiceRequestBuilder`] such that the [`InvoiceRequest`] it builds is recognized by
10018	/// the [`ChannelManager`] when handling a [`Bolt12Invoice`] message in response to the request.
10019	/// The optional parameters are used in the builder, if `Some`:
10020	/// - `quantity` for [`InvoiceRequest::quantity`] which must be set if
10021	///   [`Offer::expects_quantity`] is `true`.
10022	/// - `amount_msats` if overpaying what is required for the given `quantity` is desired, and
10023	/// - `payer_note` for [`InvoiceRequest::payer_note`].
10024	///
10025	/// If `max_total_routing_fee_msat` is not specified, the default from
10026	/// [`RouteParameters::from_payment_params_and_value`] is applied.
10027	///
10028	/// # Payment
10029	///
10030	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
10031	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
10032	/// been sent.
10033	///
10034	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
10035	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
10036	/// payment will fail with an [`Event::PaymentFailed`].
10037	///
10038	/// # Privacy
10039	///
10040	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
10041	/// to construct a [`BlindedMessagePath`] for the reply path. For further privacy implications, see the
10042	/// docs of the parameterized [`Router`], which implements [`MessageRouter`].
10043	///
10044	/// # Limitations
10045	///
10046	/// Requires a direct connection to an introduction node in [`Offer::paths`] or to
10047	/// [`Offer::issuer_signing_pubkey`], if empty. A similar restriction applies to the responding
10048	/// [`Bolt12Invoice::payment_paths`].
10049	///
10050	/// # Errors
10051	///
10052	/// Errors if:
10053	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
10054	/// - the provided parameters are invalid for the offer,
10055	/// - the offer is for an unsupported chain, or
10056	/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
10057	///   request.
10058	///
10059	/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
10060	/// [`InvoiceRequest::quantity`]: crate::offers::invoice_request::InvoiceRequest::quantity
10061	/// [`InvoiceRequest::payer_note`]: crate::offers::invoice_request::InvoiceRequest::payer_note
10062	/// [`InvoiceRequestBuilder`]: crate::offers::invoice_request::InvoiceRequestBuilder
10063	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
10064	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
10065	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
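	///
	/// A minimal usage sketch (not a compiled doctest; it assumes `channel_manager` is an
	/// initialized [`ChannelManager`] and `offer` is a placeholder [`Offer`] parsed from a
	/// bech32 "lno1..." string):
	///
	/// ```ignore
	/// let payment_id = PaymentId([42; 32]);
	/// channel_manager.pay_for_offer(
	///     &offer, None, None, None, payment_id, Retry::Attempts(3), None,
	/// )?;
	/// // The payment completes asynchronously; watch for `Event::PaymentSent` or
	/// // `Event::PaymentFailed` carrying this `payment_id`.
	/// ```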
10066	pub fn pay_for_offer(
10067		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
10068		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
10069		max_total_routing_fee_msat: Option<u64>
10070	) -> Result<(), Bolt12SemanticError> {
10071		self.pay_for_offer_intern(offer, quantity, amount_msats, payer_note, payment_id, None, |invoice_request, nonce| {
10072			let expiration = StaleExpiration::TimerTicks(1);
10073			let retryable_invoice_request = RetryableInvoiceRequest {
10074				invoice_request: invoice_request.clone(),
10075				nonce,
10076			};
10077			self.pending_outbound_payments
10078				.add_new_awaiting_invoice(
10079					payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
10080					Some(retryable_invoice_request)
10081				)
10082				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
10083		})
10084	}
10085
10086	fn pay_for_offer_intern<CPP: FnOnce(&InvoiceRequest, Nonce) -> Result<(), Bolt12SemanticError>>(
10087		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
10088		payer_note: Option<String>, payment_id: PaymentId,
10089		human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
10090	) -> Result<(), Bolt12SemanticError> {
10091		let expanded_key = &self.inbound_payment_key;
10092		let entropy = &*self.entropy_source;
10093		let secp_ctx = &self.secp_ctx;
10094
10095		let nonce = Nonce::from_entropy_source(entropy);
10096		let builder: InvoiceRequestBuilder<secp256k1::All> = offer
10097			.request_invoice(expanded_key, nonce, secp_ctx, payment_id)?
10098			.into();
10099		let builder = builder.chain_hash(self.chain_hash)?;
10100
10101		let builder = match quantity {
10102			None => builder,
10103			Some(quantity) => builder.quantity(quantity)?,
10104		};
10105		let builder = match amount_msats {
10106			None => builder,
10107			Some(amount_msats) => builder.amount_msats(amount_msats)?,
10108		};
10109		let builder = match payer_note {
10110			None => builder,
10111			Some(payer_note) => builder.payer_note(payer_note),
10112		};
10113		let builder = match human_readable_name {
10114			None => builder,
10115			Some(hrn) => builder.sourced_from_human_readable_name(hrn),
10116		};
10117		let invoice_request = builder.build_and_sign()?;
10118
10119		let hmac = payment_id.hmac_for_offer_payment(nonce, expanded_key);
10120		let context = MessageContext::Offers(
10121			OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }
10122		);
10123		let reply_paths = self.create_blinded_paths(context)
10124			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10125
10126		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10127
10128		create_pending_payment(&invoice_request, nonce)?;
10129
10130		self.enqueue_invoice_request(invoice_request, reply_paths)
10131	}
10132
10133	fn enqueue_invoice_request(
10134		&self,
10135		invoice_request: InvoiceRequest,
10136		reply_paths: Vec<BlindedMessagePath>,
10137	) -> Result<(), Bolt12SemanticError> {
10138		let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
10139		if !invoice_request.paths().is_empty() {
10140			reply_paths
10141				.iter()
10142				.flat_map(|reply_path| invoice_request.paths().iter().map(move |path| (path, reply_path)))
10143				.take(OFFERS_MESSAGE_REQUEST_LIMIT)
10144				.for_each(|(path, reply_path)| {
10145					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10146						destination: Destination::BlindedPath(path.clone()),
10147						reply_path: reply_path.clone(),
10148					};
10149					let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10150					pending_offers_messages.push((message, instructions));
10151				});
10152		} else if let Some(node_id) = invoice_request.issuer_signing_pubkey() {
10153			for reply_path in reply_paths {
10154				let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10155					destination: Destination::Node(node_id),
10156					reply_path,
10157				};
10158				let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10159				pending_offers_messages.push((message, instructions));
10160			}
10161		} else {
10162			debug_assert!(false);
10163			return Err(Bolt12SemanticError::MissingIssuerSigningPubkey);
10164		}
10165
10166		Ok(())
10167	}
10168
10169	/// Creates a [`Bolt12Invoice`] for a [`Refund`] and enqueues it to be sent via an onion
10170	/// message.
10171	///
10172	/// The resulting invoice uses a [`PaymentHash`] recognized by the [`ChannelManager`] and a
10173	/// [`BlindedPaymentPath`] containing the [`PaymentSecret`] needed to reconstruct the
10174	/// corresponding [`PaymentPreimage`]. It is returned purely for informational purposes.
10175	///
10176	/// # Limitations
10177	///
10178	/// Requires a direct connection to an introduction node in [`Refund::paths`] or to
10179	/// [`Refund::payer_signing_pubkey`], if empty. This request is best effort; an invoice will be
10180	/// sent to each node meeting the aforementioned criteria, but there's no guarantee that they
10181	/// will be received and no retries will be made.
10182	///
10183	/// # Errors
10184	///
10185	/// Errors if:
10186	/// - the refund is for an unsupported chain, or
10187	/// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
10188	///   the invoice.
10189	///
10190	/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
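	///
	/// # Example
	///
	/// A minimal sketch of responding to a received refund, assuming `channel_manager` and
	/// `refund` are already in scope (these names are illustrative, not part of the API):
	///
	/// ```ignore
	/// match channel_manager.request_refund_payment(&refund) {
	///     Ok(invoice) => println!("Enqueued refund invoice: {:?}", invoice.payment_hash()),
	///     Err(e) => eprintln!("Failed responding to refund: {:?}", e),
	/// }
	/// ```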
10191	pub fn request_refund_payment(
10192		&self, refund: &Refund
10193	) -> Result<Bolt12Invoice, Bolt12SemanticError> {
10194		let expanded_key = &self.inbound_payment_key;
10195		let entropy = &*self.entropy_source;
10196		let secp_ctx = &self.secp_ctx;
10197
10198		let amount_msats = refund.amount_msats();
10199		let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
10200
10201		if refund.chain() != self.chain_hash {
10202			return Err(Bolt12SemanticError::UnsupportedChain);
10203		}
10204
10205		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
10206
10207		match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
10208			Ok((payment_hash, payment_secret)) => {
10209				let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
10210				let payment_paths = self.create_blinded_payment_paths(
10211					amount_msats, payment_secret, payment_context
10212				)
10213					.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10214
10215				#[cfg(feature = "std")]
10216				let builder = refund.respond_using_derived_keys(
10217					payment_paths, payment_hash, expanded_key, entropy
10218				)?;
10219				#[cfg(not(feature = "std"))]
10220				let created_at = Duration::from_secs(
10221					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
10222				);
10223				#[cfg(not(feature = "std"))]
10224				let builder = refund.respond_using_derived_keys_no_std(
10225					payment_paths, payment_hash, created_at, expanded_key, entropy
10226				)?;
10227				let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
10228				let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;
10229
10230				let nonce = Nonce::from_entropy_source(entropy);
10231				let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
10232				let context = MessageContext::Offers(OffersContext::InboundPayment {
10233					payment_hash: invoice.payment_hash(), nonce, hmac
10234				});
10235				let reply_paths = self.create_blinded_paths(context)
10236					.map_err(|_| Bolt12SemanticError::MissingPaths)?;
10237
10238				let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
10239				if refund.paths().is_empty() {
10240					for reply_path in reply_paths {
10241						let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10242							destination: Destination::Node(refund.payer_signing_pubkey()),
10243							reply_path,
10244						};
10245						let message = OffersMessage::Invoice(invoice.clone());
10246						pending_offers_messages.push((message, instructions));
10247					}
10248				} else {
10249					reply_paths
10250						.iter()
10251						.flat_map(|reply_path| refund.paths().iter().map(move |path| (path, reply_path)))
10252						.take(OFFERS_MESSAGE_REQUEST_LIMIT)
10253						.for_each(|(path, reply_path)| {
10254							let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10255								destination: Destination::BlindedPath(path.clone()),
10256								reply_path: reply_path.clone(),
10257							};
10258							let message = OffersMessage::Invoice(invoice.clone());
10259							pending_offers_messages.push((message, instructions));
10260						});
10261				}
10262
10263				Ok(invoice)
10264			},
10265			Err(()) => Err(Bolt12SemanticError::InvalidAmount),
10266		}
10267	}
10268
10269	/// Pays for an [`Offer`] looked up using [BIP 353] Human Readable Names resolved by the DNS
10270	/// resolver(s) at `dns_resolvers` which resolve names according to bLIP 32.
10271	///
10272	/// If the wallet supports paying on-chain schemes, you should instead use
10273	/// [`OMNameResolver::resolve_name`] and [`OMNameResolver::handle_dnssec_proof_for_uri`] (by
10274	/// implementing [`DNSResolverMessageHandler`]) directly to look up a URI and then delegate to
10275	/// your normal URI handling.
10276	///
10277	/// If `max_total_routing_fee_msat` is not specified, the default from
10278	/// [`RouteParameters::from_payment_params_and_value`] is applied.
10279	///
10280	/// # Payment
10281	///
10282	/// The provided `payment_id` is used to ensure that only one invoice is paid for the request
10283	/// when received. See [Avoiding Duplicate Payments] for other requirements once the payment has
10284	/// been sent.
10285	///
10286	/// To revoke the request, use [`ChannelManager::abandon_payment`] prior to receiving the
10287	/// invoice. If abandoned, or an invoice isn't received in a reasonable amount of time, the
10288	/// payment will fail with an [`Event::InvoiceRequestFailed`].
10289	///
10290	/// # Privacy
10291	///
10292	/// For payer privacy, uses a derived payer id and uses [`MessageRouter::create_blinded_paths`]
10293	/// to construct a [`BlindedPath`] for the reply path. For further privacy implications, see the
10294	/// docs of the parameterized [`Router`], which implements [`MessageRouter`].
10295	///
10296	/// # Limitations
10297	///
10298	/// Requires a direct connection to the given [`Destination`] as well as an introduction node in
10299	/// [`Offer::paths`] or to [`Offer::signing_pubkey`], if empty. A similar restriction applies to
10300	/// the responding [`Bolt12Invoice::payment_paths`].
10301	///
10302	/// # Errors
10303	///
10304	/// Errors if:
10305	/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link.
10306	///
10307	/// [`Bolt12Invoice::payment_paths`]: crate::offers::invoice::Bolt12Invoice::payment_paths
10308	/// [Avoiding Duplicate Payments]: #avoiding-duplicate-payments
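	///
	/// # Example
	///
	/// A minimal sketch, assuming `channel_manager` and a resolver [`Destination`] named
	/// `resolver_destination` are in scope; the BIP 353 name, amount, and retry strategy are
	/// illustrative, and `HumanReadableName::from_encoded` is assumed here as the way to parse
	/// the name:
	///
	/// ```ignore
	/// let name = HumanReadableName::from_encoded("user@example.com").expect("valid BIP 353 name");
	/// let payment_id = PaymentId([0; 32]);
	/// channel_manager
	///     .pay_for_offer_from_human_readable_name(
	///         name, 10_000_000, payment_id, Retry::Attempts(3), None, vec![resolver_destination],
	///     )
	///     .expect("a unique payment_id");
	/// ```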
10309	#[cfg(feature = "dnssec")]
10310	pub fn pay_for_offer_from_human_readable_name(
10311		&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
10312		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>,
10313		dns_resolvers: Vec<Destination>,
10314	) -> Result<(), ()> {
10315		let (onion_message, context) =
10316			self.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
10317		let reply_paths = self.create_blinded_paths(MessageContext::DNSResolver(context))?;
10318		let expiration = StaleExpiration::TimerTicks(1);
10319		self.pending_outbound_payments.add_new_awaiting_offer(payment_id, expiration, retry_strategy, max_total_routing_fee_msat, amount_msats)?;
10320		let message_params = dns_resolvers
10321			.iter()
10322			.flat_map(|destination| reply_paths.iter().map(move |path| (path, destination)))
10323			.take(OFFERS_MESSAGE_REQUEST_LIMIT);
10324		for (reply_path, destination) in message_params {
10325			self.pending_dns_onion_messages.lock().unwrap().push((
10326				DNSResolverMessage::DNSSECQuery(onion_message.clone()),
10327				MessageSendInstructions::WithSpecifiedReplyPath {
10328					destination: destination.clone(),
10329					reply_path: reply_path.clone(),
10330				},
10331			));
10332		}
10333		Ok(())
10334	}
10335
10336	/// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
10337	/// to pay us.
10338	///
10339	/// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
10340	/// [`PaymentHash`] and [`PaymentPreimage`] for you.
10341	///
10342	/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event,
10343	/// whose [`PaymentClaimable::purpose`] will return `Some` from [`PaymentPurpose::preimage`]. That
10344	/// preimage should then be passed directly to [`claim_funds`].
10345	///
10346	/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
10347	///
10348	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
10349	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
10350	///
10351	/// # Note
10352	///
10353	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
10354	/// deserialize it with a node running 0.0.103 or earlier, the payment will fail to be received.
10355	///
10356	/// Errors if `min_value_msat` is greater than total bitcoin supply.
10357	///
10358	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
10359	/// on versions of LDK prior to 0.0.114.
10360	///
10361	/// [`claim_funds`]: Self::claim_funds
10362	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
10363	/// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
10364	/// [`PaymentPurpose::preimage`]: events::PaymentPurpose::preimage
10365	/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
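	///
	/// # Example
	///
	/// A minimal sketch, assuming a `channel_manager` is in scope; the amount and expiry values
	/// are illustrative:
	///
	/// ```ignore
	/// // Expect at least 10,000 msat, with the invoice valid for one hour.
	/// let (payment_hash, payment_secret) = channel_manager
	///     .create_inbound_payment(Some(10_000), 3600, None)
	///     .expect("min_value_msat is below the total bitcoin supply");
	/// // Put `payment_hash` and `payment_secret` in an invoice, then claim the preimage from the
	/// // resulting `PaymentClaimable` event via `claim_funds`.
	/// ```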
10366	pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
10367		min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
10368		inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
10369			&self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10370			min_final_cltv_expiry_delta)
10371	}
10372
10373	/// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
10374	/// stored external to LDK.
10375	///
10376	/// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a
10377	/// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
10378	/// the `min_value_msat` provided here, if one is provided.
10379	///
10380	/// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though
10381	/// note that LDK will not stop you from registering duplicate payment hashes for inbound
10382	/// payments.
10383	///
10384	/// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
10385	/// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
10386	/// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the
10387	/// sender "proof-of-payment" unless they have paid the required amount.
10388	///
10389	/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
10390	/// in excess of the current time. This should roughly match the expiry time set in the invoice.
10391	/// After this many seconds, we will remove the inbound payment, resulting in any attempts to
10392	/// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
10393	/// invoices when no timeout is set.
10394	///
10395	/// Note that we use block header time to time-out pending inbound payments (with some margin
10396	/// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
10397	/// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry.
10398	/// If you need exact expiry semantics, you should enforce them upon receipt of
10399	/// [`PaymentClaimable`].
10400	///
10401	/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
10402	/// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
10403	///
10404	/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
10405	/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
10406	///
10407	/// # Note
10408	///
10409	/// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
10410	/// deserialize it with a node running 0.0.103 or earlier, the payment will fail to be received.
10411	///
10412	/// Errors if `min_value_msat` is greater than total bitcoin supply.
10413	///
10414	/// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
10415	/// on versions of LDK prior to 0.0.114.
10416	///
10417	/// [`create_inbound_payment`]: Self::create_inbound_payment
10418	/// [`PaymentClaimable`]: events::Event::PaymentClaimable
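	///
	/// # Example
	///
	/// A minimal sketch, assuming a `channel_manager` and a `payment_hash` whose preimage is
	/// stored outside of LDK; the amount and expiry values are illustrative:
	///
	/// ```ignore
	/// let payment_secret = channel_manager
	///     .create_inbound_payment_for_hash(payment_hash, Some(25_000), 7200, None)
	///     .expect("min_value_msat is below the total bitcoin supply");
	/// // Put `payment_hash` and `payment_secret` in an invoice; once `PaymentClaimable` fires,
	/// // claim with the externally stored preimage via `claim_funds`.
	/// ```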
10419	pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
10420		invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
10421		inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
10422			invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10423			min_final_cltv_expiry)
10424	}
10425
10426	/// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
10427	/// previously returned from [`create_inbound_payment`].
10428	///
10429	/// [`create_inbound_payment`]: Self::create_inbound_payment
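	///
	/// # Example
	///
	/// A minimal sketch, assuming `payment_hash` and `payment_secret` were previously returned by
	/// [`create_inbound_payment`] on the in-scope `channel_manager`:
	///
	/// ```ignore
	/// let preimage = channel_manager
	///     .get_payment_preimage(payment_hash, payment_secret)
	///     .expect("hash and secret from create_inbound_payment");
	/// channel_manager.claim_funds(preimage);
	/// ```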
10430	pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
10431		inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
10432	}
10433
10434	/// Creates a collection of blinded paths by delegating to [`MessageRouter`] based on
10435	/// the path's intended lifetime.
10436	///
10437	/// A compact path is used when the path is short-lived and a full-length path when it is
10438	/// long-lived, as determined by the given `absolute_expiry` in seconds since the Unix epoch.
10439	/// See [`MAX_SHORT_LIVED_RELATIVE_EXPIRY`].
10440	fn create_blinded_paths_using_absolute_expiry(
10441		&self, context: OffersContext, absolute_expiry: Option<Duration>,
10442	) -> Result<Vec<BlindedMessagePath>, ()> {
10443		let now = self.duration_since_epoch();
10444		let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
10445
10446		if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
10447			self.create_compact_blinded_paths(context)
10448		} else {
10449			self.create_blinded_paths(MessageContext::Offers(context))
10450		}
10451	}
10452
10453	pub(super) fn duration_since_epoch(&self) -> Duration {
10454		#[cfg(not(feature = "std"))]
10455		let now = Duration::from_secs(
10456			self.highest_seen_timestamp.load(Ordering::Acquire) as u64
10457		);
10458		#[cfg(feature = "std")]
10459		let now = std::time::SystemTime::now()
10460			.duration_since(std::time::SystemTime::UNIX_EPOCH)
10461			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
10462
10463		now
10464	}
10465
10466	/// Creates a collection of blinded paths by delegating to
10467	/// [`MessageRouter::create_blinded_paths`].
10468	///
10469	/// Errors if the `MessageRouter` errors.
10470	fn create_blinded_paths(&self, context: MessageContext) -> Result<Vec<BlindedMessagePath>, ()> {
10471		let recipient = self.get_our_node_id();
10472		let secp_ctx = &self.secp_ctx;
10473
10474		let peers = self.per_peer_state.read().unwrap()
10475			.iter()
10476			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10477			.filter(|(_, peer)| peer.is_connected)
10478			.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10479			.map(|(node_id, _)| *node_id)
10480			.collect::<Vec<_>>();
10481
10482		self.message_router
10483			.create_blinded_paths(recipient, context, peers, secp_ctx)
10484			.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10485	}
10486
10487	/// Creates a collection of blinded paths by delegating to
10488	/// [`MessageRouter::create_compact_blinded_paths`].
10489	///
10490	/// Errors if the `MessageRouter` errors.
10491	fn create_compact_blinded_paths(&self, context: OffersContext) -> Result<Vec<BlindedMessagePath>, ()> {
10492		let recipient = self.get_our_node_id();
10493		let secp_ctx = &self.secp_ctx;
10494
10495		let peers = self.per_peer_state.read().unwrap()
10496			.iter()
10497			.map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10498			.filter(|(_, peer)| peer.is_connected)
10499			.filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10500			.map(|(node_id, peer)| MessageForwardNode {
10501				node_id: *node_id,
10502				short_channel_id: peer.channel_by_id
10503					.iter()
10504					.filter(|(_, channel)| channel.context().is_usable())
10505					.min_by_key(|(_, channel)| channel.context().channel_creation_height)
10506					.and_then(|(_, channel)| channel.context().get_short_channel_id()),
10507			})
10508			.collect::<Vec<_>>();
10509
10510		self.message_router
10511			.create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx)
10512			.and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10513	}
10514
10515	/// Creates multi-hop blinded payment paths for the given `amount_msats` by delegating to
10516	/// [`Router::create_blinded_payment_paths`].
10517	fn create_blinded_payment_paths(
10518		&self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
10519	) -> Result<Vec<BlindedPaymentPath>, ()> {
10520		let expanded_key = &self.inbound_payment_key;
10521		let entropy = &*self.entropy_source;
10522		let secp_ctx = &self.secp_ctx;
10523
10524		let first_hops = self.list_usable_channels();
10525		let payee_node_id = self.get_our_node_id();
10526		let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
10527			+ LATENCY_GRACE_PERIOD_BLOCKS;
10528
10529		let payee_tlvs = UnauthenticatedReceiveTlvs {
10530			payment_secret,
10531			payment_constraints: PaymentConstraints {
10532				max_cltv_expiry,
10533				htlc_minimum_msat: 1,
10534			},
10535			payment_context,
10536		};
10537		let nonce = Nonce::from_entropy_source(entropy);
10538		let payee_tlvs = payee_tlvs.authenticate(nonce, expanded_key);
10539
10540		self.router.create_blinded_payment_paths(
10541			payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
10542		)
10543	}
10544
10545	/// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
10546	/// are used when constructing the phantom invoice's route hints.
10547	///
10548	/// [phantom node payments]: crate::sign::PhantomKeysManager
10549	pub fn get_phantom_scid(&self) -> u64 {
10550		let best_block_height = self.best_block.read().unwrap().height;
10551		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10552		loop {
10553			let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10554			// Ensure the generated scid doesn't conflict with a real channel.
10555			match short_to_chan_info.get(&scid_candidate) {
10556				Some(_) => continue,
10557				None => return scid_candidate
10558			}
10559		}
10560	}
10561
10562	/// Gets route hints for use in receiving [phantom node payments].
10563	///
10564	/// [phantom node payments]: crate::sign::PhantomKeysManager
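	///
	/// # Example
	///
	/// A minimal sketch of gathering hints from every node sharing the phantom key material,
	/// assuming `channel_managers` is a slice of those nodes' managers (the name is illustrative):
	///
	/// ```ignore
	/// let phantom_hints: Vec<PhantomRouteHints> = channel_managers
	///     .iter()
	///     .map(|cm| cm.get_phantom_route_hints())
	///     .collect();
	/// // Pass `phantom_hints` to your phantom invoice construction, e.g. the helpers in
	/// // `lightning-invoice`.
	/// ```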
10565	pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
10566		PhantomRouteHints {
10567			channels: self.list_usable_channels(),
10568			phantom_scid: self.get_phantom_scid(),
10569			real_node_pubkey: self.get_our_node_id(),
10570		}
10571	}
10572
10573	/// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are
10574	/// used when constructing the route hints for HTLCs intended to be intercepted. See
10575	/// [`ChannelManager::forward_intercepted_htlc`].
10576	///
10577	/// Note that this method is not guaranteed to return unique values; you may need to call it a few
10578	/// times to get a unique scid.
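	///
	/// # Example
	///
	/// A minimal sketch, assuming a `channel_manager` configured to accept intercepted HTLCs (see
	/// `UserConfig::accept_intercept_htlcs`):
	///
	/// ```ignore
	/// let intercept_scid = channel_manager.get_intercept_scid();
	/// // Use `intercept_scid` in an invoice route hint; when the HTLC arrives, an
	/// // `Event::HTLCIntercepted` will be generated and it can be forwarded or failed.
	/// ```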
10579	pub fn get_intercept_scid(&self) -> u64 {
10580		let best_block_height = self.best_block.read().unwrap().height;
10581		let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10582		loop {
10583			let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10584			// Ensure the generated scid doesn't conflict with a real channel.
10585			if short_to_chan_info.contains_key(&scid_candidate) { continue }
10586			return scid_candidate
10587		}
10588	}
10589
10590	/// Gets inflight HTLC information by processing pending outbound payments that are in
10591	/// our channels. May be used during pathfinding to account for in-use channel liquidity.
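	///
	/// # Example
	///
	/// A minimal sketch, assuming a `channel_manager` in scope; the computed value would typically
	/// be handed to your pathfinding call:
	///
	/// ```ignore
	/// let inflight_htlcs = channel_manager.compute_inflight_htlcs();
	/// // Pass `inflight_htlcs` to the router so liquidity already committed to pending payments
	/// // is accounted for when finding a route.
	/// ```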
10592	pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
10593		let mut inflight_htlcs = InFlightHtlcs::new();
10594
10595		let per_peer_state = self.per_peer_state.read().unwrap();
10596		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10597			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10598			let peer_state = &mut *peer_state_lock;
10599			for chan in peer_state.channel_by_id.values().filter_map(
10600				|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
10601			) {
10602				for (htlc_source, _) in chan.inflight_htlc_sources() {
10603					if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
10604						inflight_htlcs.process_path(path, self.get_our_node_id());
10605					}
10606				}
10607			}
10608		}
10609
10610		inflight_htlcs
10611	}
10612
10613	#[cfg(any(test, feature = "_test_utils"))]
10614	pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
10615		let events = core::cell::RefCell::new(Vec::new());
10616		let event_handler = |event: events::Event| Ok(events.borrow_mut().push(event));
10617		self.process_pending_events(&event_handler);
10618		events.into_inner()
10619	}
10620
10621	#[cfg(feature = "_test_utils")]
10622	pub fn push_pending_event(&self, event: events::Event) {
10623		let mut events = self.pending_events.lock().unwrap();
10624		events.push_back((event, None));
10625	}
10626
10627	#[cfg(test)]
10628	pub fn pop_pending_event(&self) -> Option<events::Event> {
10629		let mut events = self.pending_events.lock().unwrap();
10630		events.pop_front().map(|(e, _)| e)
10631	}
10632
10633	#[cfg(test)]
10634	pub fn has_pending_payments(&self) -> bool {
10635		self.pending_outbound_payments.has_pending_payments()
10636	}
10637
10638	#[cfg(test)]
10639	pub fn clear_pending_payments(&self) {
10640		self.pending_outbound_payments.clear_pending_payments()
10641	}
10642
10643	/// When something which was blocking a channel from updating its [`ChannelMonitor`] (e.g. an
10644	/// [`Event`] being handled) completes, this should be called to restore the channel to normal
10645	/// operation. It will double-check that nothing *else* is also blocking the same channel from
10646	/// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly.
10647	fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
10648		channel_funding_outpoint: OutPoint, channel_id: ChannelId,
10649		mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {
10650
10651		let logger = WithContext::from(
10652			&self.logger, Some(counterparty_node_id), Some(channel_id), None
10653		);
10654		loop {
10655			let per_peer_state = self.per_peer_state.read().unwrap();
10656			if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
10657				let mut peer_state_lck = peer_state_mtx.lock().unwrap();
10658				let peer_state = &mut *peer_state_lck;
10659				if let Some(blocker) = completed_blocker.take() {
10660					// Only do this on the first iteration of the loop.
10661					if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
10662						.get_mut(&channel_id)
10663					{
10664						blockers.retain(|iter| iter != &blocker);
10665					}
10666				}
10667
10668				if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
10669					channel_funding_outpoint, channel_id, counterparty_node_id) {
10670					// Check that, while holding the peer lock, we don't have anything else
10671					// blocking monitor updates for this channel. If we do, release the monitor
10672					// update(s) when those blockers complete.
10673					log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
10674						&channel_id);
10675					break;
10676				}
10677
10678				if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
10679					channel_id) {
10680					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
10681						debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
10682						if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
10683							log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
10684								channel_id);
10685							handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
10686								peer_state_lck, peer_state, per_peer_state, chan);
10687							if further_update_exists {
10688								// If there are more `ChannelMonitorUpdate`s to process, restart at the
10689								// top of the loop.
10690								continue;
10691							}
10692						} else {
10693							log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
10694								channel_id);
10695						}
10696					}
10697				}
10698			} else {
10699				log_debug!(logger,
10700					"Got a release post-RAA monitor update for peer {} but the channel is gone",
10701					log_pubkey!(counterparty_node_id));
10702			}
10703			break;
10704		}
10705	}
10706
10707	fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
10708		for action in actions {
10709			match action {
10710				EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
10711					channel_funding_outpoint, channel_id, counterparty_node_id
10712				} => {
10713					self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
10714				}
10715			}
10716		}
10717	}
10718
10719	/// Processes any events asynchronously in the order they were generated since the last call
10720	/// using the given event handler.
10721	///
10722	/// See the trait-level documentation of [`EventsProvider`] for requirements.
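	///
	/// # Example
	///
	/// A minimal sketch of an async handler, assuming a `channel_manager` in an async context; the
	/// handler body is illustrative:
	///
	/// ```ignore
	/// channel_manager.process_pending_events_async(|event| async move {
	///     // Handle (and persist anything needed for) the event before returning success.
	///     println!("Handling event: {:?}", event);
	///     Ok(())
	/// }).await;
	/// ```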
10723	pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
10724		&self, handler: H
10725	) {
10726		let mut ev;
10727		process_events_body!(self, ev, { handler(ev).await });
10728	}
10729}
10730
10731impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10732where
10733	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10734	T::Target: BroadcasterInterface,
10735	ES::Target: EntropySource,
10736	NS::Target: NodeSigner,
10737	SP::Target: SignerProvider,
10738	F::Target: FeeEstimator,
10739	R::Target: Router,
10740	MR::Target: MessageRouter,
10741	L::Target: Logger,
10742{
10743	/// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
10744	/// The returned array will contain `MessageSendEvent`s for different peers if
10745	/// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
10746	/// are always placed next to each other.
10747	///
10748	/// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
10749	/// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
10750	/// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
10751	/// will randomly be placed first or last in the returned array.
10752	///
10753	/// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
10754	/// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
10755	/// the `MessageSendEvent`s to the specific peer they were generated under.
10756	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
10757		let events = RefCell::new(Vec::new());
10758		PersistenceNotifierGuard::optionally_notify(self, || {
10759			let mut result = NotifyOption::SkipPersistNoEvents;
10760
10761			// TODO: This behavior should be documented. It's unintuitive that we query
10762			// ChannelMonitors when clearing other events.
10763			if self.process_pending_monitor_events() {
10764				result = NotifyOption::DoPersist;
10765			}
10766
10767			if self.check_free_holding_cells() {
10768				result = NotifyOption::DoPersist;
10769			}
10770			if self.maybe_generate_initial_closing_signed() {
10771				result = NotifyOption::DoPersist;
10772			}
10773
10774			let mut is_any_peer_connected = false;
10775			let mut pending_events = Vec::new();
10776			let per_peer_state = self.per_peer_state.read().unwrap();
10777			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10778				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10779				let peer_state = &mut *peer_state_lock;
10780				if peer_state.pending_msg_events.len() > 0 {
10781					pending_events.append(&mut peer_state.pending_msg_events);
10782				}
10783				if peer_state.is_connected {
10784					is_any_peer_connected = true
10785				}
10786			}
10787
10788			// Ensure that we are connected to some peers before getting broadcast messages.
10789			if is_any_peer_connected {
10790				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
10791				pending_events.append(&mut broadcast_msgs);
10792			}
10793
10794			if !pending_events.is_empty() {
10795				events.replace(pending_events);
10796			}
10797
10798			result
10799		});
10800		events.into_inner()
10801	}
10802}
10803
10804impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10805where
10806	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10807	T::Target: BroadcasterInterface,
10808	ES::Target: EntropySource,
10809	NS::Target: NodeSigner,
10810	SP::Target: SignerProvider,
10811	F::Target: FeeEstimator,
10812	R::Target: Router,
10813	MR::Target: MessageRouter,
10814	L::Target: Logger,
10815{
10816	/// Processes events that must be periodically handled.
10817	///
10818	/// An [`EventHandler`] may safely call back to the provider in order to handle an event.
10819	/// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
10820	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
10821		let mut ev;
10822		process_events_body!(self, ev, handler.handle_event(ev));
10823	}
10824}
10825
10826impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10827where
10828	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10829	T::Target: BroadcasterInterface,
10830	ES::Target: EntropySource,
10831	NS::Target: NodeSigner,
10832	SP::Target: SignerProvider,
10833	F::Target: FeeEstimator,
10834	R::Target: Router,
10835	MR::Target: MessageRouter,
10836	L::Target: Logger,
10837{
10838	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
10839		{
10840			let best_block = self.best_block.read().unwrap();
10841			assert_eq!(best_block.block_hash, header.prev_blockhash,
10842				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
10843			assert_eq!(best_block.height, height - 1,
10844				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
10845		}
10846
10847		self.transactions_confirmed(header, txdata, height);
10848		self.best_block_updated(header, height);
10849	}
10850
10851	fn block_disconnected(&self, header: &Header, height: u32) {
10852		let _persistence_guard =
10853			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10854				self, || -> NotifyOption { NotifyOption::DoPersist });
10855		let new_height = height - 1;
10856		{
10857			let mut best_block = self.best_block.write().unwrap();
10858			assert_eq!(best_block.block_hash, header.block_hash(),
10859				"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
10860			assert_eq!(best_block.height, height,
10861				"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
10862			*best_block = BestBlock::new(header.prev_blockhash, new_height)
10863		}
10864
10865		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
10866	}
10867}
10868
10869impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
10870where
10871	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
10872	T::Target: BroadcasterInterface,
10873	ES::Target: EntropySource,
10874	NS::Target: NodeSigner,
10875	SP::Target: SignerProvider,
10876	F::Target: FeeEstimator,
10877	R::Target: Router,
10878	MR::Target: MessageRouter,
10879	L::Target: Logger,
10880{
10881	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
10882		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
10883		// during initialization prior to the chain_monitor being fully configured in some cases.
10884		// See the docs for `ChannelManagerReadArgs` for more.
10885
10886		let block_hash = header.block_hash();
10887		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
10888
10889		let _persistence_guard =
10890			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10891				self, || -> NotifyOption { NotifyOption::DoPersist });
10892		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
10893			.map(|(a, b)| (a, Vec::new(), b)));
10894
10895		let last_best_block_height = self.best_block.read().unwrap().height;
10896		if height < last_best_block_height {
10897			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
10898			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
10899		}
10900	}
10901
10902	fn best_block_updated(&self, header: &Header, height: u32) {
10903		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
10904		// during initialization prior to the chain_monitor being fully configured in some cases.
10905		// See the docs for `ChannelManagerReadArgs` for more.
10906
10907		let block_hash = header.block_hash();
10908		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
10909
10910		let _persistence_guard =
10911			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10912				self, || -> NotifyOption { NotifyOption::DoPersist });
10913		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
10914
10915		let mut min_anchor_feerate = None;
10916		let mut min_non_anchor_feerate = None;
10917		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
10918			// If we're past the startup phase, update our feerate cache
10919			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
10920			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
10921				last_days_feerates.pop_front();
10922			}
10923			let anchor_feerate = self.fee_estimator
10924				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
10925			let non_anchor_feerate = self.fee_estimator
10926				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
10927			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
10928			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
10929				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
10930				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
10931			}
10932		}
10933
10934		self.do_chain_event(Some(height), |channel| {
10935			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
10936			if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
10937				if let Some(feerate) = min_anchor_feerate {
10938					channel.check_for_stale_feerate(&logger, feerate)?;
10939				}
10940			} else {
10941				if let Some(feerate) = min_non_anchor_feerate {
10942					channel.check_for_stale_feerate(&logger, feerate)?;
10943				}
10944			}
10945			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
10946		});
10947
10948		macro_rules! max_time {
10949			($timestamp: expr) => {
10950				loop {
10951					// Update $timestamp to be the max of its current value and the block
10952					// timestamp. This should keep us close to the current time without relying on
10953					// having an explicit local time source.
10954					// Just in case we end up in a race, we loop until we either successfully
10955					// update $timestamp or decide we don't need to.
10956					let old_serial = $timestamp.load(Ordering::Acquire);
10957					if old_serial >= header.time as usize { break; }
10958					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
10959						break;
10960					}
10961				}
10962			}
10963		}
10964		max_time!(self.highest_seen_timestamp);
10965		#[cfg(feature = "dnssec")] {
10966			let timestamp = self.highest_seen_timestamp.load(Ordering::Relaxed) as u32;
10967			self.hrn_resolver.new_best_block(height, timestamp);
10968		}
10969	}
10970
10971	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
10972		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
10973		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
10974			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10975			let peer_state = &mut *peer_state_lock;
10976			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
10977				let txid_opt = chan.context.get_funding_txo();
10978				let height_opt = chan.context.get_funding_tx_confirmation_height();
10979				let hash_opt = chan.context.get_funding_tx_confirmed_in();
10980				if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
10981					res.push((funding_txo.txid, conf_height, Some(block_hash)));
10982				}
10983			}
10984		}
10985		res
10986	}
10987
10988	fn transaction_unconfirmed(&self, txid: &Txid) {
10989		let _persistence_guard =
10990			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
10991				self, || -> NotifyOption { NotifyOption::DoPersist });
10992		self.do_chain_event(None, |channel| {
10993			if let Some(funding_txo) = channel.context.get_funding_txo() {
10994				if funding_txo.txid == *txid {
10995					channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
10996				} else { Ok((None, Vec::new(), None)) }
10997			} else { Ok((None, Vec::new(), None)) }
10998		});
10999	}
11000}
11001
11002impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11003where
11004	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11005	T::Target: BroadcasterInterface,
11006	ES::Target: EntropySource,
11007	NS::Target: NodeSigner,
11008	SP::Target: SignerProvider,
11009	F::Target: FeeEstimator,
11010	R::Target: Router,
11011	MR::Target: MessageRouter,
11012	L::Target: Logger,
11013{
11014	/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
11015	/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
11016	/// the function.
11017	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
11018			(&self, height_opt: Option<u32>, f: FN) {
11019		// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
11020		// during initialization prior to the chain_monitor being fully configured in some cases.
11021		// See the docs for `ChannelManagerReadArgs` for more.
11022
11023		let mut failed_channels = Vec::new();
11024		let mut timed_out_htlcs = Vec::new();
11025		{
11026			let per_peer_state = self.per_peer_state.read().unwrap();
11027			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
11028				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11029				let peer_state = &mut *peer_state_lock;
11030				let pending_msg_events = &mut peer_state.pending_msg_events;
11031
11032				peer_state.channel_by_id.retain(|_, phase| {
11033					match phase {
11034						// Retain unfunded channels.
11035						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
11036						ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
11037						ChannelPhase::Funded(channel) => {
11038							let res = f(channel);
11039							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
11040								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
11041									let failure_code = 0x1000|14; /* expiry_too_soon */
11042									let data = self.get_htlc_inbound_temp_fail_data(failure_code);
11043									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
11044										HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
11045								}
11046								let logger = WithChannelContext::from(&self.logger, &channel.context, None);
11047								if let Some(channel_ready) = channel_ready_opt {
11048									send_channel_ready!(self, pending_msg_events, channel, channel_ready);
11049									if channel.context.is_usable() {
11050										log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
11051										if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
11052											pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
11053												node_id: channel.context.get_counterparty_node_id(),
11054												msg,
11055											});
11056										}
11057									} else {
11058										log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
11059									}
11060								}
11061
11062								{
11063									let mut pending_events = self.pending_events.lock().unwrap();
11064									emit_channel_ready_event!(pending_events, channel);
11065								}
11066
11067								if let Some(height) = height_opt {
11068									// (re-)broadcast signed `channel_announcement`s and
11069									// `channel_update`s for any channels less than a week old.
11070									let funding_conf_height =
11071										channel.context.get_funding_tx_confirmation_height().unwrap_or(height);
11072									// To avoid broadcast storms after each block, only
11073									// re-broadcast every hour (6 blocks) after the initial
11074									// broadcast, or if this is the first time we're ready to
11075									// broadcast this channel.
11076									let rebroadcast_announcement = funding_conf_height < height + 1008
11077										&& funding_conf_height % 6 == height % 6;
11078									#[allow(unused_mut, unused_assignments)]
11079									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
11080									// Most of our tests were written when we only broadcasted
11081									// `channel_announcement`s once and then never re-broadcasted
11082									// them again, so disable the re-broadcasting entirely in tests
11083									#[cfg(test)]
11084									{
11085										should_announce = announcement_sigs.is_some();
11086									}
11087									if should_announce {
11088										if let Some(announcement) = channel.get_signed_channel_announcement(
11089											&self.node_signer, self.chain_hash, height, &self.default_configuration,
11090										) {
11091											pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
11092												msg: announcement,
11093												// Note that get_signed_channel_announcement fails
11094												// if the channel cannot be announced, so
11095												// get_channel_update_for_broadcast will never fail
11096												// by the time we get here.
11097												update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
11098											});
11099										}
11100									}
11101								}
11102								if let Some(announcement_sigs) = announcement_sigs {
11103									log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
11104									pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
11105										node_id: channel.context.get_counterparty_node_id(),
11106										msg: announcement_sigs,
11107									});
11108								}
11109								if channel.is_our_channel_ready() {
11110									if let Some(real_scid) = channel.context.get_short_channel_id() {
11111										// If we sent a 0conf channel_ready, and now have an SCID, we add it
11112										// to the short_to_chan_info map here. Note that we check whether we
11113										// can relay using the real SCID at relay-time (i.e.
11114										// enforce option_scid_alias then), and if the funding tx is ever
11115										// un-confirmed we force-close the channel, ensuring short_to_chan_info
11116										// is always consistent.
11117										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
11118										let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
11119										assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
11120											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
11121											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
11122									}
11123								}
11124							} else if let Err(reason) = res {
11125								// It looks like our counterparty went on-chain or funding transaction was
11126								// reorged out of the main chain. Close the channel.
11127								let reason_message = format!("{}", reason);
11128								let mut close_res = channel.context.force_shutdown(true, reason);
11129								locked_close_channel!(self, peer_state, &channel.context, close_res);
11130								failed_channels.push(close_res);
11131								if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
11132									let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
11133									pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
11134										msg: update
11135									});
11136								}
11137								pending_msg_events.push(events::MessageSendEvent::HandleError {
11138									node_id: channel.context.get_counterparty_node_id(),
11139									action: msgs::ErrorAction::DisconnectPeer {
11140										msg: Some(msgs::ErrorMessage {
11141											channel_id: channel.context.channel_id(),
11142											data: reason_message,
11143										})
11144									},
11145								});
11146								return false;
11147							}
11148							true
11149						}
11150					}
11151				});
11152			}
11153		}
11154
11155		if let Some(height) = height_opt {
11156			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
11157				payment.htlcs.retain(|htlc| {
11158					// If height is approaching the number of blocks we think it takes us to get
11159					// our commitment transaction confirmed before the HTLC expires, plus the
11160					// number of blocks we generally consider it to take to do a commitment update,
11161					// just give up on it and fail the HTLC.
11162					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
11163						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
11164						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
11165
11166						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
11167							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
11168							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
11169						false
11170					} else { true }
11171				});
11172				!payment.htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
11173			});
11174
11175			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
11176			intercepted_htlcs.retain(|_, htlc| {
11177				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
11178					let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
11179						short_channel_id: htlc.prev_short_channel_id,
11180						user_channel_id: Some(htlc.prev_user_channel_id),
11181						htlc_id: htlc.prev_htlc_id,
11182						incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
11183						phantom_shared_secret: None,
11184						counterparty_node_id: htlc.prev_counterparty_node_id,
11185						outpoint: htlc.prev_funding_outpoint,
11186						channel_id: htlc.prev_channel_id,
11187						blinded_failure: htlc.forward_info.routing.blinded_failure(),
11188						cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(),
11189					});
11190
11191					let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
11192						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
11193						_ => unreachable!(),
11194					};
11195					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
11196							HTLCFailReason::from_failure_code(0x2000 | 2),
11197							HTLCDestination::InvalidForward { requested_forward_scid }));
11198					let logger = WithContext::from(
11199						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
11200					);
11201					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
11202					false
11203				} else { true }
11204			});
11205		}
11206
11207		for failure in failed_channels {
11208			self.finish_close_channel(failure);
11209		}
11210
11211		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
11212			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
11213		}
11214	}
11215
11216	/// Gets a [`Future`] that completes when this [`ChannelManager`] may need to be persisted or
11217	/// may have events that need processing.
11218	///
11219	/// In order to check if this [`ChannelManager`] needs persisting, call
11220	/// [`Self::get_and_clear_needs_persistence`].
11221	///
11222	/// Note that callbacks registered on the [`Future`] MUST NOT call back into this
11223	/// [`ChannelManager`] and should instead register actions to be taken later.
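	///
	/// # Example
	///
	/// A minimal sketch of a background loop, assuming a `channel_manager` in an async context and
	/// a `persist` function of your own (both names are illustrative):
	///
	/// ```ignore
	/// loop {
	///     channel_manager.get_event_or_persistence_needed_future().await;
	///     if channel_manager.get_and_clear_needs_persistence() {
	///         persist(&channel_manager);
	///     }
	///     // Also process pending events here, as the future completes for those too.
	/// }
	/// ```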
11224	pub fn get_event_or_persistence_needed_future(&self) -> Future {
11225		self.event_persist_notifier.get_future()
11226	}
11227
11228	/// Returns true if this [`ChannelManager`] needs to be persisted.
11229	///
11230	/// See [`Self::get_event_or_persistence_needed_future`] for retrieving a [`Future`] that
11231	/// indicates this should be checked.
11232	pub fn get_and_clear_needs_persistence(&self) -> bool {
11233		self.needs_persist_flag.swap(false, Ordering::AcqRel)
11234	}
11235
11236	#[cfg(any(test, feature = "_test_utils"))]
11237	pub fn get_event_or_persist_condvar_value(&self) -> bool {
11238		self.event_persist_notifier.notify_pending()
11239	}
11240
11241	/// Gets the latest best block which was connected either via the [`chain::Listen`] or
11242	/// [`chain::Confirm`] interfaces.
11243	pub fn current_best_block(&self) -> BestBlock {
11244		self.best_block.read().unwrap().clone()
11245	}
11246
11247	/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
11248	/// [`ChannelManager`].
11249	pub fn node_features(&self) -> NodeFeatures {
11250		provided_node_features(&self.default_configuration)
11251	}
11252
11253	/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
11254	/// [`ChannelManager`].
11255	///
11256	/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom
11257	/// invoice" or not. Thus, this method is not public.
11258	#[cfg(any(feature = "_test_utils", test))]
11259	pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
11260		provided_bolt11_invoice_features(&self.default_configuration)
11261	}
11262
11263	/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
11264	/// [`ChannelManager`].
11265	fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
11266		provided_bolt12_invoice_features(&self.default_configuration)
11267	}
11268
11269	/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
11270	/// [`ChannelManager`].
11271	pub fn channel_features(&self) -> ChannelFeatures {
11272		provided_channel_features(&self.default_configuration)
11273	}
11274
11275	/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
11276	/// [`ChannelManager`].
11277	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
11278		provided_channel_type_features(&self.default_configuration)
11279	}
11280
11281	/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
11282	/// [`ChannelManager`].
11283	pub fn init_features(&self) -> InitFeatures {
11284		provided_init_features(&self.default_configuration)
11285	}
11286}
11287
11288impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11289	ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11290where
11291	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11292	T::Target: BroadcasterInterface,
11293	ES::Target: EntropySource,
11294	NS::Target: NodeSigner,
11295	SP::Target: SignerProvider,
11296	F::Target: FeeEstimator,
11297	R::Target: Router,
11298	MR::Target: MessageRouter,
11299	L::Target: Logger,
11300{
11301	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
11302		// Note that we never need to persist the updated ChannelManager for an inbound
11303		// open_channel message - pre-funded channels are never written so there should be no
11304		// change to the contents.
11305		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11306			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
11307			let persist = match &res {
11308				Err(e) if e.closes_channel() => {
11309					debug_assert!(false, "We shouldn't close a new channel");
11310					NotifyOption::DoPersist
11311				},
11312				_ => NotifyOption::SkipPersistHandleEvents,
11313			};
11314			let _ = handle_error!(self, res, counterparty_node_id);
11315			persist
11316		});
11317	}
11318
11319	fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
11320		// Note that we never need to persist the updated ChannelManager for an inbound
11321		// open_channel_v2 message - pre-funded channels are never written so there should be no
11322		// change to the contents.
11323		#[cfg(dual_funding)]
11324		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11325			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
11326			let persist = match &res {
11327				Err(e) if e.closes_channel() => {
11328					debug_assert!(false, "We shouldn't close a new channel");
11329					NotifyOption::DoPersist
11330				},
11331				_ => NotifyOption::SkipPersistHandleEvents,
11332			};
11333			let _ = handle_error!(self, res, counterparty_node_id);
11334			persist
11335		});
11336		#[cfg(not(dual_funding))]
11337		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11338			"Dual-funded channels not supported".to_owned(),
11339			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
11340	}
11341
11342	fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
11343		// Note that we never need to persist the updated ChannelManager for an inbound
11344		// accept_channel message - pre-funded channels are never written so there should be no
11345		// change to the contents.
11346		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11347			let _ = handle_error!(self, self.internal_accept_channel(&counterparty_node_id, msg), counterparty_node_id);
11348			NotifyOption::SkipPersistHandleEvents
11349		});
11350	}
11351
11352	fn handle_accept_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
11353		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11354			"Dual-funded channels not supported".to_owned(),
11355			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
11356	}
11357
11358	fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
11359		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11360		let _ = handle_error!(self, self.internal_funding_created(&counterparty_node_id, msg), counterparty_node_id);
11361	}
11362
11363	fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
11364		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11365		let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
11366	}
11367
11368	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
11369		// Note that we never need to persist the updated ChannelManager for an inbound
11370		// channel_ready message - while the channel's state will change, any channel_ready message
11371		// will ultimately be re-sent on startup and the `ChannelMonitor` won't be updated so we
11372		// will not force-close the channel on startup.
11373		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11374			let res = self.internal_channel_ready(&counterparty_node_id, msg);
11375			let persist = match &res {
11376				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11377				_ => NotifyOption::SkipPersistHandleEvents,
11378			};
11379			let _ = handle_error!(self, res, counterparty_node_id);
11380			persist
11381		});
11382	}
11383
11384	fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
11385		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11386			"Quiescence not supported".to_owned(),
11387			msg.channel_id.clone())), counterparty_node_id);
11388	}
11389
11390	#[cfg(splicing)]
11391	fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
11392		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11393			"Splicing not supported".to_owned(),
11394			msg.channel_id.clone())), counterparty_node_id);
11395	}
11396
11397	#[cfg(splicing)]
11398	fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
11399		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11400			"Splicing not supported (splice_ack)".to_owned(),
11401			msg.channel_id.clone())), counterparty_node_id);
11402	}
11403
11404	#[cfg(splicing)]
11405	fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
11406		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11407			"Splicing not supported (splice_locked)".to_owned(),
11408			msg.channel_id.clone())), counterparty_node_id);
11409	}
11410
11411	fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
11412		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11413		let _ = handle_error!(self, self.internal_shutdown(&counterparty_node_id, msg), counterparty_node_id);
11414	}
11415
11416	fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
11417		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11418		let _ = handle_error!(self, self.internal_closing_signed(&counterparty_node_id, msg), counterparty_node_id);
11419	}
11420
11421	fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
11422		// Note that we never need to persist the updated ChannelManager for an inbound
11423		// update_add_htlc message - the message itself doesn't change our channel state; only the
11424		// `commitment_signed` message afterwards will.
11425		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11426			let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
11427			let persist = match &res {
11428				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11429				Err(_) => NotifyOption::SkipPersistHandleEvents,
11430				Ok(()) => NotifyOption::SkipPersistNoEvents,
11431			};
11432			let _ = handle_error!(self, res, counterparty_node_id);
11433			persist
11434		});
11435	}
11436
11437	fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
11438		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11439		let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id);
11440	}
11441
11442	fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
11443		// Note that we never need to persist the updated ChannelManager for an inbound
11444		// update_fail_htlc message - the message itself doesn't change our channel state; only the
11445		// `commitment_signed` message afterwards will.
11446		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11447			let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
11448			let persist = match &res {
11449				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11450				Err(_) => NotifyOption::SkipPersistHandleEvents,
11451				Ok(()) => NotifyOption::SkipPersistNoEvents,
11452			};
11453			let _ = handle_error!(self, res, counterparty_node_id);
11454			persist
11455		});
11456	}
11457
11458	fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
11459		// Note that we never need to persist the updated ChannelManager for an inbound
11460		// update_fail_malformed_htlc message - the message itself doesn't change our channel state;
11461		// only the `commitment_signed` message afterwards will.
11462		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11463			let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
11464			let persist = match &res {
11465				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11466				Err(_) => NotifyOption::SkipPersistHandleEvents,
11467				Ok(()) => NotifyOption::SkipPersistNoEvents,
11468			};
11469			let _ = handle_error!(self, res, counterparty_node_id);
11470			persist
11471		});
11472	}
11473
11474	fn handle_commitment_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
11475		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11476		let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id);
11477	}
11478
11479	fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
11480		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11481		let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id);
11482	}
11483
11484	fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
11485		// Note that we never need to persist the updated ChannelManager for an inbound
11486		// update_fee message - the message itself doesn't change our channel state; only the
11487		// `commitment_signed` message afterwards will.
11488		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11489			let res = self.internal_update_fee(&counterparty_node_id, msg);
11490			let persist = match &res {
11491				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11492				Err(_) => NotifyOption::SkipPersistHandleEvents,
11493				Ok(()) => NotifyOption::SkipPersistNoEvents,
11494			};
11495			let _ = handle_error!(self, res, counterparty_node_id);
11496			persist
11497		});
11498	}
11499
11500	fn handle_announcement_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
11501		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11502		let _ = handle_error!(self, self.internal_announcement_signatures(&counterparty_node_id, msg), counterparty_node_id);
11503	}
11504
11505	fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
11506		PersistenceNotifierGuard::optionally_notify(self, || {
11507			if let Ok(persist) = handle_error!(self, self.internal_channel_update(&counterparty_node_id, msg), counterparty_node_id) {
11508				persist
11509			} else {
11510				NotifyOption::DoPersist
11511			}
11512		});
11513	}
11514
11515	fn handle_channel_reestablish(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
11516		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11517			let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
11518			let persist = match &res {
11519				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
11520				Err(_) => NotifyOption::SkipPersistHandleEvents,
11521				Ok(persist) => *persist,
11522			};
11523			let _ = handle_error!(self, res, counterparty_node_id);
11524			persist
11525		});
11526	}
11527
11528	fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
11529		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
11530			self, || NotifyOption::SkipPersistHandleEvents);
11531		let mut failed_channels = Vec::new();
11532		let mut per_peer_state = self.per_peer_state.write().unwrap();
11533		let remove_peer = {
11534			log_debug!(
11535				WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
11536				"Marking channels with {} disconnected and generating channel_updates.",
11537				log_pubkey!(counterparty_node_id)
11538			);
11539			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11540				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11541				let peer_state = &mut *peer_state_lock;
11542				let pending_msg_events = &mut peer_state.pending_msg_events;
11543				peer_state.channel_by_id.retain(|_, phase| {
11544					let context = match phase {
11545						ChannelPhase::Funded(chan) => {
11546							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11547							if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
11548								// We only retain funded channels that are not shutdown.
11549								return true;
11550							}
11551							&mut chan.context
11552						},
11553						// If we get disconnected and haven't yet committed to a funding
11554						// transaction, we can replay the `open_channel` on reconnection, so don't
11555						// bother dropping the channel here. However, if we already committed to
11556						// the funding transaction we don't yet support replaying the funding
11557						// handshake (and bailing if the peer rejects it), so we force-close in
11558						// that case.
11559						ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
11560						ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
11561						// Unfunded inbound channels will always be removed.
11562						ChannelPhase::UnfundedInboundV1(chan) => {
11563							&mut chan.context
11564						},
11565						ChannelPhase::UnfundedOutboundV2(chan) => {
11566							&mut chan.context
11567						},
11568						ChannelPhase::UnfundedInboundV2(chan) => {
11569							&mut chan.context
11570						},
11571					};
11572					// Clean up for removal.
11573					let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer);
11574					locked_close_channel!(self, peer_state, &context, close_res);
11575					failed_channels.push(close_res);
11576					false
11577				});
11578				// Note that we don't bother generating any events for pre-accept channels -
11579				// they're not considered "channels" yet from the PoV of our events interface.
11580				peer_state.inbound_channel_request_by_id.clear();
11581				pending_msg_events.retain(|msg| {
11582					match msg {
11583						// V1 Channel Establishment
11584						&events::MessageSendEvent::SendAcceptChannel { .. } => false,
11585						&events::MessageSendEvent::SendOpenChannel { .. } => false,
11586						&events::MessageSendEvent::SendFundingCreated { .. } => false,
11587						&events::MessageSendEvent::SendFundingSigned { .. } => false,
11588						// V2 Channel Establishment
11589						&events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
11590						&events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
11591						// Common Channel Establishment
11592						&events::MessageSendEvent::SendChannelReady { .. } => false,
11593						&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
11594						// Quiescence
11595						&events::MessageSendEvent::SendStfu { .. } => false,
11596						// Splicing
11597						&events::MessageSendEvent::SendSpliceInit { .. } => false,
11598						&events::MessageSendEvent::SendSpliceAck { .. } => false,
11599						&events::MessageSendEvent::SendSpliceLocked { .. } => false,
11600						// Interactive Transaction Construction
11601						&events::MessageSendEvent::SendTxAddInput { .. } => false,
11602						&events::MessageSendEvent::SendTxAddOutput { .. } => false,
11603						&events::MessageSendEvent::SendTxRemoveInput { .. } => false,
11604						&events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
11605						&events::MessageSendEvent::SendTxComplete { .. } => false,
11606						&events::MessageSendEvent::SendTxSignatures { .. } => false,
11607						&events::MessageSendEvent::SendTxInitRbf { .. } => false,
11608						&events::MessageSendEvent::SendTxAckRbf { .. } => false,
11609						&events::MessageSendEvent::SendTxAbort { .. } => false,
11610						// Channel Operations
11611						&events::MessageSendEvent::UpdateHTLCs { .. } => false,
11612						&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
11613						&events::MessageSendEvent::SendClosingSigned { .. } => false,
11614						&events::MessageSendEvent::SendShutdown { .. } => false,
11615						&events::MessageSendEvent::SendChannelReestablish { .. } => false,
11616						&events::MessageSendEvent::HandleError { .. } => false,
11617						// Gossip
11618						&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
11619						&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
11620						// [`ChannelManager::pending_broadcast_messages`] holds the [`BroadcastChannelUpdate`]
11621						// This check here is to ensure exhaustivity.
11622						&events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
11623							debug_assert!(false, "This event shouldn't have been here");
11624							false
11625						},
11626						&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
11627						&events::MessageSendEvent::SendChannelUpdate { .. } => false,
11628						&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
11629						&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
11630						&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
11631						&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
11632					}
11633				});
11634				debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
11635				peer_state.is_connected = false;
11636				peer_state.ok_to_remove(true)
11637			} else { debug_assert!(false, "Unconnected peer disconnected"); true }
11638		};
11639		if remove_peer {
11640			per_peer_state.remove(&counterparty_node_id);
11641		}
11642		mem::drop(per_peer_state);
11643
11644		for failure in failed_channels.drain(..) {
11645			self.finish_close_channel(failure);
11646		}
11647	}
11648
11649	fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
11650		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
11651		if !init_msg.features.supports_static_remote_key() {
11652			log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
11653			return Err(());
11654		}
11655
11656		let mut res = Ok(());
11657
11658		PersistenceNotifierGuard::optionally_notify(self, || {
11659			// If we have too many peers connected which don't have funded channels, disconnect the
11660			// peer immediately (as long as it doesn't have funded channels). If we have a bunch of
11661			// unfunded channels taking up space in memory for disconnected peers, we still let new
11662			// peers connect, but we'll reject new channels from them.
11663			let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
11664			let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
11665
11666			{
11667				let mut peer_state_lock = self.per_peer_state.write().unwrap();
11668				match peer_state_lock.entry(counterparty_node_id.clone()) {
11669					hash_map::Entry::Vacant(e) => {
11670						if inbound_peer_limited {
11671							res = Err(());
11672							return NotifyOption::SkipPersistNoEvents;
11673						}
11674						e.insert(Mutex::new(PeerState {
11675							channel_by_id: new_hash_map(),
11676							inbound_channel_request_by_id: new_hash_map(),
11677							latest_features: init_msg.features.clone(),
11678							pending_msg_events: Vec::new(),
11679							in_flight_monitor_updates: BTreeMap::new(),
11680							monitor_update_blocked_actions: BTreeMap::new(),
11681							actions_blocking_raa_monitor_updates: BTreeMap::new(),
11682							closed_channel_monitor_update_ids: BTreeMap::new(),
11683							is_connected: true,
11684						}));
11685					},
11686					hash_map::Entry::Occupied(e) => {
11687						let mut peer_state = e.get().lock().unwrap();
11688						peer_state.latest_features = init_msg.features.clone();
11689
11690						let best_block_height = self.best_block.read().unwrap().height;
11691						if inbound_peer_limited &&
11692							Self::unfunded_channel_count(&*peer_state, best_block_height) ==
11693							peer_state.channel_by_id.len()
11694						{
11695							res = Err(());
11696							return NotifyOption::SkipPersistNoEvents;
11697						}
11698
11699						debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
11700						peer_state.is_connected = true;
11701					},
11702				}
11703			}
11704
11705			log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
11706
11707			let per_peer_state = self.per_peer_state.read().unwrap();
11708			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
11709				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
11710				let peer_state = &mut *peer_state_lock;
11711				let pending_msg_events = &mut peer_state.pending_msg_events;
11712
11713				for (_, phase) in peer_state.channel_by_id.iter_mut() {
11714					match phase {
11715						ChannelPhase::Funded(chan) => {
11716							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11717							pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
11718								node_id: chan.context.get_counterparty_node_id(),
11719								msg: chan.get_channel_reestablish(&&logger),
11720							});
11721						}
11722
11723						ChannelPhase::UnfundedOutboundV1(chan) => {
11724							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11725							if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) {
11726								pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11727									node_id: chan.context.get_counterparty_node_id(),
11728									msg,
11729								});
11730							}
11731						}
11732
11733						ChannelPhase::UnfundedOutboundV2(chan) => {
11734							pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11735								node_id: chan.context.get_counterparty_node_id(),
11736								msg: chan.get_open_channel_v2(self.chain_hash),
11737							});
11738						},
11739
11740						ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => {
11741							// Since unfunded inbound channel maps are cleared upon disconnecting a peer,
11742							// they are not persisted and won't be recovered after a crash.
11743							// Therefore, they shouldn't exist at this point.
11744							debug_assert!(false);
11745						}
11746					}
11747				}
11748			}
11749
11750			return NotifyOption::SkipPersistHandleEvents;
11751			//TODO: Also re-broadcast announcement_signatures
11752		});
11753		res
11754	}
11755
11756	fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
11757		match &msg.data as &str {
11758			"cannot co-op close channel w/ active htlcs"|
11759			"link failed to shutdown" =>
11760			{
11761				// LND hasn't properly handled shutdown messages ever, and force-closes any time we
11762				// send one while HTLCs are still present. The issue is tracked at
11763				// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
11764				// to fix it but none so far have managed to land upstream. The issue appears to be
11765				// very low priority for the LND team despite being marked "P1".
11766				// We're not going to bother handling this in a sensible way, instead simply
11767				// repeating the Shutdown message on repeat until morale improves.
11768				if !msg.channel_id.is_zero() {
11769					PersistenceNotifierGuard::optionally_notify(
11770						self,
11771						|| -> NotifyOption {
11772							let per_peer_state = self.per_peer_state.read().unwrap();
11773							let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11774							if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
11775							let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
11776							if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
11777								if let Some(msg) = chan.get_outbound_shutdown() {
11778									peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
11779										node_id: counterparty_node_id,
11780										msg,
11781									});
11782								}
11783								peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
11784									node_id: counterparty_node_id,
11785									action: msgs::ErrorAction::SendWarningMessage {
11786										msg: msgs::WarningMessage {
11787											channel_id: msg.channel_id,
11788											data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
11789										},
11790										log_level: Level::Trace,
11791									}
11792								});
11793								// This can happen in a fairly tight loop, so we absolutely cannot trigger
11794								// a `ChannelManager` write here.
11795								return NotifyOption::SkipPersistHandleEvents;
11796							}
11797							NotifyOption::SkipPersistNoEvents
11798						}
11799					);
11800				}
11801				return;
11802			}
11803			_ => {}
11804		}
11805
11806		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11807
11808		if msg.channel_id.is_zero() {
11809			let channel_ids: Vec<ChannelId> = {
11810				let per_peer_state = self.per_peer_state.read().unwrap();
11811				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11812				if peer_state_mutex_opt.is_none() { return; }
11813				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11814				let peer_state = &mut *peer_state_lock;
11815				// Note that we don't bother generating any events for pre-accept channels -
11816				// they're not considered "channels" yet from the PoV of our events interface.
11817				peer_state.inbound_channel_request_by_id.clear();
11818				peer_state.channel_by_id.keys().cloned().collect()
11819			};
11820			for channel_id in channel_ids {
11821				// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
11822				let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true);
11823			}
11824		} else {
11825			{
11826				// First check if we can advance the channel type and try again.
11827				let per_peer_state = self.per_peer_state.read().unwrap();
11828				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
11829				if peer_state_mutex_opt.is_none() { return; }
11830				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
11831				let peer_state = &mut *peer_state_lock;
11832				match peer_state.channel_by_id.get_mut(&msg.channel_id) {
11833					Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
11834						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
11835						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) {
11836							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
11837								node_id: counterparty_node_id,
11838								msg,
11839							});
11840							return;
11841						}
11842					},
11843					Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
11844						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
11845							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
11846								node_id: counterparty_node_id,
11847								msg,
11848							});
11849							return;
11850						}
11851					},
11852					None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::Funded(_)) => (),
11853				}
11854			}
11855
11856			// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
11857			let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true);
11858		}
11859	}
11860
11861	fn provided_node_features(&self) -> NodeFeatures {
11862		provided_node_features(&self.default_configuration)
11863	}
11864
11865	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
11866		provided_init_features(&self.default_configuration)
11867	}
11868
11869	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
11870		Some(vec![self.chain_hash])
11871	}
11872
11873	fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
11874		// Note that we never need to persist the updated ChannelManager for an inbound
11875		// tx_add_input message - interactive transaction construction does not need to
11876		// be persisted before any signatures are exchanged.
11877		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11878			let _ = handle_error!(self, self.internal_tx_add_input(counterparty_node_id, msg), counterparty_node_id);
11879			NotifyOption::SkipPersistHandleEvents
11880		});
11881	}
11882
11883	fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
11884		// Note that we never need to persist the updated ChannelManager for an inbound
11885		// tx_add_output message - interactive transaction construction does not need to
11886		// be persisted before any signatures are exchanged.
11887		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11888			let _ = handle_error!(self, self.internal_tx_add_output(counterparty_node_id, msg), counterparty_node_id);
11889			NotifyOption::SkipPersistHandleEvents
11890		});
11891	}
11892
11893	fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
11894		// Note that we never need to persist the updated ChannelManager for an inbound
11895		// tx_remove_input message - interactive transaction construction does not need to
11896		// be persisted before any signatures are exchanged.
11897		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11898			let _ = handle_error!(self, self.internal_tx_remove_input(counterparty_node_id, msg), counterparty_node_id);
11899			NotifyOption::SkipPersistHandleEvents
11900		});
11901	}
11902
11903	fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
11904		// Note that we never need to persist the updated ChannelManager for an inbound
11905		// tx_remove_output message - interactive transaction construction does not need to
11906		// be persisted before any signatures are exchanged.
11907		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11908			let _ = handle_error!(self, self.internal_tx_remove_output(counterparty_node_id, msg), counterparty_node_id);
11909			NotifyOption::SkipPersistHandleEvents
11910		});
11911	}
11912
11913	fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
11914		// Note that we never need to persist the updated ChannelManager for an inbound
11915		// tx_complete message - interactive transaction construction does not need to
11916		// be persisted before any signatures are exchanged.
11917		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11918			let _ = handle_error!(self, self.internal_tx_complete(counterparty_node_id, msg), counterparty_node_id);
11919			NotifyOption::SkipPersistHandleEvents
11920		});
11921	}
11922
11923	fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
11924		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
11925		let _ = handle_error!(self, self.internal_tx_signatures(&counterparty_node_id, msg), counterparty_node_id);
11926	}
11927
11928	fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
11929		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11930			"Dual-funded channels not supported".to_owned(),
11931			msg.channel_id.clone())), counterparty_node_id);
11932	}
11933
11934	fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
11935		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
11936			"Dual-funded channels not supported".to_owned(),
11937			msg.channel_id.clone())), counterparty_node_id);
11938	}
11939
11940	fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
11941		// Note that we never need to persist the updated ChannelManager for an inbound
11942		// tx_abort message - interactive transaction construction does not need to
11943		// be persisted before any signatures are exchanged.
11944		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
11945			let _ = handle_error!(self, self.internal_tx_abort(&counterparty_node_id, msg), counterparty_node_id);
11946			NotifyOption::SkipPersistHandleEvents
11947		});
11948	}
11949
11950	fn message_received(&self) {
11951		for (payment_id, retryable_invoice_request) in self
11952			.pending_outbound_payments
11953			.release_invoice_requests_awaiting_invoice()
11954		{
11955			let RetryableInvoiceRequest { invoice_request, nonce } = retryable_invoice_request;
11956			let hmac = payment_id.hmac_for_offer_payment(nonce, &self.inbound_payment_key);
11957			let context = MessageContext::Offers(OffersContext::OutboundPayment {
11958				payment_id,
11959				nonce,
11960				hmac: Some(hmac)
11961			});
11962			match self.create_blinded_paths(context) {
11963				Ok(reply_paths) => match self.enqueue_invoice_request(invoice_request, reply_paths) {
11964					Ok(_) => {}
11965					Err(_) => {
11966						log_warn!(self.logger,
11967							"Retry failed for an invoice request with payment_id: {}",
11968							payment_id
11969						);
11970					}
11971				},
11972				Err(_) => {
11973					log_warn!(self.logger,
11974						"Retry failed for an invoice request with payment_id: {}. \
11975							Reason: router could not find a blinded path to include as the reply path",
11976						payment_id
11977					);
11978				}
11979			}
11980		}
11981	}
11982}
11983
11984impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11985OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11986where
11987	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11988	T::Target: BroadcasterInterface,
11989	ES::Target: EntropySource,
11990	NS::Target: NodeSigner,
11991	SP::Target: SignerProvider,
11992	F::Target: FeeEstimator,
11993	R::Target: Router,
11994	MR::Target: MessageRouter,
11995	L::Target: Logger,
11996{
11997	fn handle_message(
11998		&self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
11999	) -> Option<(OffersMessage, ResponseInstruction)> {
12000		let secp_ctx = &self.secp_ctx;
12001		let expanded_key = &self.inbound_payment_key;
12002
12003		macro_rules! handle_pay_invoice_res {
12004			($res: expr, $invoice: expr, $logger: expr) => {{
12005				let error = match $res {
12006					Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
12007						log_trace!(
12008							$logger, "Invoice requires unknown features: {:?}",
12009							$invoice.invoice_features()
12010						);
12011						InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
12012					},
12013					Err(Bolt12PaymentError::SendingFailed(e)) => {
12014						log_trace!($logger, "Failed paying invoice: {:?}", e);
12015						InvoiceError::from_string(format!("{:?}", e))
12016					},
12017					#[cfg(async_payments)]
12018					Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
12019						let err_msg = "Failed to create a blinded path back to ourselves";
12020						log_trace!($logger, "{}", err_msg);
12021						InvoiceError::from_string(err_msg.to_string())
12022					},
12023					Err(Bolt12PaymentError::UnexpectedInvoice)
12024						| Err(Bolt12PaymentError::DuplicateInvoice)
12025						| Ok(()) => return None,
12026				};
12027
12028				match responder {
12029					Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
12030					None => {
12031						log_trace!($logger, "No reply path to send error: {:?}", error);
12032						return None
12033					},
12034				}
12035			}}
12036		}
12037
12038		match message {
12039			OffersMessage::InvoiceRequest(invoice_request) => {
12040				let responder = match responder {
12041					Some(responder) => responder,
12042					None => return None,
12043				};
12044
12045				let nonce = match context {
12046					None if invoice_request.metadata().is_some() => None,
12047					Some(OffersContext::InvoiceRequest { nonce }) => Some(nonce),
12048					_ => return None,
12049				};
12050
12051				let invoice_request = match nonce {
12052					Some(nonce) => match invoice_request.verify_using_recipient_data(
12053						nonce, expanded_key, secp_ctx,
12054					) {
12055						Ok(invoice_request) => invoice_request,
12056						Err(()) => return None,
12057					},
12058					None => match invoice_request.verify_using_metadata(expanded_key, secp_ctx) {
12059						Ok(invoice_request) => invoice_request,
12060						Err(()) => return None,
12061					},
12062				};
12063
12064				let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
12065					&invoice_request.inner
12066				) {
12067					Ok(amount_msats) => amount_msats,
12068					Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12069				};
12070
12071				let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
12072				let (payment_hash, payment_secret) = match self.create_inbound_payment(
12073					Some(amount_msats), relative_expiry, None
12074				) {
12075					Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
12076					Err(()) => {
12077						let error = Bolt12SemanticError::InvalidAmount;
12078						return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12079					},
12080				};
12081
12082				let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
12083					offer_id: invoice_request.offer_id,
12084					invoice_request: invoice_request.fields(),
12085				});
12086				let payment_paths = match self.create_blinded_payment_paths(
12087					amount_msats, payment_secret, payment_context
12088				) {
12089					Ok(payment_paths) => payment_paths,
12090					Err(()) => {
12091						let error = Bolt12SemanticError::MissingPaths;
12092						return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12093					},
12094				};
12095
12096				#[cfg(not(feature = "std"))]
12097				let created_at = Duration::from_secs(
12098					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
12099				);
12100
12101				let response = if invoice_request.keys.is_some() {
12102					#[cfg(feature = "std")]
12103					let builder = invoice_request.respond_using_derived_keys(
12104						payment_paths, payment_hash
12105					);
12106					#[cfg(not(feature = "std"))]
12107					let builder = invoice_request.respond_using_derived_keys_no_std(
12108						payment_paths, payment_hash, created_at
12109					);
12110					builder
12111						.map(InvoiceBuilder::<DerivedSigningPubkey>::from)
12112						.and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
12113						.map_err(InvoiceError::from)
12114				} else {
12115					#[cfg(feature = "std")]
12116					let builder = invoice_request.respond_with(payment_paths, payment_hash);
12117					#[cfg(not(feature = "std"))]
12118					let builder = invoice_request.respond_with_no_std(
12119						payment_paths, payment_hash, created_at
12120					);
12121					builder
12122						.map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
12123						.and_then(|builder| builder.allow_mpp().build())
12124						.map_err(InvoiceError::from)
12125						.and_then(|invoice| {
12126							#[cfg(c_bindings)]
12127							let mut invoice = invoice;
12128							invoice
12129								.sign(|invoice: &UnsignedBolt12Invoice|
12130									self.node_signer.sign_bolt12_invoice(invoice)
12131								)
12132								.map_err(InvoiceError::from)
12133						})
12134				};
12135
12136				match response {
12137					Ok(invoice) => {
12138						let nonce = Nonce::from_entropy_source(&*self.entropy_source);
12139						let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
12140						let context = MessageContext::Offers(OffersContext::InboundPayment { payment_hash, nonce, hmac });
12141						Some((OffersMessage::Invoice(invoice), responder.respond_with_reply_path(context)))
12142					},
12143					Err(error) => Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12144				}
12145			},
12146			OffersMessage::Invoice(invoice) => {
12147				let payment_id = match self.verify_bolt12_invoice(&invoice, context.as_ref()) {
12148					Ok(payment_id) => payment_id,
12149					Err(()) => return None,
12150				};
12151
12152				let logger = WithContext::from(
12153					&self.logger, None, None, Some(invoice.payment_hash()),
12154				);
12155
12156				if self.default_configuration.manually_handle_bolt12_invoices {
12157					// Update the corresponding entry in `PendingOutboundPayment` for this invoice.
12158					// This ensures that event generation remains idempotent in case we receive
12159					// the same invoice multiple times.
12160					self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?;
12161
12162					let event = Event::InvoiceReceived {
12163						payment_id, invoice, context, responder,
12164					};
12165					self.pending_events.lock().unwrap().push_back((event, None));
12166					return None;
12167				}
12168
12169				let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
12170				handle_pay_invoice_res!(res, invoice, logger);
12171			},
12172			#[cfg(async_payments)]
12173			OffersMessage::StaticInvoice(invoice) => {
12174				let payment_id = match context {
12175					Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12176						if payment_id.verify_for_offer_payment(hmac, nonce, expanded_key).is_err() {
12177							return None
12178						}
12179						payment_id
12180					},
12181					_ => return None
12182				};
12183				let res = self.initiate_async_payment(&invoice, payment_id);
12184				handle_pay_invoice_res!(res, invoice, self.logger);
12185			},
12186			OffersMessage::InvoiceError(invoice_error) => {
12187				let payment_hash = match context {
12188					Some(OffersContext::InboundPayment { payment_hash, nonce, hmac }) => {
12189						match payment_hash.verify_for_offer_payment(hmac, nonce, expanded_key) {
12190							Ok(_) => Some(payment_hash),
12191							Err(_) => None,
12192						}
12193					},
12194					_ => None,
12195				};
12196
12197				let logger = WithContext::from(&self.logger, None, None, payment_hash);
12198				log_trace!(logger, "Received invoice_error: {}", invoice_error);
12199
12200				match context {
12201					Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12202						if let Ok(()) = payment_id.verify_for_offer_payment(hmac, nonce, expanded_key) {
12203							self.abandon_payment_with_reason(
12204								payment_id, PaymentFailureReason::InvoiceRequestRejected,
12205							);
12206						}
12207					},
12208					_ => {},
12209				}
12210
12211				None
12212			},
12213		}
12214	}
12215
12216	fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
12217		core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
12218	}
12219}
12220
12221impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12222AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12223where
12224	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12225	T::Target: BroadcasterInterface,
12226	ES::Target: EntropySource,
12227	NS::Target: NodeSigner,
12228	SP::Target: SignerProvider,
12229	F::Target: FeeEstimator,
12230	R::Target: Router,
12231	MR::Target: MessageRouter,
12232	L::Target: Logger,
12233{
12234	fn handle_held_htlc_available(
12235		&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
12236	) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
12237		None
12238	}
12239
12240	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {
12241		#[cfg(async_payments)] {
12242			let AsyncPaymentsContext::OutboundPayment { payment_id, hmac, nonce } = _context;
12243			if payment_id.verify_for_async_payment(hmac, nonce, &self.inbound_payment_key).is_err() { return }
12244			if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
12245				log_trace!(
12246					self.logger, "Failed to release held HTLC with payment id {}: {:?}", payment_id, e
12247				);
12248			}
12249		}
12250	}
12251
12252	fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
12253		core::mem::take(&mut self.pending_async_payments_messages.lock().unwrap())
12254	}
12255}
12256
12257#[cfg(feature = "dnssec")]
12258impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12259DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12260where
12261	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12262	T::Target: BroadcasterInterface,
12263	ES::Target: EntropySource,
12264	NS::Target: NodeSigner,
12265	SP::Target: SignerProvider,
12266	F::Target: FeeEstimator,
12267	R::Target: Router,
12268	MR::Target: MessageRouter,
12269	L::Target: Logger,
12270{
12271	fn handle_dnssec_query(
12272		&self, _message: DNSSECQuery, _responder: Option<Responder>,
12273	) -> Option<(DNSResolverMessage, ResponseInstruction)> {
12274		None
12275	}
12276
12277	fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
12278		let offer_opt = self.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
12279		#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
12280		if let Some((completed_requests, mut offer)) = offer_opt {
12281			for (name, payment_id) in completed_requests {
12282				#[cfg(feature = "_test_utils")]
12283				if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
12284					// If we have multiple pending requests we may end up over-using the override
12285					// offer, but tests can deal with that.
12286					offer = replacement_offer;
12287				}
12288				if let Ok(amt_msats) = self.pending_outbound_payments.amt_msats_for_payment_awaiting_offer(payment_id) {
12289					let offer_pay_res =
12290						self.pay_for_offer_intern(&offer, None, Some(amt_msats), None, payment_id, Some(name),
12291							|invoice_request, nonce| {
12292								let retryable_invoice_request = RetryableInvoiceRequest {
12293									invoice_request: invoice_request.clone(),
12294									nonce,
12295								};
12296								self.pending_outbound_payments
12297									.received_offer(payment_id, Some(retryable_invoice_request))
12298									.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
12299						});
12300					if offer_pay_res.is_err() {
12301						// The offer we tried to pay is the canonical current offer for the name we
12302						// wanted to pay. If we can't pay it, there's no way to recover so fail the
12303						// payment.
12304						// Note that the PaymentFailureReason should be ignored for an
12305						// AwaitingInvoice payment.
12306						self.pending_outbound_payments.abandon_payment(
12307							payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
12308						);
12309					}
12310				}
12311			}
12312		}
12313	}
12314
12315	fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
12316		core::mem::take(&mut self.pending_dns_onion_messages.lock().unwrap())
12317	}
12318}
12319
12320impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12321NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12322where
12323	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12324	T::Target: BroadcasterInterface,
12325	ES::Target: EntropySource,
12326	NS::Target: NodeSigner,
12327	SP::Target: SignerProvider,
12328	F::Target: FeeEstimator,
12329	R::Target: Router,
12330	MR::Target: MessageRouter,
12331	L::Target: Logger,
12332{
12333	fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
12334		self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
12335	}
12336}
12337
12338/// Fetches the set of [`NodeFeatures`] flags that are provided by or required by
12339/// [`ChannelManager`].
12340pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
12341	let mut node_features = provided_init_features(config).to_context();
12342	node_features.set_keysend_optional();
12343	node_features
12344}
12345
12346/// Fetches the set of [`Bolt11InvoiceFeatures`] flags that are provided by or required by
12347/// [`ChannelManager`].
12348///
12349/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
12350/// or not. Thus, this method is not public.
12351#[cfg(any(feature = "_test_utils", test))]
12352pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
12353	provided_init_features(config).to_context()
12354}
12355
12356/// Fetches the set of [`Bolt12InvoiceFeatures`] flags that are provided by or required by
12357/// [`ChannelManager`].
12358pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
12359	provided_init_features(config).to_context()
12360}
12361
12362/// Fetches the set of [`ChannelFeatures`] flags that are provided by or required by
12363/// [`ChannelManager`].
12364pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
12365	provided_init_features(config).to_context()
12366}
12367
12368/// Fetches the set of [`ChannelTypeFeatures`] flags that are provided by or required by
12369/// [`ChannelManager`].
12370pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
12371	ChannelTypeFeatures::from_init(&provided_init_features(config))
12372}
12373
12374/// Fetches the set of [`InitFeatures`] flags that are provided by or required by
12375/// [`ChannelManager`].
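///
/// For example, a sketch of inspecting the advertised features for a default configuration
/// (assuming `UserConfig::default()` and the usual feature accessors):
///
/// ```ignore
/// use lightning::util::config::UserConfig;
///
/// let features = provided_init_features(&UserConfig::default());
/// assert!(features.supports_static_remote_key());
/// assert!(features.supports_payment_secret());
/// ```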
12376pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
12377	// Note that if new features are added here which other peers may (eventually) require, we
12378	// should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
12379	// [`ErroringMessageHandler`].
12380	let mut features = InitFeatures::empty();
12381	features.set_data_loss_protect_required();
12382	features.set_upfront_shutdown_script_optional();
12383	features.set_variable_length_onion_required();
12384	features.set_static_remote_key_required();
12385	features.set_payment_secret_required();
12386	features.set_basic_mpp_optional();
12387	features.set_wumbo_optional();
12388	features.set_shutdown_any_segwit_optional();
12389	features.set_channel_type_optional();
12390	features.set_scid_privacy_optional();
12391	features.set_zero_conf_optional();
12392	features.set_route_blinding_optional();
12393	if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
12394		features.set_anchors_zero_fee_htlc_tx_optional();
12395	}
12396	#[cfg(dual_funding)]
12397	features.set_dual_fund_optional();
12398	features
12399}
12400
12401const SERIALIZATION_VERSION: u8 = 1;
12402const MIN_SERIALIZATION_VERSION: u8 = 1;
12403
12404impl_writeable_tlv_based!(PhantomRouteHints, {
12405	(2, channels, required_vec),
12406	(4, phantom_scid, required),
12407	(6, real_node_pubkey, required),
12408});
12409
12410impl_writeable_tlv_based!(BlindedForward, {
12411	(0, inbound_blinding_point, required),
12412	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
12413	(3, next_blinding_override, option),
12414});
12415
12416impl_writeable_tlv_based_enum!(PendingHTLCRouting,
12417	(0, Forward) => {
12418		(0, onion_packet, required),
12419		(1, blinded, option),
12420		(2, short_channel_id, required),
12421		(3, incoming_cltv_expiry, option),
12422	},
12423	(1, Receive) => {
12424		(0, payment_data, required),
12425		(1, phantom_shared_secret, option),
12426		(2, incoming_cltv_expiry, required),
12427		(3, payment_metadata, option),
12428		(5, custom_tlvs, optional_vec),
12429		(7, requires_blinded_error, (default_value, false)),
12430		(9, payment_context, option),
12431	},
12432	(2, ReceiveKeysend) => {
12433		(0, payment_preimage, required),
12434		(1, requires_blinded_error, (default_value, false)),
12435		(2, incoming_cltv_expiry, required),
12436		(3, payment_metadata, option),
12437		(4, payment_data, option), // Added in 0.0.116
12438		(5, custom_tlvs, optional_vec),
12439		(7, has_recipient_created_payment_secret, (default_value, false)),
12440	},
12441);
12442
12443impl_writeable_tlv_based!(PendingHTLCInfo, {
12444	(0, routing, required),
12445	(2, incoming_shared_secret, required),
12446	(4, payment_hash, required),
12447	(6, outgoing_amt_msat, required),
12448	(8, outgoing_cltv_value, required),
12449	(9, incoming_amt_msat, option),
12450	(10, skimmed_fee_msat, option),
12451});
12452
12453
12454impl Writeable for HTLCFailureMsg {
12455	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12456		match self {
12457			HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
12458				0u8.write(writer)?;
12459				channel_id.write(writer)?;
12460				htlc_id.write(writer)?;
12461				reason.write(writer)?;
12462			},
12463			HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
12464				channel_id, htlc_id, sha256_of_onion, failure_code
12465			}) => {
12466				1u8.write(writer)?;
12467				channel_id.write(writer)?;
12468				htlc_id.write(writer)?;
12469				sha256_of_onion.write(writer)?;
12470				failure_code.write(writer)?;
12471			},
12472		}
12473		Ok(())
12474	}
12475}
12476
12477impl Readable for HTLCFailureMsg {
12478	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12479		let id: u8 = Readable::read(reader)?;
12480		match id {
12481			0 => {
12482				Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
12483					channel_id: Readable::read(reader)?,
12484					htlc_id: Readable::read(reader)?,
12485					reason: Readable::read(reader)?,
12486				}))
12487			},
12488			1 => {
12489				Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
12490					channel_id: Readable::read(reader)?,
12491					htlc_id: Readable::read(reader)?,
12492					sha256_of_onion: Readable::read(reader)?,
12493					failure_code: Readable::read(reader)?,
12494				}))
12495			},
12496			// In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
12497			// weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
12498			// messages contained in the variants.
12499			// In version 0.0.101, support for reading the variants with these types was added, and
12500			// we should migrate to writing these variants when UpdateFailHTLC or
12501			// UpdateFailMalformedHTLC get TLV fields.
12502			2 => {
12503				let length: BigSize = Readable::read(reader)?;
12504				let mut s = FixedLengthReader::new(reader, length.0);
12505				let res = Readable::read(&mut s)?;
12506				s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
12507				Ok(HTLCFailureMsg::Relay(res))
12508			},
12509			3 => {
12510				let length: BigSize = Readable::read(reader)?;
12511				let mut s = FixedLengthReader::new(reader, length.0);
12512				let res = Readable::read(&mut s)?;
12513				s.eat_remaining()?; // Return ShortRead if there are actually not enough bytes
12514				Ok(HTLCFailureMsg::Malformed(res))
12515			},
12516			_ => Err(DecodeError::UnknownRequiredFeature),
12517		}
12518	}
12519}
12520
12521impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
12522	(0, Forward),
12523	(1, Fail),
12524);
12525
12526impl_writeable_tlv_based_enum!(BlindedFailure,
12527	(0, FromIntroductionNode) => {},
12528	(2, FromBlindedNode) => {},
12529);
12530
12531impl_writeable_tlv_based!(HTLCPreviousHopData, {
12532	(0, short_channel_id, required),
12533	(1, phantom_shared_secret, option),
12534	(2, outpoint, required),
12535	(3, blinded_failure, option),
12536	(4, htlc_id, required),
12537	(5, cltv_expiry, option),
12538	(6, incoming_packet_shared_secret, required),
12539	(7, user_channel_id, option),
12540	// Note that by the time we get past the required read for type 2 above, outpoint will be
12541	// filled in, so we can safely unwrap it here.
12542	(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
12543	(11, counterparty_node_id, option),
12544});
12545
12546impl Writeable for ClaimableHTLC {
12547	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12548		let (payment_data, keysend_preimage) = match &self.onion_payload {
12549			OnionPayload::Invoice { _legacy_hop_data } => {
12550				(_legacy_hop_data.as_ref(), None)
12551			},
12552			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
12553		};
12554		write_tlv_fields!(writer, {
12555			(0, self.prev_hop, required),
12556			(1, self.total_msat, required),
12557			(2, self.value, required),
12558			(3, self.sender_intended_value, required),
12559			(4, payment_data, option),
12560			(5, self.total_value_received, option),
12561			(6, self.cltv_expiry, required),
12562			(8, keysend_preimage, option),
12563			(10, self.counterparty_skimmed_fee_msat, option),
12564		});
12565		Ok(())
12566	}
12567}
12568
12569impl Readable for ClaimableHTLC {
12570	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12571		_init_and_read_len_prefixed_tlv_fields!(reader, {
12572			(0, prev_hop, required),
12573			(1, total_msat, option),
12574			(2, value_ser, required),
12575			(3, sender_intended_value, option),
12576			(4, payment_data_opt, option),
12577			(5, total_value_received, option),
12578			(6, cltv_expiry, required),
12579			(8, keysend_preimage, option),
12580			(10, counterparty_skimmed_fee_msat, option),
12581		});
12582		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
12583		let value = value_ser.0.unwrap();
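		// A keysend (spontaneous) HTLC carries a preimage and must not also carry payment_data,
		// while an invoice HTLC carries (possibly legacy) payment_data. If total_msat wasn't
		// written (by older versions), fall back to the HTLC value or the total from payment_data.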
12584		let onion_payload = match keysend_preimage {
12585			Some(p) => {
12586				if payment_data.is_some() {
12587					return Err(DecodeError::InvalidValue)
12588				}
12589				if total_msat.is_none() {
12590					total_msat = Some(value);
12591				}
12592				OnionPayload::Spontaneous(p)
12593			},
12594			None => {
12595				if total_msat.is_none() {
12596					if payment_data.is_none() {
12597						return Err(DecodeError::InvalidValue)
12598					}
12599					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
12600				}
12601				OnionPayload::Invoice { _legacy_hop_data: payment_data }
12602			},
12603		};
12604		Ok(Self {
12605			prev_hop: prev_hop.0.unwrap(),
12606			timer_ticks: 0,
12607			value,
12608			sender_intended_value: sender_intended_value.unwrap_or(value),
12609			total_value_received,
12610			total_msat: total_msat.unwrap(),
12611			onion_payload,
12612			cltv_expiry: cltv_expiry.0.unwrap(),
12613			counterparty_skimmed_fee_msat,
12614		})
12615	}
12616}
12617
12618impl Readable for HTLCSource {
12619	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
12620		let id: u8 = Readable::read(reader)?;
12621		match id {
12622			0 => {
12623				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
12624				let mut first_hop_htlc_msat: u64 = 0;
12625				let mut path_hops = Vec::new();
12626				let mut payment_id = None;
12627				let mut payment_params: Option<PaymentParameters> = None;
12628				let mut blinded_tail: Option<BlindedTail> = None;
12629				read_tlv_fields!(reader, {
12630					(0, session_priv, required),
12631					(1, payment_id, option),
12632					(2, first_hop_htlc_msat, required),
12633					(4, path_hops, required_vec),
12634					(5, payment_params, (option: ReadableArgs, 0)),
12635					(6, blinded_tail, option),
12636				});
12637				if payment_id.is_none() {
12638					// For backwards compat, if there was no payment_id written, use the session_priv bytes
12639					// instead.
12640					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
12641				}
12642				let path = Path { hops: path_hops, blinded_tail };
12643				if path.hops.len() == 0 {
12644					return Err(DecodeError::InvalidValue);
12645				}
12646				if let Some(params) = payment_params.as_mut() {
12647					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
12648						if final_cltv_expiry_delta == &0 {
12649							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
12650						}
12651					}
12652				}
12653				Ok(HTLCSource::OutboundRoute {
12654					session_priv: session_priv.0.unwrap(),
12655					first_hop_htlc_msat,
12656					path,
12657					payment_id: payment_id.unwrap(),
12658				})
12659			}
12660			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
12661			_ => Err(DecodeError::UnknownRequiredFeature),
12662		}
12663	}
12664}
12665
12666impl Writeable for HTLCSource {
12667	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
12668		match self {
12669			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
12670				0u8.write(writer)?;
12671				let payment_id_opt = Some(payment_id);
12672				write_tlv_fields!(writer, {
12673					(0, session_priv, required),
12674					(1, payment_id_opt, option),
12675					(2, first_hop_htlc_msat, required),
12676					// 3 was previously used to write a PaymentSecret for the payment.
12677					(4, path.hops, required_vec),
12678					(5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
12679					(6, path.blinded_tail, option),
12680				});
12681			}
12682			HTLCSource::PreviousHopData(ref field) => {
12683				1u8.write(writer)?;
12684				field.write(writer)?;
12685			}
12686		}
12687		Ok(())
12688	}
12689}
12690
12691impl_writeable_tlv_based!(PendingAddHTLCInfo, {
12692	(0, forward_info, required),
12693	(1, prev_user_channel_id, (default_value, 0)),
12694	(2, prev_short_channel_id, required),
12695	(4, prev_htlc_id, required),
12696	(6, prev_funding_outpoint, required),
12697	// Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be
12698	// filled in, so we can safely unwrap it here.
12699	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
12700	(9, prev_counterparty_node_id, option),
12701});
12702
12703impl Writeable for HTLCForwardInfo {
12704	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
12705		const FAIL_HTLC_VARIANT_ID: u8 = 1;
12706		match self {
12707			Self::AddHTLC(info) => {
12708				0u8.write(w)?;
12709				info.write(w)?;
12710			},
12711			Self::FailHTLC { htlc_id, err_packet } => {
12712				FAIL_HTLC_VARIANT_ID.write(w)?;
12713				write_tlv_fields!(w, {
12714					(0, htlc_id, required),
12715					(2, err_packet, required),
12716				});
12717			},
12718			Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
12719				// Since this variant was added in 0.0.119, write this as `::FailHTLC` with an empty error
12720				// packet so older versions have something to fail back with, but serialize the real data as
12721				// optional TLVs for the benefit of newer versions.
12722				FAIL_HTLC_VARIANT_ID.write(w)?;
12723				let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
12724				write_tlv_fields!(w, {
12725					(0, htlc_id, required),
12726					(1, failure_code, required),
12727					(2, dummy_err_packet, required),
12728					(3, sha256_of_onion, required),
12729				});
12730			},
12731		}
12732		Ok(())
12733	}
12734}
12735
12736impl Readable for HTLCForwardInfo {
12737	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
12738		let id: u8 = Readable::read(r)?;
12739		Ok(match id {
12740			0 => Self::AddHTLC(Readable::read(r)?),
12741			1 => {
12742				_init_and_read_len_prefixed_tlv_fields!(r, {
12743					(0, htlc_id, required),
12744					(1, malformed_htlc_failure_code, option),
12745					(2, err_packet, required),
12746					(3, sha256_of_onion, option),
12747				});
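				// If the optional TLVs written for `FailMalformedHTLC` (by LDK 0.0.119+) are
				// present, decode this as a malformed-HTLC failure rather than the legacy
				// `FailHTLC` written for the benefit of older versions.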
12748				if let Some(failure_code) = malformed_htlc_failure_code {
12749					Self::FailMalformedHTLC {
12750						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
12751						failure_code,
12752						sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
12753					}
12754				} else {
12755					Self::FailHTLC {
12756						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
12757						err_packet: _init_tlv_based_struct_field!(err_packet, required),
12758					}
12759				}
12760			},
12761			_ => return Err(DecodeError::InvalidValue),
12762		})
12763	}
12764}
12765
12766impl_writeable_tlv_based!(PendingInboundPayment, {
12767	(0, payment_secret, required),
12768	(2, expiry_time, required),
12769	(4, user_payment_id, required),
12770	(6, payment_preimage, required),
12771	(8, min_value_msat, required),
12772});
12773
12774impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12775where
12776	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12777	T::Target: BroadcasterInterface,
12778	ES::Target: EntropySource,
12779	NS::Target: NodeSigner,
12780	SP::Target: SignerProvider,
12781	F::Target: FeeEstimator,
12782	R::Target: Router,
12783	MR::Target: MessageRouter,
12784	L::Target: Logger,
12785{
12786	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12787		let _consistency_lock = self.total_consistency_lock.write().unwrap();
12788
12789		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
12790
12791		self.chain_hash.write(writer)?;
12792		{
12793			let best_block = self.best_block.read().unwrap();
12794			best_block.height.write(writer)?;
12795			best_block.block_hash.write(writer)?;
12796		}
12797
12798		let per_peer_state = self.per_peer_state.write().unwrap();
12799
12800		let mut serializable_peer_count: u64 = 0;
12801		{
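			// Only channels whose funding transaction has been broadcast are serialized below;
			// channels which never broadcast their funding are intentionally dropped on restart.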
12802			let mut number_of_funded_channels = 0;
12803			for (_, peer_state_mutex) in per_peer_state.iter() {
12804				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12805				let peer_state = &mut *peer_state_lock;
12806				if !peer_state.ok_to_remove(false) {
12807					serializable_peer_count += 1;
12808				}
12809
12810				number_of_funded_channels += peer_state.channel_by_id.iter().filter(
12811					|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
12812				).count();
12813			}
12814
12815			(number_of_funded_channels as u64).write(writer)?;
12816
12817			for (_, peer_state_mutex) in per_peer_state.iter() {
12818				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
12819				let peer_state = &mut *peer_state_lock;
12820				for channel in peer_state.channel_by_id.iter().filter_map(
12821					|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
12822						if channel.context.is_funding_broadcast() { Some(channel) } else { None }
12823					} else { None }
12824				) {
12825					channel.write(writer)?;
12826				}
12827			}
12828		}
12829
12830		{
12831			let forward_htlcs = self.forward_htlcs.lock().unwrap();
12832			(forward_htlcs.len() as u64).write(writer)?;
12833			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
12834				short_channel_id.write(writer)?;
12835				(pending_forwards.len() as u64).write(writer)?;
12836				for forward in pending_forwards {
12837					forward.write(writer)?;
12838				}
12839			}
12840		}
12841
12842		let mut decode_update_add_htlcs_opt = None;
12843		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
12844		if !decode_update_add_htlcs.is_empty() {
12845			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
12846		}
12847
12848		let claimable_payments = self.claimable_payments.lock().unwrap();
12849		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
12850
12851		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
12852		let mut htlc_onion_fields: Vec<&_> = Vec::new();
12853		(claimable_payments.claimable_payments.len() as u64).write(writer)?;
12854		for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
12855			payment_hash.write(writer)?;
12856			(payment.htlcs.len() as u64).write(writer)?;
12857			for htlc in payment.htlcs.iter() {
12858				htlc.write(writer)?;
12859			}
12860			htlc_purposes.push(&payment.purpose);
12861			htlc_onion_fields.push(&payment.onion_fields);
12862		}
12863
12864		let mut monitor_update_blocked_actions_per_peer = None;
12865		let mut peer_states = Vec::new();
12866		for (_, peer_state_mutex) in per_peer_state.iter() {
12867			// Because we're holding the owning `per_peer_state` write lock here there's no chance
12868			// of a lockorder violation deadlock - no other thread can be holding any
12869			// per_peer_state lock at all.
12870			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
12871		}
12872
12873		(serializable_peer_count).write(writer)?;
12874		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
12875			// Peers which we have no channels to should be dropped once disconnected. As we
12876			// disconnect all peers when shutting down and serializing the ChannelManager, we
12877			// consider all peers as disconnected here. There's therefore no need to write peers with
12878			// no channels.
12879			if !peer_state.ok_to_remove(false) {
12880				peer_pubkey.write(writer)?;
12881				peer_state.latest_features.write(writer)?;
12882				if !peer_state.monitor_update_blocked_actions.is_empty() {
12883					monitor_update_blocked_actions_per_peer
12884						.get_or_insert_with(Vec::new)
12885						.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
12886				}
12887			}
12888		}
12889
12890		let events = self.pending_events.lock().unwrap();
12891		// LDK versions prior to 0.0.115 don't support post-event actions, thus if there are no
12892		// actions at all, skip writing the required TLV. Otherwise, pre-0.0.115 versions will
12893		// refuse to read the new ChannelManager.
12894		let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
12895		if events_not_backwards_compatible {
12896			// If we're going to write an events TLV that will overwrite our events anyway, we might
12897			// as well save the space and not write any events here.
12898			0u64.write(writer)?;
12899		} else {
12900			(events.len() as u64).write(writer)?;
12901			for (event, _) in events.iter() {
12902				event.write(writer)?;
12903			}
12904		}
12905
12906		// LDK versions prior to 0.0.116 wrote the `pending_background_events`
12907		// `MonitorUpdateRegeneratedOnStartup`s here, however there was never a reason to do so -
12908		// the closing monitor updates were always effectively replayed on startup (either directly
12909		// by calling `broadcast_latest_holder_commitment_txn` on a `ChannelMonitor` during
12910		// deserialization or, in 0.0.115, by regenerating the monitor update itself).
12911		0u64.write(writer)?;
12912
12913		// Prior to 0.0.111 we tracked node_announcement serials here, however that now happens in
12914		// `PeerManager`, and thus we simply write the `highest_seen_timestamp` twice, which is
12915		// likely to be identical.
12916		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
12917		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
12918
12919		// LDK versions prior to 0.0.104 wrote `pending_inbound_payments` here, with deprecated support
12920		// for stateful inbound payments maintained until 0.0.116, after which no further inbound
12921		// payments could have been written here.
12922		(0 as u64).write(writer)?;
12923
12924		// For backwards compat, write the session privs and their total length.
12925		let mut num_pending_outbounds_compat: u64 = 0;
12926		for (_, outbound) in pending_outbound_payments.iter() {
12927			if !outbound.is_fulfilled() && !outbound.abandoned() {
12928				num_pending_outbounds_compat += outbound.remaining_parts() as u64;
12929			}
12930		}
12931		num_pending_outbounds_compat.write(writer)?;
12932		for (_, outbound) in pending_outbound_payments.iter() {
12933			match outbound {
12934				PendingOutboundPayment::Legacy { session_privs } |
12935				PendingOutboundPayment::Retryable { session_privs, .. } => {
12936					for session_priv in session_privs.iter() {
12937						session_priv.write(writer)?;
12938					}
12939				}
12940				PendingOutboundPayment::AwaitingInvoice { .. } => {},
12941				PendingOutboundPayment::AwaitingOffer { .. } => {},
12942				PendingOutboundPayment::InvoiceReceived { .. } => {},
12943				PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
12944				PendingOutboundPayment::Fulfilled { .. } => {},
12945				PendingOutboundPayment::Abandoned { .. } => {},
12946			}
12947		}
12948
12949		// Encode without retry info for 0.0.101 compatibility.
12950		let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
12951		for (id, outbound) in pending_outbound_payments.iter() {
12952			match outbound {
12953				PendingOutboundPayment::Legacy { session_privs } |
12954				PendingOutboundPayment::Retryable { session_privs, .. } => {
12955					pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
12956				},
12957				_ => {},
12958			}
12959		}
12960
12961		let mut pending_intercepted_htlcs = None;
12962		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
12963		if our_pending_intercepts.len() != 0 {
12964			pending_intercepted_htlcs = Some(our_pending_intercepts);
12965		}
12966
12967		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
12968		if pending_claiming_payments.as_ref().unwrap().is_empty() {
12969			// LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
12970			// map. Thus, if there are no entries we skip writing a TLV for it.
12971			pending_claiming_payments = None;
12972		}
12973
12974		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
12975		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
12976			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
12977				if !updates.is_empty() {
12978					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
12979					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
12980				}
12981			}
12982		}
12983
12984		write_tlv_fields!(writer, {
12985			(1, pending_outbound_payments_no_retry, required),
12986			(2, pending_intercepted_htlcs, option),
12987			(3, pending_outbound_payments, required),
12988			(4, pending_claiming_payments, option),
12989			(5, self.our_network_pubkey, required),
12990			(6, monitor_update_blocked_actions_per_peer, option),
12991			(7, self.fake_scid_rand_bytes, required),
12992			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
12993			(9, htlc_purposes, required_vec),
12994			(10, in_flight_monitor_updates, option),
12995			(11, self.probing_cookie_secret, required),
12996			(13, htlc_onion_fields, optional_vec),
12997			(14, decode_update_add_htlcs_opt, option),
12998			(15, self.inbound_payment_id_secret, required),
12999		});
13000
13001		Ok(())
13002	}
13003}
13004
13005impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
13006	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
13007		(self.len() as u64).write(w)?;
13008		for (event, action) in self.iter() {
13009			event.write(w)?;
13010			action.write(w)?;
13011			#[cfg(debug_assertions)] {
13012				// Events are MaybeReadable, in some cases indicating that they shouldn't actually
13013				// be persisted and are regenerated on restart. However, if such an event has a
13014				// post-event-handling action we'll write nothing for the event and would have to
13015				// either forget the action or fail on deserialization (which we do below). Thus,
13016				// check that the event is sane here.
13017				let event_encoded = event.encode();
13018				let event_read: Option<Event> =
13019					MaybeReadable::read(&mut &event_encoded[..]).unwrap();
13020				if action.is_some() { assert!(event_read.is_some()); }
13021			}
13022		}
13023		Ok(())
13024	}
13025}
13026impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
13027	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
13028		let len: u64 = Readable::read(reader)?;
13029		const MAX_ALLOC_SIZE: u64 = 1024 * 16;
13030		let mut events: Self = VecDeque::with_capacity(cmp::min(
13031			MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
13032			len) as usize);
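		// Events are MaybeReadable and may legitimately decode to None (i.e. they should not have
		// been persisted and will be regenerated on restart); skip those, but treat a skipped
		// event which had a completion action as corrupt since we'd otherwise lose the action.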
13033		for _ in 0..len {
13034			let ev_opt = MaybeReadable::read(reader)?;
13035			let action = Readable::read(reader)?;
13036			if let Some(ev) = ev_opt {
13037				events.push_back((ev, action));
13038			} else if action.is_some() {
13039				return Err(DecodeError::InvalidValue);
13040			}
13041		}
13042		Ok(events)
13043	}
13044}
13045
13046/// Arguments for the creation of a ChannelManager that are not deserialized.
13047///
13048/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
13049/// is:
13050/// 1) Deserialize all stored [`ChannelMonitor`]s.
13051/// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
13052///    `<(BlockHash, ChannelManager)>::read(reader, args)`
13053///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
13054///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
13055/// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
13056///    same way you would handle a [`chain::Filter`] call using
13057///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
13058/// 4) Disconnect/connect blocks on your [`ChannelMonitor`]s to get them in sync with the chain.
13059/// 5) Disconnect/connect blocks on the [`ChannelManager`] to get it in sync with the chain.
13060/// 6) Optionally re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
13061///    This is important if you have replayed a nontrivial number of blocks in step (4), allowing
13062///    you to avoid having to replay the same blocks if you shut down quickly after startup. It is
13063///    otherwise not required.
13064///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
13065///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
13066///    the next step.
13067/// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
13068///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
13069///
13070/// Note that the ordering of steps (4) through (7) is not of importance; however, all four must
13071/// occur before you call any other methods on the newly-deserialized [`ChannelManager`].
13072///
13073/// Note that because some channels may be closed during deserialization, it is critical that you
13074/// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
13075/// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
13076/// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
13077/// not force-close the same channels but consider them live), you may end up revoking a state for
13078/// which you've already broadcasted the transaction.
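///
/// A rough, illustrative sketch of steps (1) and (2) only. The helpers (`read_monitors`,
/// `read_manager_bytes`) and the interface implementations (`keys`, `fee_est`, `chain_watch`,
/// `broadcaster`, `router`, `message_router`, `logger`) are assumed to be provided by your
/// application, and the `_` placeholders stand for your concrete types:
///
/// ```ignore
/// // Note: `read_monitors`, `read_manager_bytes`, `keys`, etc. are application-provided
/// // placeholders, not part of this crate.
/// // (1) Deserialize all stored ChannelMonitors first.
/// let monitors: Vec<ChannelMonitor<_>> = read_monitors()?;
/// // (2) Build the read args and deserialize the ChannelManager against those monitors.
/// let read_args = ChannelManagerReadArgs::new(
/// 	keys.clone(), keys.clone(), keys.clone(), fee_est, chain_watch, broadcaster, router,
/// 	message_router, logger, UserConfig::default(), monitors.iter().collect(),
/// );
/// let manager_bytes = read_manager_bytes()?;
/// let mut manager_reader = &manager_bytes[..];
/// let (block_hash, channel_manager) =
/// 	<(BlockHash, ChannelManager<_, _, _, _, _, _, _, _, _>)>::read(&mut manager_reader, read_args)?;
/// ```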
13079///
13080/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
13081pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13082where
13083	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13084	T::Target: BroadcasterInterface,
13085	ES::Target: EntropySource,
13086	NS::Target: NodeSigner,
13087	SP::Target: SignerProvider,
13088	F::Target: FeeEstimator,
13089	R::Target: Router,
13090	MR::Target: MessageRouter,
13091	L::Target: Logger,
13092{
13093	/// A cryptographically secure source of entropy.
13094	pub entropy_source: ES,
13095
13096	/// A signer that is able to perform node-scoped cryptographic operations.
13097	pub node_signer: NS,
13098
13099	/// The keys provider which will give us relevant keys. Some keys will be loaded during
13100	/// deserialization and SignerProvider::read_chan_signer will be used to read per-Channel
13101	/// signing data.
13102	pub signer_provider: SP,
13103
13104	/// The fee_estimator for use in the ChannelManager in the future.
13105	///
13106	/// No calls to the FeeEstimator will be made during deserialization.
13107	pub fee_estimator: F,
13108	/// The chain::Watch for use in the ChannelManager in the future.
13109	///
13110	/// No calls to the chain::Watch will be made during deserialization. It is assumed that
13111	/// you have deserialized ChannelMonitors separately and will add them to your
13112	/// chain::Watch after deserializing this ChannelManager.
13113	pub chain_monitor: M,
13114
13115	/// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
13116	/// used to broadcast the latest local commitment transactions of channels which must be
13117	/// force-closed during deserialization.
13118	pub tx_broadcaster: T,
13119	/// The router which will be used in the ChannelManager in the future for finding routes
13120	/// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding.
13121	///
13122	/// No calls to the router will be made during deserialization.
13123	pub router: R,
13124	/// The [`MessageRouter`] used for constructing [`BlindedMessagePath`]s for [`Offer`]s,
13125	/// [`Refund`]s, and any reply paths.
13126	pub message_router: MR,
13127	/// The Logger for use in the ChannelManager and which may be used to log information during
13128	/// deserialization.
13129	pub logger: L,
13130	/// Default settings used for new channels. Any existing channels will continue to use the
13131	/// runtime settings which were stored when the ChannelManager was serialized.
13132	pub default_config: UserConfig,
13133
13134	/// A map from channel funding outpoints to ChannelMonitors for those channels (i.e. each
13135	/// monitor's get_funding_txo().0 should be its key).
13136	///
13137	/// If a monitor is inconsistent with the channel state during deserialization the channel will
13138	/// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
13139	/// is true for missing channels as well. If there is a monitor missing for which we find
13140	/// channel data, Err(DecodeError::InvalidValue) will be returned.
13141	///
13142	/// In such cases the latest local transactions will be sent to the tx_broadcaster included in
13143	/// this struct.
13144	///
13145	/// This is not exported to bindings users because we have no HashMap bindings
13146	pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13147}
13148
13149impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13150		ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
13151where
13152	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13153	T::Target: BroadcasterInterface,
13154	ES::Target: EntropySource,
13155	NS::Target: NodeSigner,
13156	SP::Target: SignerProvider,
13157	F::Target: FeeEstimator,
13158	R::Target: Router,
13159	MR::Target: MessageRouter,
13160	L::Target: Logger,
13161{
13162	/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
13163	/// HashMap for you. This is primarily useful for C bindings where it is not practical to
13164	/// populate a HashMap directly from C.
13165	pub fn new(
13166		entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
13167		chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
13168		default_config: UserConfig,
13169		mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13170	) -> Self {
13171		Self {
13172			entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
13173			tx_broadcaster, router, message_router, logger, default_config,
13174			channel_monitors: hash_map_from_iter(
13175				channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
13176			),
13177		}
13178	}
13179}
13180
13181// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
13182// SimpleArcChannelManager type:
13183impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13184	ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
13185where
13186	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13187	T::Target: BroadcasterInterface,
13188	ES::Target: EntropySource,
13189	NS::Target: NodeSigner,
13190	SP::Target: SignerProvider,
13191	F::Target: FeeEstimator,
13192	R::Target: Router,
13193	MR::Target: MessageRouter,
13194	L::Target: Logger,
13195{
13196	fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13197		let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
13198		Ok((blockhash, Arc::new(chan_manager)))
13199	}
13200}
13201
13202impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13203	ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
13204where
13205	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13206	T::Target: BroadcasterInterface,
13207	ES::Target: EntropySource,
13208	NS::Target: NodeSigner,
13209	SP::Target: SignerProvider,
13210	F::Target: FeeEstimator,
13211	R::Target: Router,
13212	MR::Target: MessageRouter,
13213	L::Target: Logger,
13214{
13215	fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13216		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
13217
13218		let chain_hash: ChainHash = Readable::read(reader)?;
13219		let best_block_height: u32 = Readable::read(reader)?;
13220		let best_block_hash: BlockHash = Readable::read(reader)?;
13221
13222		let empty_peer_state = || {
13223			PeerState {
13224				channel_by_id: new_hash_map(),
13225				inbound_channel_request_by_id: new_hash_map(),
13226				latest_features: InitFeatures::empty(),
13227				pending_msg_events: Vec::new(),
13228				in_flight_monitor_updates: BTreeMap::new(),
13229				monitor_update_blocked_actions: BTreeMap::new(),
13230				actions_blocking_raa_monitor_updates: BTreeMap::new(),
13231				closed_channel_monitor_update_ids: BTreeMap::new(),
13232				is_connected: false,
13233			}
13234		};
13235
13236		let mut failed_htlcs = Vec::new();
13237		let channel_count: u64 = Readable::read(reader)?;
13238		let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
13239		let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
13240		let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13241		let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13242		let mut channel_closures = VecDeque::new();
13243		let mut close_background_events = Vec::new();
13244		let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
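		// Read each persisted (funded) channel and reconcile it against its ChannelMonitor,
		// force-closing any channel which is behind its monitor and erroring if a monitor is
		// missing entirely.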
13245		for _ in 0..channel_count {
13246			let mut channel: Channel<SP> = Channel::read(reader, (
13247				&args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
13248			))?;
13249			let logger = WithChannelContext::from(&args.logger, &channel.context, None);
13250			let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13251			funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
13252			funding_txo_set.insert(funding_txo.clone());
13253			if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
13254				if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
13255						channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
13256						channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
13257						channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13258					// But if the channel is behind the monitor, close the channel:
13259					log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
13260					log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
13261					if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13262						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
13263							&channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
13264					}
13265					if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
13266						log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
13267							&channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
13268					}
13269					if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
13270						log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
13271							&channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
13272					}
13273					if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
13274						log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
13275							&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
13276					}
13277					let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
13278					if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
13279						return Err(DecodeError::InvalidValue);
13280					}
13281					if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
13282						// Our channel information is out of sync with the `ChannelMonitor`, so
13283						// force the update to use the `ChannelMonitor`'s update_id for the close
13284						// update.
13285						let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
13286						update.update_id = latest_update_id;
13287						per_peer_state.entry(counterparty_node_id)
13288							.or_insert_with(|| Mutex::new(empty_peer_state()))
13289							.lock().unwrap()
13290							.closed_channel_monitor_update_ids.entry(channel_id)
13291								.and_modify(|v| *v = cmp::max(latest_update_id, *v))
13292								.or_insert(latest_update_id);
13293
13294						close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13295							counterparty_node_id, funding_txo, channel_id, update
13296						});
13297					}
13298					failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
13299					channel_closures.push_back((events::Event::ChannelClosed {
13300						channel_id: channel.context.channel_id(),
13301						user_channel_id: channel.context.get_user_id(),
13302						reason: ClosureReason::OutdatedChannelManager,
13303						counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13304						channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13305						channel_funding_txo: channel.context.get_funding_txo(),
13306						last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13307					}, None));
13308					for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
13309						let mut found_htlc = false;
13310						for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
13311							if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
13312						}
13313						if !found_htlc {
13314							// If we have some HTLCs in the channel which are not present in the newer
13315							// ChannelMonitor, they have been removed and should be failed back to
13316							// ensure we don't forget them entirely. Note that if the missing HTLC(s)
13317							// were actually claimed, we'd have generated and ensured the previous-hop
13318							// claim update ChannelMonitor updates were persisted prior to persisting
13319							// the ChannelMonitor update for the forward leg, so attempting to fail the
13320							// backwards leg of the HTLC will simply be rejected.
13321							let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
13322							log_info!(logger,
13323								"Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
13324								&channel.context.channel_id(), &payment_hash);
13325							failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13326						}
13327					}
13328				} else {
13329					channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
13330					log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
13331						&channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
13332						monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
13333					if let Some(short_channel_id) = channel.context.get_short_channel_id() {
13334						short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13335					}
13336					if let Some(funding_txo) = channel.context.get_funding_txo() {
13337						outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
13338					}
13339					per_peer_state.entry(channel.context.get_counterparty_node_id())
13340						.or_insert_with(|| Mutex::new(empty_peer_state()))
13341						.get_mut().unwrap()
13342						.channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
13343				}
13344			} else if channel.is_awaiting_initial_mon_persist() {
13345				// If we were persisted and shut down while the initial ChannelMonitor persistence
13346				// was in-progress, we never broadcasted the funding transaction and can still
13347				// safely discard the channel.
13348				let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
13349				channel_closures.push_back((events::Event::ChannelClosed {
13350					channel_id: channel.context.channel_id(),
13351					user_channel_id: channel.context.get_user_id(),
13352					reason: ClosureReason::DisconnectedPeer,
13353					counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13354					channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13355					channel_funding_txo: channel.context.get_funding_txo(),
13356					last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13357				}, None));
13358			} else {
13359				log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
13360				log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13361				log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13362				log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
13363				log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13364				return Err(DecodeError::InvalidValue);
13365			}
13366		}
13367
13368		for (funding_txo, monitor) in args.channel_monitors.iter() {
13369			if !funding_txo_set.contains(funding_txo) {
13370				let mut should_queue_fc_update = false;
13371				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13372					// If the ChannelMonitor had any updates, we may need to update it further and
13373					// thus track it in `closed_channel_monitor_update_ids`. If the channel never
13374					// had any updates at all, there can't be any HTLCs pending which we need to
13375					// claim.
13376					// Note that a `ChannelMonitor` is created with `update_id` 0 and after we
13377					// provide it with a closure update its `update_id` will be at 1.
13378					if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13379						should_queue_fc_update = !monitor.no_further_updates_allowed();
13380						let mut latest_update_id = monitor.get_latest_update_id();
13381						if should_queue_fc_update {
13382							latest_update_id += 1;
13383						}
13384						per_peer_state.entry(counterparty_node_id)
13385							.or_insert_with(|| Mutex::new(empty_peer_state()))
13386							.lock().unwrap()
13387							.closed_channel_monitor_update_ids.entry(monitor.channel_id())
13388								.and_modify(|v| *v = cmp::max(latest_update_id, *v))
13389								.or_insert(latest_update_id);
13390					}
13391				}
13392
13393				if !should_queue_fc_update {
13394					continue;
13395				}
13396
13397				let logger = WithChannelMonitor::from(&args.logger, monitor, None);
13398				let channel_id = monitor.channel_id();
13399				log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
13400					&channel_id);
13401				let mut monitor_update = ChannelMonitorUpdate {
13402					update_id: monitor.get_latest_update_id().saturating_add(1),
13403					counterparty_node_id: None,
13404					updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
13405					channel_id: Some(monitor.channel_id()),
13406				};
13407				if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13408					let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13409						counterparty_node_id,
13410						funding_txo: *funding_txo,
13411						channel_id,
13412						update: monitor_update,
13413					};
13414					close_background_events.push(update);
13415				} else {
13416					// This is a fairly old `ChannelMonitor` that hasn't seen an update to its
13417					// off-chain state since LDK 0.0.118 (as in LDK 0.0.119 any off-chain
13418					// `ChannelMonitorUpdate` will set the counterparty ID).
13419					// Thus, we assume that it has no pending HTLCs and we will not need to
13420					// generate a `ChannelMonitorUpdate` for it aside from this
13421					// `ChannelForceClosed` one.
13422					monitor_update.update_id = u64::MAX;
13423					close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
13424				}
13425			}
13426		}
13427
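		// Cap up-front allocations based on untrusted counts read from the serialized data, to
		// bound memory usage while deserializing.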
13428		const MAX_ALLOC_SIZE: usize = 1024 * 64;
13429		let forward_htlcs_count: u64 = Readable::read(reader)?;
13430		let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
13431		for _ in 0..forward_htlcs_count {
13432			let short_channel_id = Readable::read(reader)?;
13433			let pending_forwards_count: u64 = Readable::read(reader)?;
13434			let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
13435			for _ in 0..pending_forwards_count {
13436				pending_forwards.push(Readable::read(reader)?);
13437			}
13438			forward_htlcs.insert(short_channel_id, pending_forwards);
13439		}
13440
13441		let claimable_htlcs_count: u64 = Readable::read(reader)?;
13442		let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
13443		for _ in 0..claimable_htlcs_count {
13444			let payment_hash = Readable::read(reader)?;
13445			let previous_hops_len: u64 = Readable::read(reader)?;
13446			let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
13447			for _ in 0..previous_hops_len {
13448				previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
13449			}
13450			claimable_htlcs_list.push((payment_hash, previous_hops));
13451		}
13452
13453		let peer_count: u64 = Readable::read(reader)?;
13454		for _ in 0..peer_count {
13455			let peer_pubkey: PublicKey = Readable::read(reader)?;
13456			let latest_features = Readable::read(reader)?;
13457			if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
13458				peer_state.get_mut().unwrap().latest_features = latest_features;
13459			}
13460		}
13461
13462		let event_count: u64 = Readable::read(reader)?;
13463		let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
13464			VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
13465		for _ in 0..event_count {
13466			match MaybeReadable::read(reader)? {
13467				Some(event) => pending_events_read.push_back((event, None)),
13468				None => continue,
13469			}
13470		}
13471
13472		let background_event_count: u64 = Readable::read(reader)?;
13473		for _ in 0..background_event_count {
13474			match <u8 as Readable>::read(reader)? {
13475				0 => {
13476					// LDK versions prior to 0.0.116 wrote pending `MonitorUpdateRegeneratedOnStartup`s here,
13477					// however we really don't (and never did) need them - we regenerate all
13478					// on-startup monitor updates.
13479					let _: OutPoint = Readable::read(reader)?;
13480					let _: ChannelMonitorUpdate = Readable::read(reader)?;
13481				}
13482				_ => return Err(DecodeError::InvalidValue),
13483			}
13484		}
13485
13486		let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
13487		let highest_seen_timestamp: u32 = Readable::read(reader)?;
13488
13489		// The last version where a pending inbound payment may have been added was 0.0.116.
13490		let pending_inbound_payment_count: u64 = Readable::read(reader)?;
13491		for _ in 0..pending_inbound_payment_count {
13492			let payment_hash: PaymentHash = Readable::read(reader)?;
13493			let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
13494			let inbound: PendingInboundPayment = Readable::read(reader)?;
13495			log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound);
13496		}
13497
13498		let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
13499		let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
13500			hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
13501		for _ in 0..pending_outbound_payments_count_compat {
13502			let session_priv = Readable::read(reader)?;
13503			let payment = PendingOutboundPayment::Legacy {
13504				session_privs: hash_set_from_iter([session_priv]),
13505			};
13506			if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
13507				return Err(DecodeError::InvalidValue)
13508			};
13509		}
13510
13511		// pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
13512		let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
13513		let mut pending_outbound_payments = None;
13514		let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
13515		let mut received_network_pubkey: Option<PublicKey> = None;
13516		let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
13517		let mut probing_cookie_secret: Option<[u8; 32]> = None;
13518		let mut claimable_htlc_purposes = None;
13519		let mut claimable_htlc_onion_fields = None;
13520		let mut pending_claiming_payments = Some(new_hash_map());
13521		let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
13522		let mut events_override = None;
13523		let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
13524		let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
13525		let mut inbound_payment_id_secret = None;
13526		read_tlv_fields!(reader, {
13527			(1, pending_outbound_payments_no_retry, option),
13528			(2, pending_intercepted_htlcs, option),
13529			(3, pending_outbound_payments, option),
13530			(4, pending_claiming_payments, option),
13531			(5, received_network_pubkey, option),
13532			(6, monitor_update_blocked_actions_per_peer, option),
13533			(7, fake_scid_rand_bytes, option),
13534			(8, events_override, option),
13535			(9, claimable_htlc_purposes, optional_vec),
13536			(10, in_flight_monitor_updates, option),
13537			(11, probing_cookie_secret, option),
13538			(13, claimable_htlc_onion_fields, optional_vec),
13539			(14, decode_update_add_htlcs, option),
13540			(15, inbound_payment_id_secret, option),
13541		});
13542		let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
13543		if fake_scid_rand_bytes.is_none() {
13544			fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
13545		}
13546
13547		if probing_cookie_secret.is_none() {
13548			probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
13549		}
13550
13551		if inbound_payment_id_secret.is_none() {
13552			inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
13553		}
13554
13555		if let Some(events) = events_override {
13556			pending_events_read = events;
13557		}
13558
13559		if !channel_closures.is_empty() {
13560			pending_events_read.append(&mut channel_closures);
13561		}
13562
13563		if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
13564			pending_outbound_payments = Some(pending_outbound_payments_compat);
13565		} else if pending_outbound_payments.is_none() {
13566			let mut outbounds = new_hash_map();
13567			for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
13568				outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
13569			}
13570			pending_outbound_payments = Some(outbounds);
13571		}
13572		let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
13573
13574		// We have to replay (or skip, if they were completed after we wrote the `ChannelManager`)
13575		// each `ChannelMonitorUpdate` in `in_flight_monitor_updates`. After doing so, we have to
13576		// check that each channel we have isn't newer than the latest `ChannelMonitorUpdate`(s) we
13577		// replayed, and for each monitor update we have to replay we have to ensure there's a
13578		// `ChannelMonitor` for it.
13579		//
13580		// In order to do so we first walk all of our live channels (so that we can check their
13581		// state immediately after doing the update replays, when we have the `update_id`s
13582		// available) and then walk any remaining in-flight updates.
13583		//
13584		// Because the actual handling of the in-flight updates is the same, it's macro'ized here:
13585		let mut pending_background_events = Vec::new();
13586		macro_rules! handle_in_flight_updates {
13587			($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
13588			 $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
13589			) => { {
13590				let mut max_in_flight_update_id = 0;
13591				let starting_len = $chan_in_flight_upds.len();
13592				$chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
13593				if $chan_in_flight_upds.len() < starting_len {
13594					log_debug!(
13595						$logger,
13596						"{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
13597						starting_len - $chan_in_flight_upds.len()
13598					);
13599				}
13600				for update in $chan_in_flight_upds.iter() {
13601					log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
13602						update.update_id, $channel_info_log, &$monitor.channel_id());
13603					max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
13604					pending_background_events.push(
13605						BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13606							counterparty_node_id: $counterparty_node_id,
13607							funding_txo: $funding_txo,
13608							channel_id: $monitor.channel_id(),
13609							update: update.clone(),
13610						});
13611				}
13612				if $chan_in_flight_upds.is_empty() {
13613					// We had some updates to apply, but it turns out they had completed before we
13614					// were serialized; we just weren't notified of that. Thus, we may have to run
13615					// the completion actions for any monitor updates, but otherwise are done.
13616					pending_background_events.push(
13617						BackgroundEvent::MonitorUpdatesComplete {
13618							counterparty_node_id: $counterparty_node_id,
13619							channel_id: $monitor.channel_id(),
13620						});
13621				} else {
13622					$peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
13623						.and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
13624						.or_insert(max_in_flight_update_id);
13625				}
13626				if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
13627					log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
13628					return Err(DecodeError::InvalidValue);
13629				}
13630				max_in_flight_update_id
13631			} }
13632		}
13633
13634		for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
13635			let mut peer_state_lock = peer_state_mtx.lock().unwrap();
13636			let peer_state = &mut *peer_state_lock;
13637			for phase in peer_state.channel_by_id.values() {
13638				if let ChannelPhase::Funded(chan) = phase {
13639					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
13640
13641					// Channels that were persisted have to be funded, otherwise they should have been
13642					// discarded.
13643					let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13644					let monitor = args.channel_monitors.get(&funding_txo)
13645						.expect("We already checked for monitor presence when loading channels");
13646					let mut max_in_flight_update_id = monitor.get_latest_update_id();
13647					if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
13648						if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
13649							max_in_flight_update_id = cmp::max(max_in_flight_update_id,
13650								handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
13651									funding_txo, monitor, peer_state, logger, ""));
13652						}
13653					}
13654					if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
13655						// If the channel is ahead of the monitor, return DangerousValue:
13656						log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
13657						log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_ids through {} in-flight",
13658							chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
13659						log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
13660						log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13661						log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13662						log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13663						log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13664						return Err(DecodeError::DangerousValue);
13665					}
13666				} else {
13667					// We shouldn't have persisted (or read) any unfunded channel types so none should have been
13668					// created in this `channel_by_id` map.
13669					debug_assert!(false);
13670					return Err(DecodeError::InvalidValue);
13671				}
13672			}
13673		}
13674
13675		if let Some(in_flight_upds) = in_flight_monitor_updates {
13676			for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
13677				let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
13678				let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
13679				if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
13680					// Now that we've removed all the in-flight monitor updates for channels that are
13681					// still open, we need to replay any monitor updates that are for closed channels,
13682					// creating the necessary peer_state entries as we go.
13683					let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
13684						Mutex::new(empty_peer_state())
13685					});
13686					let mut peer_state = peer_state_mutex.lock().unwrap();
13687					handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
13688						funding_txo, monitor, peer_state, logger, "closed ");
13689				} else {
13690					log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
13691					log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
13692						channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
13693					log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13694					log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13695					log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13696					log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13697					log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
13698					return Err(DecodeError::InvalidValue);
13699				}
13700			}
13701		}
13702
13703		// The newly generated `close_background_events` have to be added after any updates that
13704		// were already in-flight on shutdown, so we append them here.
13705		pending_background_events.reserve(close_background_events.len());
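		// For each regenerated force-close event below, we check the updates already queued for
		// the same channel: if one of them already contains a `ChannelForceClosed` step the new
		// event is redundant and is skipped, otherwise its `update_id` is bumped past the queued
		// update's id (e.g. a queued id of 7 yields a regenerated id of 8) so it replays strictly
		// after the updates that were in flight at shutdown.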
13706		'each_bg_event: for mut new_event in close_background_events {
13707			if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13708				counterparty_node_id, funding_txo, channel_id, update,
13709			} = &mut new_event {
13710				debug_assert_eq!(update.updates.len(), 1);
13711				debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
13712				let mut updated_id = false;
13713				for pending_event in pending_background_events.iter() {
13714					if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13715						counterparty_node_id: pending_cp, funding_txo: pending_funding,
13716						channel_id: pending_chan_id, update: pending_update,
13717					} = pending_event {
13718						let for_same_channel = counterparty_node_id == pending_cp
13719							&& funding_txo == pending_funding
13720							&& channel_id == pending_chan_id;
13721						if for_same_channel {
13722							debug_assert!(update.update_id >= pending_update.update_id);
13723							if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) {
13724								// If the background event we're looking at is just
13725								// force-closing the channel which already has a pending
13726								// force-close update, no need to duplicate it.
13727								continue 'each_bg_event;
13728							}
13729							update.update_id = pending_update.update_id.saturating_add(1);
13730							updated_id = true;
13731						}
13732					}
13733				}
13734				let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13735					.expect("If we have pending updates for a channel it must have an entry")
13736					.lock().unwrap();
13737				if updated_id {
13738					per_peer_state
13739						.closed_channel_monitor_update_ids.entry(*channel_id)
13740						.and_modify(|v| *v = cmp::max(update.update_id, *v))
13741						.or_insert(update.update_id);
13742				}
13743				let in_flight_updates = per_peer_state.in_flight_monitor_updates
13744					.entry(*funding_txo)
13745					.or_insert_with(Vec::new);
13746				debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13747				in_flight_updates.push(update.clone());
13748			}
13749			pending_background_events.push(new_event);
13750		}
13751
13752		// If there's any preimages for forwarded HTLCs hanging around in ChannelMonitors we
13753		// should ensure we try them again on the inbound edge. We put them here and do so after we
13754		// have a fully-constructed `ChannelManager` at the end.
13755		let mut pending_claims_to_replay = Vec::new();
13756
13757		{
13758			// If we're tracking pending payments, ensure we haven't lost any by looking at the
13759			// ChannelMonitor data for any channels for which we do not have authoritative state
13760			// (i.e. those for which we just force-closed above or we otherwise don't have a
13761			// corresponding `Channel` at all).
13762			// This avoids several edge-cases where we would otherwise "forget" about pending
13763			// payments which are still in-flight via their on-chain state.
13764			// We only rebuild the pending payments map if we were most recently serialized by
13765			// 0.0.102+.
13766			for (_, monitor) in args.channel_monitors.iter() {
13767				let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
13768				if counterparty_opt.is_none() {
13769					for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
13770						let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13771						if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
13772							if path.hops.is_empty() {
13773								log_error!(logger, "Got an empty path for a pending payment");
13774								return Err(DecodeError::InvalidValue);
13775							}
13776
13777							let mut session_priv_bytes = [0; 32];
13778							session_priv_bytes[..].copy_from_slice(&session_priv[..]);
13779							pending_outbounds.insert_from_monitor_on_startup(
13780								payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger
13781							);
13782						}
13783					}
13784					for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
13785						let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13786						match htlc_source {
13787							HTLCSource::PreviousHopData(prev_hop_data) => {
13788								let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
13789									info.prev_funding_outpoint == prev_hop_data.outpoint &&
13790										info.prev_htlc_id == prev_hop_data.htlc_id
13791								};
13792								// The ChannelMonitor is now responsible for this HTLC's
13793								// failure/success and will let us know what its outcome is. If we
13794								// still have an entry for this HTLC in `forward_htlcs` or
13795								// `pending_intercepted_htlcs`, we were apparently not persisted after
13796								// the monitor was updated when forwarding the payment.
13797								decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
13798									update_add_htlcs.retain(|update_add_htlc| {
13799										let matches = *scid == prev_hop_data.short_channel_id &&
13800											update_add_htlc.htlc_id == prev_hop_data.htlc_id;
13801										if matches {
13802											log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
13803												&htlc.payment_hash, &monitor.channel_id());
13804										}
13805										!matches
13806									});
13807									!update_add_htlcs.is_empty()
13808								});
13809								forward_htlcs.retain(|_, forwards| {
13810									forwards.retain(|forward| {
13811										if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
13812											if pending_forward_matches_htlc(&htlc_info) {
13813												log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
13814													&htlc.payment_hash, &monitor.channel_id());
13815												false
13816											} else { true }
13817										} else { true }
13818									});
13819									!forwards.is_empty()
13820								});
13821								pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
13822									if pending_forward_matches_htlc(&htlc_info) {
13823										log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
13824											&htlc.payment_hash, &monitor.channel_id());
13825										pending_events_read.retain(|(event, _)| {
13826											if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
13827												intercepted_id != ev_id
13828											} else { true }
13829										});
13830										false
13831									} else { true }
13832								});
13833							},
13834							HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
13835								if let Some(preimage) = preimage_opt {
13836									let pending_events = Mutex::new(pending_events_read);
13837									// Note that we set `from_onchain` to "false" here,
13838									// deliberately keeping the pending payment around forever.
13839									// Given it should only occur when we have a channel we're
13840									// force-closing for being stale, that's okay.
13841									// The alternative would be to wipe the state when claiming,
13842									// generating a `PaymentPathSuccessful` event but regenerating
13843									// it and the `PaymentSent` on every restart until the
13844									// `ChannelMonitor` is removed.
13845									let compl_action =
13846										EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
13847											channel_funding_outpoint: monitor.get_funding_txo().0,
13848											channel_id: monitor.channel_id(),
13849											counterparty_node_id: path.hops[0].pubkey,
13850										};
13851									pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
13852										path, false, compl_action, &pending_events, &&logger);
13853									pending_events_read = pending_events.into_inner().unwrap();
13854								}
13855							},
13856						}
13857					}
13858				}
13859
13860				// Whether the downstream channel was closed or not, try to re-apply any payment
13861				// preimages from it which may be needed in upstream channels for forwarded
13862				// payments.
13863				let mut fail_read = false;
13864				let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
13865					.into_iter()
13866					.filter_map(|(htlc_source, (htlc, preimage_opt))| {
13867						if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
13868							if let Some(payment_preimage) = preimage_opt {
13869								let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
13870								// Note that for channels which have gone to chain,
13871								// `get_all_current_outbound_htlcs` is never pruned and always returns
13872								// a constant set until the monitor is removed/archived. Thus, we
13873								// want to skip replaying claims that have definitely been resolved
13874								// on-chain.
13875
13876								// If the inbound monitor is not present, we assume it was fully
13877								// resolved and properly archived, implying this payment had plenty
13878								// of time to get claimed and we can safely skip any further
13879								// attempts to claim it (they wouldn't succeed anyway as we don't
13880								// have a monitor against which to do so).
13881								let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
13882									monitor
13883								} else {
13884									return None;
13885								};
13886								// Second, if the inbound edge of the payment's monitor has been
13887								// fully claimed we've had at least `ANTI_REORG_DELAY` blocks to
13888								// get any PaymentForwarded event(s) to the user and assume that
13889								// there's no need to try to replay the claim just for that.
13890								let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
13891								if inbound_edge_balances.is_empty() {
13892									return None;
13893								}
13894
13895								if prev_hop.counterparty_node_id.is_none() {
13896									// We no longer support claiming an HTLC where we don't have
13897									// the counterparty_node_id available if the claim has to go to
13898									// a closed channel. It's possible we can get away with it if
13899									// the channel is not yet closed, but it's by no means a
13900									// guarantee.
13901
13902									// Thus, in this case we are a bit more aggressive with our
13903									// pruning - if we have no use for the claim (because the
13904									// inbound edge of the payment's monitor has already claimed
13905									// the HTLC) we skip trying to replay the claim.
13906									let htlc_payment_hash: PaymentHash = payment_preimage.into();
13907									let balance_could_incl_htlc = |bal| match bal {
13908										&Balance::ClaimableOnChannelClose { .. } => {
13909											// The channel is still open, assume we can still
13910											// claim against it
13911											true
13912										},
13913										&Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
13914											payment_hash == htlc_payment_hash
13915										},
13916										_ => false,
13917									};
13918									let htlc_may_be_in_balances =
13919										inbound_edge_balances.iter().any(balance_could_incl_htlc);
13920									if !htlc_may_be_in_balances {
13921										return None;
13922									}
13923
13924									// First check if we're absolutely going to fail - if we need
13925									// to replay this claim to get the preimage into the inbound
13926									// edge monitor but the channel is closed (and thus we'll
13927									// immediately panic if we call claim_funds_from_hop).
13928									if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
13929										log_error!(args.logger,
13930											"We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124. \
13931											All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1.",
13932											htlc_payment_hash,
13933											payment_preimage,
13934										);
13935										fail_read = true;
13936									}
13937
13938									// At this point we're confident we need the claim, but the
13939									// inbound edge channel is still live. As long as this remains
13940									// the case, we can conceivably proceed, but we run some risk
13941									// of panicking at runtime. The user ideally should have read
13942									// the release notes and we wouldn't be here, but we go ahead
13943									// and let things run in the hope that it'll all just work out.
13944									log_error!(args.logger,
13945										"We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably. \
13946										As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \
13947										All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \
13948										Continuing anyway, though panics may occur!",
13949										htlc_payment_hash,
13950										payment_preimage,
13951									);
13952								}
13953
13954								Some((htlc_source, payment_preimage, htlc.amount_msat,
13955									// Check if `counterparty_opt.is_none()` to see if the
13956									// downstream chan is closed (because we don't have a
13957									// outpoint -> peer map entry).
13958									counterparty_opt.is_none(),
13959									counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
13960									monitor.get_funding_txo().0, monitor.channel_id()))
13961							} else { None }
13962						} else {
13963							// If it was an outbound payment, we've handled it above - if a preimage
13964							// came in and we persisted the `ChannelManager` we either handled it and
13965							// are good to go or the channel force-closed - we don't have to handle the
13966							// channel still live case here.
13967							None
13968						}
13969					});
13970				for tuple in outbound_claimed_htlcs_iter {
13971					pending_claims_to_replay.push(tuple);
13972				}
13973				if fail_read {
13974					return Err(DecodeError::InvalidValue);
13975				}
13976			}
13977		}
13978
13979		if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
13980			// If we have pending HTLCs to forward, assume we either dropped a
13981			// `PendingHTLCsForwardable` or the user received it but never processed it as they
13982			// shut down before the timer hit. Either way, set the time_forwardable to a small
13983			// constant as enough time has likely passed that we should simply handle the forwards
13984			// now, or at least after the user gets a chance to reconnect to our peers.
13985			pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
13986				time_forwardable: Duration::from_secs(2),
13987			}, None));
13988		}
13989
13990		let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
13991
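		// Rebuild the claimable-payments map. Newer serializations write the payment purposes
		// (and, more recently, the onion fields) as lists parallel to the claimable-HTLC list, so
		// the lists must zip together one-to-one; a length mismatch or a duplicate payment hash
		// is treated as a corrupt serialization.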
13992		let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
13993		if let Some(purposes) = claimable_htlc_purposes {
13994			if purposes.len() != claimable_htlcs_list.len() {
13995				return Err(DecodeError::InvalidValue);
13996			}
13997			if let Some(onion_fields) = claimable_htlc_onion_fields {
13998				if onion_fields.len() != claimable_htlcs_list.len() {
13999					return Err(DecodeError::InvalidValue);
14000				}
14001				for (purpose, (onion, (payment_hash, htlcs))) in
14002					purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
14003				{
14004					let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
14005						purpose, htlcs, onion_fields: onion,
14006					});
14007					if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
14008				}
14009			} else {
14010				for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
14011					let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
14012						purpose, htlcs, onion_fields: None,
14013					});
14014					if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
14015				}
14016			}
14017		} else {
14018			// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but do
14019			// include a `_legacy_hop_data` in the `OnionPayload`.
14020			for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
14021				if htlcs.is_empty() {
14022					return Err(DecodeError::InvalidValue);
14023				}
14024				let purpose = match &htlcs[0].onion_payload {
14025					OnionPayload::Invoice { _legacy_hop_data } => {
14026						if let Some(hop_data) = _legacy_hop_data {
14027							events::PaymentPurpose::Bolt11InvoicePayment {
14028								payment_preimage:
14029									match inbound_payment::verify(
14030										payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger
14031									) {
14032										Ok((payment_preimage, _)) => payment_preimage,
14033										Err(()) => {
14034											log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
14035											return Err(DecodeError::InvalidValue);
14036										}
14037									},
14038								payment_secret: hop_data.payment_secret,
14039							}
14040						} else { return Err(DecodeError::InvalidValue); }
14041					},
14042					OnionPayload::Spontaneous(payment_preimage) =>
14043						events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
14044				};
14045				claimable_payments.insert(payment_hash, ClaimablePayment {
14046					purpose, htlcs, onion_fields: None,
14047				});
14048			}
14049		}
14050
14051		// Similar to the above cases for forwarded payments, if we have any pending inbound HTLCs
14052		// which haven't yet been claimed, we may be missing counterparty_node_id info and would
14053		// panic if we attempted to claim them at this point.
14054		for (payment_hash, payment) in claimable_payments.iter() {
14055			for htlc in payment.htlcs.iter() {
14056				if htlc.prev_hop.counterparty_node_id.is_some() {
14057					continue;
14058				}
14059				if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
14060					log_error!(args.logger,
14061						"We do not have the required information to claim a pending payment with payment hash {} reliably. \
14062						As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime! \
14063						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1. \
14064						Continuing anyway, though panics may occur!",
14065						payment_hash,
14066					);
14067				} else {
14068					log_error!(args.logger,
14069						"We do not have the required information to claim a pending payment with payment hash {}. \
14070						All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1.",
14071						payment_hash,
14072					);
14073					return Err(DecodeError::InvalidValue);
14074				}
14075			}
14076		}
14077
14078		let mut secp_ctx = Secp256k1::new();
14079		secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
14080
14081		let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
14082			Ok(key) => key,
14083			Err(()) => return Err(DecodeError::InvalidValue)
14084		};
14085		if let Some(network_pubkey) = received_network_pubkey {
14086			if network_pubkey != our_network_pubkey {
14087				log_error!(args.logger, "The node_id derived from our NodeSigner does not match the key this ChannelManager was serialized with.");
14088				return Err(DecodeError::InvalidValue);
14089			}
14090		}
14091
14092		let mut outbound_scid_aliases = new_hash_set();
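		// Every funded channel must end up with a unique outbound SCID alias: channels read
		// without one (an alias of 0) get a freshly generated fake SCID here, while a collision
		// between already-assigned aliases is treated as a corrupt serialization.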
14093		for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
14094			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14095			let peer_state = &mut *peer_state_lock;
14096			for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
14097				if let ChannelPhase::Funded(chan) = phase {
14098					let logger = WithChannelContext::from(&args.logger, &chan.context, None);
14099					if chan.context.outbound_scid_alias() == 0 {
14100						let mut outbound_scid_alias;
14101						loop {
14102							outbound_scid_alias = fake_scid::Namespace::OutboundAlias
14103								.get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
14104							if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
14105						}
14106						chan.context.set_outbound_scid_alias(outbound_scid_alias);
14107					} else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
14108						// Note that in rare cases it's possible to hit this while reading an older
14109						// channel if we just happened to pick a colliding outbound alias above.
14110						log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14111						return Err(DecodeError::InvalidValue);
14112					}
14113					if chan.context.is_usable() {
14114						if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
14115							// Note that in rare cases it's possible to hit this while reading an older
14116							// channel if we just happened to pick a colliding outbound alias above.
14117							log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14118							return Err(DecodeError::InvalidValue);
14119						}
14120					}
14121				} else {
14122					// We shouldn't have persisted (or read) any unfunded channel types so none should have been
14123					// created in this `channel_by_id` map.
14124					debug_assert!(false);
14125					return Err(DecodeError::InvalidValue);
14126				}
14127			}
14128		}
14129
14130		let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
14131
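		// Re-install the post-update completion actions which were blocked on in-flight monitor
		// updates when we were serialized, re-deriving any RAA-blocking actions on downstream
		// channels so those channels stay blocked until the relevant preimages have been durably
		// persisted in the inbound edge's ChannelMonitor.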
14132		for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
14133			if let Some(peer_state) = per_peer_state.get(&node_id) {
14134				for (channel_id, actions) in monitor_update_blocked_actions.iter() {
14135					let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
14136					for action in actions.iter() {
14137						if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
14138							downstream_counterparty_and_funding_outpoint:
14139								Some(EventUnblockedChannel {
14140									counterparty_node_id: blocked_node_id,
14141									funding_txo: _,
14142									channel_id: blocked_channel_id,
14143									blocking_action,
14144								}), ..
14145						} = action {
14146							if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
14147								log_trace!(logger,
14148									"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
14149									blocked_channel_id);
14150								blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
14151									.entry(*blocked_channel_id)
14152									.or_insert_with(Vec::new).push(blocking_action.clone());
14153							} else {
14154								// If the channel we were blocking has closed, we don't need to
14155								// worry about it - the blocked monitor update should never have
14156								// been released from the `Channel` object so it can't have
14157								// completed, and if the channel closed there's no reason to bother
14158								// anymore.
14159							}
14160						}
14161						if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
14162							debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
14163						}
14164					}
14165					// Note that we may have a post-update action for a channel that has no pending
14166					// `ChannelMonitorUpdate`s, but unlike the no-peer-state case, it may simply be
14167					// because we had a `ChannelMonitorUpdate` complete after the last time this
14168					// `ChannelManager` was serialized. In that case, we'll run the post-update
14169					// actions as soon as we get going.
14170				}
14171				peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
14172			} else {
14173				for actions in monitor_update_blocked_actions.values() {
14174					for action in actions.iter() {
14175						if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
14176							// If there is no state for this channel but we have pending
14177							// post-update actions, it's possible that one was left over from pre-0.1
14178							// payment claims where MPP claims led to a channel blocked on itself
14179							// and later `ChannelMonitorUpdate`s didn't get their post-update
14180							// actions run.
14181							// This should only have happened for `PaymentClaimed` post-update actions,
14182							// which we ignore here.
14183						} else {
14184							let logger = WithContext::from(&args.logger, Some(node_id), None, None);
14185							log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id);
14186							return Err(DecodeError::InvalidValue);
14187						}
14188					}
14189				}
14190			}
14191		}
14192
14193		let channel_manager = ChannelManager {
14194			chain_hash,
14195			fee_estimator: bounded_fee_estimator,
14196			chain_monitor: args.chain_monitor,
14197			tx_broadcaster: args.tx_broadcaster,
14198			router: args.router,
14199			message_router: args.message_router,
14200
14201			best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
14202
14203			inbound_payment_key: expanded_inbound_key,
14204			pending_outbound_payments: pending_outbounds,
14205			pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
14206
14207			forward_htlcs: Mutex::new(forward_htlcs),
14208			decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
14209			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
14210			outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
14211			outpoint_to_peer: Mutex::new(outpoint_to_peer),
14212			short_to_chan_info: FairRwLock::new(short_to_chan_info),
14213			fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
14214
14215			probing_cookie_secret: probing_cookie_secret.unwrap(),
14216			inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
14217
14218			our_network_pubkey,
14219			secp_ctx,
14220
14221			highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
14222
14223			per_peer_state: FairRwLock::new(per_peer_state),
14224
14225			pending_events: Mutex::new(pending_events_read),
14226			pending_events_processor: AtomicBool::new(false),
14227			pending_background_events: Mutex::new(pending_background_events),
14228			total_consistency_lock: RwLock::new(()),
14229			background_events_processed_since_startup: AtomicBool::new(false),
14230
14231			event_persist_notifier: Notifier::new(),
14232			needs_persist_flag: AtomicBool::new(false),
14233
14234			funding_batch_states: Mutex::new(BTreeMap::new()),
14235
14236			pending_offers_messages: Mutex::new(Vec::new()),
14237			pending_async_payments_messages: Mutex::new(Vec::new()),
14238
14239			pending_broadcast_messages: Mutex::new(Vec::new()),
14240
14241			entropy_source: args.entropy_source,
14242			node_signer: args.node_signer,
14243			signer_provider: args.signer_provider,
14244
14245			last_days_feerates: Mutex::new(VecDeque::new()),
14246
14247			logger: args.logger,
14248			default_configuration: args.default_config,
14249
14250			#[cfg(feature = "dnssec")]
14251			hrn_resolver: OMNameResolver::new(highest_seen_timestamp, best_block_height),
14252			#[cfg(feature = "dnssec")]
14253			pending_dns_onion_messages: Mutex::new(Vec::new()),
14254
14255			#[cfg(feature = "_test_utils")]
14256			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
14257		};
14258
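		// With the `ChannelManager` fully constructed, replay any payment preimages the
		// `ChannelMonitor`s still hold. Monitors which recorded the MPP parts each claim covers
		// are replayed via `claim_mpp_part` below, while legacy monitors without that data fall
		// back to re-applying the preimage directly and re-generating `PaymentClaimed`.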
14259		let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
14260		for (_, monitor) in args.channel_monitors.iter() {
14261			for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
14262				if !payment_claims.is_empty() {
14263					for payment_claim in payment_claims {
14264						if processed_claims.contains(&payment_claim.mpp_parts) {
14265							// We might get the same payment a few times from different channels
14266							// that the MPP payment was received using. There's no point in trying
14267							// to claim the same payment again and again, so we check if the HTLCs
14268							// are the same and skip the payment here.
14269							continue;
14270						}
14271						if payment_claim.mpp_parts.is_empty() {
14272							return Err(DecodeError::InvalidValue);
14273						}
14274						{
14275							let payments = channel_manager.claimable_payments.lock().unwrap();
14276							if !payments.claimable_payments.contains_key(&payment_hash) {
14277								if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) {
14278									if payment.payment_id == payment_claim.claiming_payment.payment_id {
14279										// If this payment already exists and was marked as
14280										// being-claimed then the serialized state must contain all
14281										// of the pending `ChannelMonitorUpdate`s required to get
14282										// the preimage on disk in all MPP parts. Thus we can skip
14283										// the replay below.
14284										continue;
14285									}
14286								}
14287							}
14288						}
14289
14290						let mut channels_without_preimage = payment_claim.mpp_parts.iter()
14291							.map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id))
14292							.collect::<Vec<_>>();
14293						// If we have multiple MPP parts which were received over the same channel,
14294						// we only track it once as once we get a preimage durably in the
14295						// `ChannelMonitor` it will be used for all HTLCs with a matching hash.
14296						channels_without_preimage.sort_unstable();
14297						channels_without_preimage.dedup();
14298						let pending_claims = PendingMPPClaim {
14299							channels_without_preimage,
14300							channels_with_preimage: Vec::new(),
14301						};
14302						let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
14303
14304						// While it may be duplicative to generate a PaymentClaimed here, trying to
14305						// figure out if the user definitely saw it before shutdown would require some
14306						// nontrivial logic and may break as we move away from regularly persisting
14307						// ChannelManager. Instead, we rely on the users' event handler being
14308						// idempotent and just blindly generate one no matter what, letting the
14309						// preimages eventually time out from ChannelMonitors to prevent us from
14310						// doing so forever.
14311
14312						let claim_found =
14313							channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
14314								payment_hash, &channel_manager.node_signer, &channel_manager.logger,
14315								&channel_manager.inbound_payment_id_secret, true,
14316							);
14317						if claim_found.is_err() {
14318							let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14319							match claimable_payments.pending_claiming_payments.entry(payment_hash) {
14320								hash_map::Entry::Occupied(_) => {
14321									debug_assert!(false, "Entry was added in begin_claiming_payment");
14322									return Err(DecodeError::InvalidValue);
14323								},
14324								hash_map::Entry::Vacant(entry) => {
14325									entry.insert(payment_claim.claiming_payment);
14326								},
14327							}
14328						}
14329
14330						for part in payment_claim.mpp_parts.iter() {
14331							let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
14332								part.counterparty_node_id, part.channel_id,
14333								PendingMPPClaimPointer(Arc::clone(&ptr))
14334							));
14335							let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
14336								RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
14337									pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
14338								}
14339							);
14340							// Note that we don't need to pass the `payment_info` here - it's
14341							// already (clearly) durably on disk in the `ChannelMonitor` so there's
14342							// no need to worry about getting it into others.
14343							channel_manager.claim_mpp_part(
14344								part.into(), payment_preimage, None,
14345								|_, _|
14346									(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
14347							);
14348						}
14349						processed_claims.insert(payment_claim.mpp_parts);
14350					}
14351				} else {
14352					let per_peer_state = channel_manager.per_peer_state.read().unwrap();
14353					let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14354					let payment = claimable_payments.claimable_payments.remove(&payment_hash);
14355					mem::drop(claimable_payments);
14356					if let Some(payment) = payment {
14357						log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
14358						let mut claimable_amt_msat = 0;
14359						let mut receiver_node_id = Some(our_network_pubkey);
14360						let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
14361						if phantom_shared_secret.is_some() {
14362							let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
14363								.expect("Failed to get node_id for phantom node recipient");
14364							receiver_node_id = Some(phantom_pubkey)
14365						}
14366						for claimable_htlc in &payment.htlcs {
14367							claimable_amt_msat += claimable_htlc.value;
14368
14369							// Add a holding-cell claim of the payment to the Channel, which should be
14370							// applied ~immediately on peer reconnection. Because it won't generate a
14371							// new commitment transaction we can just provide the payment preimage to
14372							// the corresponding ChannelMonitor and nothing else.
14373							//
14374							// We do so directly instead of via the normal ChannelMonitor update
14375							// procedure as the ChainMonitor hasn't yet been initialized, implying
14376							// we're not allowed to call it directly yet. Further, we do the update
14377							// without incrementing the ChannelMonitor update ID as there isn't any
14378							// reason to.
14379							// If we were to generate a new ChannelMonitor update ID here and then
14380							// crash before the user finishes block connect we'd end up force-closing
14381							// this channel as well. On the flip side, there's no harm in restarting
14382							// without the new monitor persisted - we'll end up right back here on
14383							// restart.
14384							let previous_channel_id = claimable_htlc.prev_hop.channel_id;
14385							let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
14386								.get(&claimable_htlc.prev_hop.outpoint).cloned();
14387							if let Some(peer_node_id) = peer_node_id_opt {
14388								let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
14389								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14390								let peer_state = &mut *peer_state_lock;
14391								if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
14392									let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
14393									channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
14394										claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
14395									);
14396								}
14397							}
14398							if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
14399								// Note that this is unsafe as we no longer require the
14400								// `ChannelMonitor`s to be re-persisted prior to this
14401								// `ChannelManager` being persisted after we get started running.
14402								// If this `ChannelManager` gets persisted first and then we crash, we
14403								// won't have the `claimable_payments` entry we need to re-enter
14404								// this code block, causing us to not re-apply the preimage to this
14405								// `ChannelMonitor`.
14406								//
14407								// We should never be here with modern payment claims, however, as
14408								// they should always include the HTLC list. Instead, this is only
14409								// for nodes during upgrade, and we explicitly require the old
14410								// persistence semantics on upgrade in the release notes.
14411								previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
14412									&payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
14413									&channel_manager.fee_estimator, &channel_manager.logger
14414								);
14415							}
14416						}
14417						let mut pending_events = channel_manager.pending_events.lock().unwrap();
14418						let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
14419						pending_events.push_back((events::Event::PaymentClaimed {
14420							receiver_node_id,
14421							payment_hash,
14422							purpose: payment.purpose,
14423							amount_msat: claimable_amt_msat,
14424							htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
14425							sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
14426							onion_fields: payment.onion_fields,
14427							payment_id: Some(payment_id),
14428						}, None));
14429					}
14430				}
14431			}
14432		}
14433
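		// Fail back, on the inbound edge, any HTLCs collected as failed while reading above.
		// 0x4000|8 is BOLT 4's `permanent_channel_failure`, which fits since the channel these
		// HTLCs were destined for is gone.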
14434		for htlc_source in failed_htlcs.drain(..) {
14435			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
14436			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
14437			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
14438			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
14439		}
14440
14441		for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
14442			// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
14443			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
14444			// channel is closed we just assume that it probably came from an on-chain claim.
14445			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
14446				downstream_closed, true, downstream_node_id, downstream_funding,
14447				downstream_channel_id, None
14448			);
14449		}
14450
14451		//TODO: Broadcast channel update for closed channels, but only after we've made a
14452		//connection or two.
14453
14454		Ok((best_block_hash.clone(), channel_manager))
14455	}
14456}
14457
14458#[cfg(test)]
14459mod tests {
14460	use bitcoin::hashes::Hash;
14461	use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
14462	use core::sync::atomic::Ordering;
14463	use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
14464	use crate::ln::types::ChannelId;
14465	use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
14466	use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId};
14467	use crate::ln::functional_test_utils::*;
14468	use crate::ln::msgs::{self, ErrorAction};
14469	use crate::ln::msgs::ChannelMessageHandler;
14470	use crate::ln::outbound_payment::Retry;
14471	use crate::prelude::*;
14472	use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
14473	use crate::util::errors::APIError;
14474	use crate::util::ser::Writeable;
14475	use crate::util::test_utils;
14476	use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
14477	use crate::sign::EntropySource;
14478
14479	#[test]
14480	fn test_notify_limits() {
14481		// Check that a few cases which don't require the persistence of a new ChannelManager,
14482		// indeed, do not cause the persistence of a new ChannelManager.
14483		let chanmon_cfgs = create_chanmon_cfgs(3);
14484		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
14485		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
14486		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
14487
14488		// All nodes start with a persistable update pending as `create_network` connects each node
14489		// with all other nodes to make most tests simpler.
14490		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14491		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14492		assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14493
14494		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
14495
14496		// We check that the channel info the nodes have doesn't change too early, even though we
14497		// try to connect messages with new values.
14498		chan.0.contents.fee_base_msat *= 2;
14499		chan.1.contents.fee_base_msat *= 2;
14500		let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
14501			&nodes[1].node.get_our_node_id()).pop().unwrap();
14502		let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
14503			&nodes[0].node.get_our_node_id()).pop().unwrap();
14504
14505		// The first two nodes (which opened a channel) should now require fresh persistence
14506		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14507		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14508		// ... but the last node should not.
14509		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14510		// After persisting the first two nodes they should no longer need fresh persistence.
14511		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14512		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14513
14514		// Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
14515		// about the channel.
14516		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
14517		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
14518		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
14519
14520		// The nodes which are a party to the channel should also ignore messages from unrelated
14521		// parties.
14522		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
14523		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
14524		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
14525		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
14526		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14527		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14528
14529		// At this point the channel info given by peers should still be the same.
14530		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
14531		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
14532
14533		// An earlier version of handle_channel_update didn't check the directionality of the
14534		// update message and would always update the local fee info, even if our peer was
14535		// (spuriously) forwarding us our own channel_update.
14536		let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
14537		let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
14538		let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
14539
14540		// First deliver each peers' own message, checking that the node doesn't need to be
14541		// persisted and that its channel info remains the same.
14542		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
14543		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
14544		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14545		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14546		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
14547		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
14548
14549		// Finally, deliver the other peers' message, ensuring each node needs to be persisted and
14550		// the channel info has updated.
14551		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
14552		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
14553		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
14554		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
14555		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
14556		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
14557	}
14558
14559	#[test]
14560	fn test_keysend_dup_hash_partial_mpp() {
14561		// Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
14562		// expected.
14563		let chanmon_cfgs = create_chanmon_cfgs(2);
14564		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14565		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14566		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14567		create_announced_chan_between_nodes(&nodes, 0, 1);
14568
14569		// First, send a partial MPP payment.
14570		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
14571		let mut mpp_route = route.clone();
14572		mpp_route.paths.push(mpp_route.paths[0].clone());
14573
14574		let payment_id = PaymentId([42; 32]);
14575		// Use the utility function send_payment_along_path to send the payment with MPP data which
14576		// indicates there are more HTLCs coming.
14577		let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
14578		let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
14579			RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
14580		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
14581			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
14582		check_added_monitors!(nodes[0], 1);
14583		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14584		assert_eq!(events.len(), 1);
14585		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
14586
14587		// Next, send a keysend payment with the same payment_hash and make sure it fails.
14588		nodes[0].node.send_spontaneous_payment(
14589			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14590			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
14591		).unwrap();
14592		check_added_monitors!(nodes[0], 1);
14593		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14594		assert_eq!(events.len(), 1);
14595		let ev = events.drain(..).next().unwrap();
14596		let payment_event = SendEvent::from_event(ev);
14597		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14598		check_added_monitors!(nodes[1], 0);
14599		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14600		expect_pending_htlcs_forwardable!(nodes[1]);
14601		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
14602		check_added_monitors!(nodes[1], 1);
14603		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14604		assert!(updates.update_add_htlcs.is_empty());
14605		assert!(updates.update_fulfill_htlcs.is_empty());
14606		assert_eq!(updates.update_fail_htlcs.len(), 1);
14607		assert!(updates.update_fail_malformed_htlcs.is_empty());
14608		assert!(updates.update_fee.is_none());
14609		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14610		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14611		expect_payment_failed!(nodes[0], our_payment_hash, true);
14612
14613		// Send the second half of the original MPP payment.
14614		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
14615			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
14616		check_added_monitors!(nodes[0], 1);
14617		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14618		assert_eq!(events.len(), 1);
14619		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
14620
14621		// Claim the full MPP payment. Note that we can't use a test utility like
14622		// claim_funds_along_route because the ordering of the messages causes the second half of the
14623		// payment to be put in the holding cell, which confuses the test utilities. So we exchange the
14624		// lightning messages manually.
14625		nodes[1].node.claim_funds(payment_preimage);
14626		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
14627		check_added_monitors!(nodes[1], 2);
14628
14629		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14630		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
14631		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
14632		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
14633		check_added_monitors!(nodes[0], 1);
14634		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14635		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
14636		check_added_monitors!(nodes[1], 1);
14637		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14638		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs);
14639		check_added_monitors!(nodes[1], 1);
14640		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
14641		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
14642		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
14643		check_added_monitors!(nodes[0], 1);
14644		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
14645		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
14646		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14647		check_added_monitors!(nodes[0], 1);
14648		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
14649		check_added_monitors!(nodes[1], 1);
14650		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
14651		check_added_monitors!(nodes[1], 1);
14652		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
14653		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
14654		check_added_monitors!(nodes[0], 1);
14655
14656		// Note that successful MPP payments will generate a single PaymentSent event upon the first
14657		// path's success and a PaymentPathSuccessful event for each path's success.
14658		let events = nodes[0].node.get_and_clear_pending_events();
14659		assert_eq!(events.len(), 2);
14660		match events[0] {
14661			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
14662				assert_eq!(payment_id, *actual_payment_id);
14663				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
14664				assert_eq!(route.paths[0], *path);
14665			},
14666			_ => panic!("Unexpected event"),
14667		}
14668		match events[1] {
14669			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
14670				assert_eq!(payment_id, *actual_payment_id);
14671				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
14672				assert_eq!(route.paths[0], *path);
14673			},
14674			_ => panic!("Unexpected event"),
14675		}
14676	}
14677
14678	#[test]
14679	fn test_keysend_dup_payment_hash() {
14680		// (1): Test that a keysend payment with a duplicate payment hash to an existing pending
14681		//      outbound regular payment fails as expected.
14682		// (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
14683		//      fails as expected.
14684		// (3): Test that a keysend payment with a duplicate payment hash to an existing keysend
14685		//      payment fails as expected. We only accept MPP keysends with payment secrets and reject
14686		//      otherwise.
14687		let chanmon_cfgs = create_chanmon_cfgs(2);
14688		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14689		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14690		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14691		create_announced_chan_between_nodes(&nodes, 0, 1);
14692		let scorer = test_utils::TestScorer::new();
14693		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
14694
14695		// To start (1), send a regular payment but don't claim it.
14696		let expected_route = [&nodes[1]];
14697		let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);
14698
14699		// Next, attempt a keysend payment and make sure it fails.
14700		let route_params = RouteParameters::from_payment_params_and_value(
14701			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
14702			TEST_FINAL_CLTV, false), 100_000);
14703		nodes[0].node.send_spontaneous_payment(
14704			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14705			PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
14706		).unwrap();
14707		check_added_monitors!(nodes[0], 1);
14708		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14709		assert_eq!(events.len(), 1);
14710		let ev = events.drain(..).next().unwrap();
14711		let payment_event = SendEvent::from_event(ev);
14712		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14713		check_added_monitors!(nodes[1], 0);
14714		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14715		// We have to forward pending HTLCs twice - the first pass tries to forward the payment
14716		// onward (and fails), the second processes the resulting failure and fails the HTLC backward
14717		expect_pending_htlcs_forwardable!(nodes[1]);
14718		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14719		check_added_monitors!(nodes[1], 1);
14720		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14721		assert!(updates.update_add_htlcs.is_empty());
14722		assert!(updates.update_fulfill_htlcs.is_empty());
14723		assert_eq!(updates.update_fail_htlcs.len(), 1);
14724		assert!(updates.update_fail_malformed_htlcs.is_empty());
14725		assert!(updates.update_fee.is_none());
14726		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14727		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14728		expect_payment_failed!(nodes[0], payment_hash, true);
14729
14730		// Finally, claim the original payment.
14731		claim_payment(&nodes[0], &expected_route, payment_preimage);
14732
14733		// To start (2), send a keysend payment but don't claim it.
14734		let payment_preimage = PaymentPreimage([42; 32]);
14735		let route = find_route(
14736			&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
14737			None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
14738		).unwrap();
14739		let payment_hash = nodes[0].node.send_spontaneous_payment(
14740			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
14741			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
14742		).unwrap();
14743		check_added_monitors!(nodes[0], 1);
14744		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14745		assert_eq!(events.len(), 1);
14746		let event = events.pop().unwrap();
14747		let path = vec![&nodes[1]];
14748		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14749
14750		// Next, attempt a regular payment and make sure it fails.
14751		let payment_secret = PaymentSecret([43; 32]);
14752		nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
14753			RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
14754		check_added_monitors!(nodes[0], 1);
14755		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14756		assert_eq!(events.len(), 1);
14757		let ev = events.drain(..).next().unwrap();
14758		let payment_event = SendEvent::from_event(ev);
14759		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14760		check_added_monitors!(nodes[1], 0);
14761		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14762		expect_pending_htlcs_forwardable!(nodes[1]);
14763		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14764		check_added_monitors!(nodes[1], 1);
14765		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14766		assert!(updates.update_add_htlcs.is_empty());
14767		assert!(updates.update_fulfill_htlcs.is_empty());
14768		assert_eq!(updates.update_fail_htlcs.len(), 1);
14769		assert!(updates.update_fail_malformed_htlcs.is_empty());
14770		assert!(updates.update_fee.is_none());
14771		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14772		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14773		expect_payment_failed!(nodes[0], payment_hash, true);
14774
14775		// Finally, succeed the keysend payment.
14776		claim_payment(&nodes[0], &expected_route, payment_preimage);
14777
14778		// To start (3), send a keysend payment but don't claim it.
14779		let payment_id_1 = PaymentId([44; 32]);
14780		let payment_hash = nodes[0].node.send_spontaneous_payment(
14781			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
14782			route.route_params.clone().unwrap(), Retry::Attempts(0)
14783		).unwrap();
14784		check_added_monitors!(nodes[0], 1);
14785		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14786		assert_eq!(events.len(), 1);
14787		let event = events.pop().unwrap();
14788		let path = vec![&nodes[1]];
14789		pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
14790
14791		// Next, attempt a keysend payment and make sure it fails.
14792		let route_params = RouteParameters::from_payment_params_and_value(
14793			PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
14794			100_000
14795		);
14796		let payment_id_2 = PaymentId([45; 32]);
14797		nodes[0].node.send_spontaneous_payment(
14798			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
14799			Retry::Attempts(0)
14800		).unwrap();
14801		check_added_monitors!(nodes[0], 1);
14802		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
14803		assert_eq!(events.len(), 1);
14804		let ev = events.drain(..).next().unwrap();
14805		let payment_event = SendEvent::from_event(ev);
14806		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
14807		check_added_monitors!(nodes[1], 0);
14808		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
14809		expect_pending_htlcs_forwardable!(nodes[1]);
14810		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
14811		check_added_monitors!(nodes[1], 1);
14812		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
14813		assert!(updates.update_add_htlcs.is_empty());
14814		assert!(updates.update_fulfill_htlcs.is_empty());
14815		assert_eq!(updates.update_fail_htlcs.len(), 1);
14816		assert!(updates.update_fail_malformed_htlcs.is_empty());
14817		assert!(updates.update_fee.is_none());
14818		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
14819		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
14820		expect_payment_failed!(nodes[0], payment_hash, true);
14821
14822		// Finally, claim the original payment.
14823		claim_payment(&nodes[0], &expected_route, payment_preimage);
14824	}
14825
14826	#[test]
14827	fn test_keysend_hash_mismatch() {
14828		// Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
14829		// preimage doesn't match the msg's payment hash.
14830		let chanmon_cfgs = create_chanmon_cfgs(2);
14831		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14832		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14833		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14834
14835		let payer_pubkey = nodes[0].node.get_our_node_id();
14836		let payee_pubkey = nodes[1].node.get_our_node_id();
14837
14838		let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
14839		let route_params = RouteParameters::from_payment_params_and_value(
14840			PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
14841		let network_graph = nodes[0].network_graph;
14842		let first_hops = nodes[0].node.list_usable_channels();
14843		let scorer = test_utils::TestScorer::new();
14844		let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
14845		let route = find_route(
14846			&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
14847			nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
14848		).unwrap();
14849
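		// The public keysend API derives the payment hash from the preimage, so we go through the
		// test-only send helpers to construct an HTLC whose preimage doesn't match its payment hash.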
14850		let test_preimage = PaymentPreimage([42; 32]);
14851		let mismatch_payment_hash = PaymentHash([43; 32]);
14852		let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
14853			RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
14854		nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
14855			RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
14856		check_added_monitors!(nodes[0], 1);
14857
14858		let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
14859		assert_eq!(updates.update_add_htlcs.len(), 1);
14860		assert!(updates.update_fulfill_htlcs.is_empty());
14861		assert!(updates.update_fail_htlcs.is_empty());
14862		assert!(updates.update_fail_malformed_htlcs.is_empty());
14863		assert!(updates.update_fee.is_none());
14864		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
14865
14866		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
14867	}
14868
14869	#[test]
14870	fn test_multi_hop_missing_secret() {
14871		let chanmon_cfgs = create_chanmon_cfgs(4);
14872		let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
14873		let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
14874		let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
14875
14876		let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
14877		let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
14878		let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
14879		let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
14880
14881		// Marshall an MPP route.
14882		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
14883		let path = route.paths[0].clone();
14884		route.paths.push(path);
14885		route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
14886		route.paths[0].hops[0].short_channel_id = chan_1_id;
14887		route.paths[0].hops[1].short_channel_id = chan_3_id;
14888		route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
14889		route.paths[1].hops[0].short_channel_id = chan_2_id;
14890		route.paths[1].hops[1].short_channel_id = chan_4_id;
14891
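		// `RecipientOnionFields::spontaneous_empty()` omits the payment secret, which is required
		// for multi-path payments, so the send below fails without adding any HTLCs.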
14892		nodes[0].node.send_payment_with_route(route, payment_hash,
14893			RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
14894		let events = nodes[0].node.get_and_clear_pending_events();
14895		assert_eq!(events.len(), 1);
14896		match events[0] {
14897			Event::PaymentFailed { reason, .. } => {
14898				assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
14899			}
14900			_ => panic!()
14901		}
14902		nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
14903		assert!(nodes[0].node.list_recent_payments().is_empty());
14904	}
14905
14906	#[test]
14907	fn test_channel_update_cached() {
14908		let chanmon_cfgs = create_chanmon_cfgs(3);
14909		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
14910		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
14911		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
14912
14913		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
14914
14915		nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
14916		check_added_monitors!(nodes[0], 1);
14917		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
14918
14919		// Confirm that the channel_update was not sent immediately to nodes[1] but was cached.
14920		let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
14921		assert_eq!(node_1_events.len(), 0);
14922
14923		{
14924			// Assert that the ChannelUpdate message has been added to nodes[0]'s pending broadcast messages
14925			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
14926			assert_eq!(pending_broadcast_messages.len(), 1);
14927		}
14928
14929		// Test that we do not retrieve the pending broadcast messages when we are not connected to any peer
14930		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
14931		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
14932
14933		nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
14934		nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());
14935
14936		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
14937		assert_eq!(node_0_events.len(), 0);
14938
14939		// Now we reconnect to a peer
14940		nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
14941			features: nodes[2].node.init_features(), networks: None, remote_network_address: None
14942		}, true).unwrap();
14943		nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
14944			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
14945		}, false).unwrap();
14946
14947		// Confirm that get_and_clear_pending_msg_events correctly captures pending broadcast messages
14948		let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
14949		assert_eq!(node_0_events.len(), 1);
14950		match &node_0_events[0] {
14951			MessageSendEvent::BroadcastChannelUpdate { .. } => (),
14952			_ => panic!("Unexpected event"),
14953		}
14954		{
14955			// Assert that the ChannelUpdate message has been cleared from nodes[0]'s pending broadcast messages
14956			let pending_broadcast_messages = nodes[0].node.pending_broadcast_messages.lock().unwrap();
14957			assert_eq!(pending_broadcast_messages.len(), 0);
14958		}
14959	}
14960
14961	#[test]
14962	fn test_drop_disconnected_peers_when_removing_channels() {
14963		let chanmon_cfgs = create_chanmon_cfgs(2);
14964		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14965		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
14966		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
14967
14968		create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
14969
14970		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
14971		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
14972		let chan_id = nodes[0].node.list_channels()[0].channel_id;
14973		let error_message = "Channel force-closed";
14974		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
14975		check_added_monitors!(nodes[0], 1);
14976		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);
14977
14978		{
14979			// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
14980			// disconnected and the channel between has been force closed.
14981			let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
14982			// Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
14983			assert_eq!(nodes_0_per_peer_state.len(), 1);
14984			assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
14985		}
14986
14987		nodes[0].node.timer_tick_occurred();
14988
14989		{
14990			// Assert that nodes[1] has now been removed.
14991			assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
14992		}
14993	}
14994
14995	#[test]
14996	fn test_drop_peers_when_removing_unfunded_channels() {
14997		let chanmon_cfgs = create_chanmon_cfgs(2);
14998		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
14999		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15000		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15001
15002		exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
15003		let events = nodes[0].node.get_and_clear_pending_events();
15004		assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
15005		match events[0] {
15006			Event::FundingGenerationReady { .. } => {}
15007			_ => panic!("Unexpected event {:?}", events),
15008		}
15009
15010		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
15011		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15012		check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
15013		check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);
15014
15015		// At this point the state for the peers should have been removed.
15016		assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
15017		assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
15018	}
15019
15020	#[test]
15021	fn bad_inbound_payment_hash() {
15022		// Add coverage for checking that a user-provided payment hash matches the payment secret.
15023		let chanmon_cfgs = create_chanmon_cfgs(2);
15024		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15025		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15026		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15027
15028		let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
15029		let payment_data = msgs::FinalOnionHopData {
15030			payment_secret,
15031			total_msat: 100_000,
15032		};
15033
15034		// Ensure that if the payment hash given to `inbound_payment::verify` differs from the original,
15035		// payment verification fails as expected.
15036		let mut bad_payment_hash = payment_hash.clone();
15037		bad_payment_hash.0[0] += 1;
15038		match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
15039			Ok(_) => panic!("Unexpected ok"),
15040			Err(()) => {
15041				nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
15042			}
15043		}
15044
15045		// Check that using the original payment hash succeeds.
15046		assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
15047	}
15048
15049	#[test]
15050	fn test_outpoint_to_peer_coverage() {
15051		// Test that `ChannelManager::outpoint_to_peer` contains channels which have been assigned
15052		// a `channel_id` (i.e. have had the funding tx created), and that they are removed once
15053		// the channel is successfully closed.
15054		let chanmon_cfgs = create_chanmon_cfgs(2);
15055		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15056		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15057		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15058
15059		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
15060		let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15061		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
15062		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15063		nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
15064
15065		let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
15066		let channel_id = ChannelId::from_bytes(tx.compute_txid().to_byte_array());
15067		{
15068			// Ensure that the `outpoint_to_peer` map is empty until either party has received the
15069			// funding transaction and therefore has the real `channel_id`.
15070			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
15071			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15072		}
15073
15074		nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
15075		{
15076			// Assert that `nodes[0]`'s `outpoint_to_peer` map is populated with the channel as soon
15077			// as it has the funding transaction.
15078			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15079			assert_eq!(nodes_0_lock.len(), 1);
15080			assert!(nodes_0_lock.contains_key(&funding_output));
15081		}
15082
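		// nodes[1] has not yet received the funding_created message (and thus the funding
		// transaction), so its map is still empty.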
15083		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15084
15085		let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
15086
15087		nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
15088		{
15089			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15090			assert_eq!(nodes_0_lock.len(), 1);
15091			assert!(nodes_0_lock.contains_key(&funding_output));
15092		}
15093		expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
15094
15095		{
15096			// Assert that `nodes[1]`'s `outpoint_to_peer` map is populated with the channel as
15097			// soon as it has the funding transaction.
15098			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15099			assert_eq!(nodes_1_lock.len(), 1);
15100			assert!(nodes_1_lock.contains_key(&funding_output));
15101		}
15102		check_added_monitors!(nodes[1], 1);
15103		let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
15104		nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
15105		check_added_monitors!(nodes[0], 1);
15106		expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
15107		let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
15108		let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
15109		update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
15110
15111		nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
15112		nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
15113		let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
15114		nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
15115
15116		let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
15117		nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0);
15118		{
15119			// Assert that the channel is kept in the `outpoint_to_peer` map for both nodes until the
15120			// channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
15121			// fee for the closing transaction has been negotiated, and each party has the other
15122			// party's signature for the closing transaction at the negotiated fee).
15123			let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
15124			assert_eq!(nodes_0_lock.len(), 1);
15125			assert!(nodes_0_lock.contains_key(&funding_output));
15126		}
15127
15128		{
15129			// At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
15130			// `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
15131			// from `nodes[0]` for the closing transaction with the proposed fee, the channel is
15132			// kept in `nodes[1]`'s `outpoint_to_peer` map.
15133			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15134			assert_eq!(nodes_1_lock.len(), 1);
15135			assert!(nodes_1_lock.contains_key(&funding_output));
15136		}
15137
15138		nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
15139		{
15140			// `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
15141			// therefore has all it needs to fully close the channel (both signatures for the
15142			// closing transaction).
15143			// Assert that the channel is removed from `nodes[0]`'s `outpoint_to_peer` map as it can be
15144			// fully closed by `nodes[0]`.
15145			assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
15146
15147			// Assert that the channel is still in `nodes[1]`'s `outpoint_to_peer` map, as `nodes[1]`
15148			// doesn't have `nodes[0]`'s signature for the closing transaction yet.
15149			let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
15150			assert_eq!(nodes_1_lock.len(), 1);
15151			assert!(nodes_1_lock.contains_key(&funding_output));
15152		}
15153
15154		let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
15155
15156		nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
15157		{
15158			// Assert that the channel has now been removed from both parties' `outpoint_to_peer` maps once
15159			// they both have everything required to fully close the channel.
15160			assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
15161		}
15162		let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
15163
15164		check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
15165		check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
15166	}
15167
15168	fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15169		let expected_message = format!("Not connected to node: {}", expected_public_key);
15170		check_api_error_message(expected_message, res_err)
15171	}
15172
15173	fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15174		let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
15175		check_api_error_message(expected_message, res_err)
15176	}
15177
15178	fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
15179		let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
15180		check_api_error_message(expected_message, res_err)
15181	}
15182
15183	fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
15184		let expected_message = "No such channel awaiting to be accepted.".to_string();
15185		check_api_error_message(expected_message, res_err)
15186	}
15187
15188	fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
15189		match res_err {
15190			Err(APIError::APIMisuseError { err }) => {
15191				assert_eq!(err, expected_err_message);
15192			},
15193			Err(APIError::ChannelUnavailable { err }) => {
15194				assert_eq!(err, expected_err_message);
15195			},
15196			Ok(_) => panic!("Unexpected Ok"),
15197			Err(_) => panic!("Unexpected Error"),
15198		}
15199	}
15200
15201	#[test]
15202	fn test_api_calls_with_unkown_counterparty_node() {
15203		// Tests that our API functions which expect a `counterparty_node_id` as input behave as
15204		// expected if the `counterparty_node_id` is an unknown peer in the
15205		// `ChannelManager::per_peer_state` map.
15206		let chanmon_cfg = create_chanmon_cfgs(2);
15207		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15208		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
15209		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15210
15211		// Dummy values
15212		let channel_id = ChannelId::from_bytes([4; 32]);
15213		let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
15214		let intercept_id = InterceptId([0; 32]);
15215		let error_message = "Channel force-closed";
15216
15217		// Test the API functions.
15218		check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);
15219
15220		check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);
15221
15222		check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);
15223
15224		check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
15225
15226		check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);
15227
15228		check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);
15229
15230		check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
15231	}
15232
15233	#[test]
15234	fn test_api_calls_with_unavailable_channel() {
15235		// Tests that our API functions which expect a `counterparty_node_id` and a `channel_id`
15236		// as input behave as expected if the `counterparty_node_id` is a known peer in the
15237		// `ChannelManager::per_peer_state` map, but the peer state doesn't contain a channel with
15238		// the given `channel_id`.
15239		let chanmon_cfg = create_chanmon_cfgs(2);
15240		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15241		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
15242		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15243
15244		let counterparty_node_id = nodes[1].node.get_our_node_id();
15245
15246		// Dummy values
15247		let channel_id = ChannelId::from_bytes([4; 32]);
15248		let error_message = "Channel force-closed";
15249
15250		// Test the API functions.
15251		check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));
15252
15253		check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);
15254
15255		check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
15256
15257		check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);
15258
15259		check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);
15260
15261		check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
15262	}
15263
15264	#[test]
15265	fn test_connection_limiting() {
15266		// Test that we limit un-channel'd peers and un-funded channels properly.
15267		let chanmon_cfgs = create_chanmon_cfgs(2);
15268		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15269		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15270		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15271
15272		// Note that create_network connects the nodes together for us
15273
15274		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15275		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15276
15277		let mut funding_tx = None;
15278		for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
15279			nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15280			let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15281
15282			if idx == 0 {
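				// Fund the first channel so that, once its funding transaction is mined later in
				// the test, nodes[0] counts as a "protected" peer with a live on-chain channel.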
15283				nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
15284				let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
15285				funding_tx = Some(tx.clone());
15286				nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
15287				let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
15288
15289				nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
15290				check_added_monitors!(nodes[1], 1);
15291				expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
15292
15293				let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
15294
15295				nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
15296				check_added_monitors!(nodes[0], 1);
15297				expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
15298			}
15299			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15300		}
15301
15302		// A MAX_UNFUNDED_CHANS_PER_PEER + 1 channel will be summarily rejected
15303		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
15304			&nodes[0].keys_manager);
15305		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15306		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15307			open_channel_msg.common_fields.temporary_channel_id);
15308
15309		// Further, because all of our channels with nodes[0] are inbound, and none of them funded,
15310		// it doesn't count as a "protected" peer, i.e. it counts towards the MAX_NO_CHANNEL_PEERS
15311		// limit.
15312		let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
15313		for _ in 1..super::MAX_NO_CHANNEL_PEERS {
15314			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15315				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15316			peer_pks.push(random_pk);
15317			nodes[1].node.peer_connected(random_pk, &msgs::Init {
15318				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15319			}, true).unwrap();
15320		}
15321		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15322			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15323		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15324			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15325		}, true).unwrap_err();
15326
15327		// Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
15328		// them if we have too many un-channel'd peers.
15329		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15330		let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
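		// Only the unfunded channels are dropped on disconnect; the first channel was funded
		// above and therefore survives.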
15331		assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
15332		for ev in chan_closed_events {
15333			if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
15334		}
15335		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15336			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15337		}, true).unwrap();
15338		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15339			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15340		}, true).unwrap_err();
15341
15342		// but of course if the connection is outbound it's allowed...
15343		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15344			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15345		}, false).unwrap();
15346		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15347
15348		// Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
15349		// Even though we accept one more connection from new peers, we won't actually let them
15350		// open channels.
15351		assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
15352		for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
15353			nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
15354			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
15355			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15356		}
15357		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15358		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
15359			open_channel_msg.common_fields.temporary_channel_id);
15360
15361		// Of course, however, outbound channels are always allowed
15362		nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
15363		get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
15364
15365		// If we fund the first channel, nodes[0] has a live on-chain channel with us, it is now
15366		// "protected" and can connect again.
15367		mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
15368		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15369			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15370		}, true).unwrap();
15371		get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
15372
15373		// Further, because the first channel was funded, we can open another channel with
15374		// last_random_pk.
15375		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15376		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
15377	}
15378
15379	#[test]
15380	fn test_outbound_chans_unlimited() {
15381		// Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
15382		let chanmon_cfgs = create_chanmon_cfgs(2);
15383		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15384		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
15385		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15386
15387		// Note that create_network connects the nodes together for us
15388
15389		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15390		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15391
15392		for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
15393			nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15394			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15395			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15396		}
15397
15398		// Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
15399		// rejected.
15400		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15401		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15402			open_channel_msg.common_fields.temporary_channel_id);
15403
15404		// but we can still open an outbound channel.
15405		nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15406		get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
15407
15408		// but even with such an outbound channel, additional inbound channels will still fail.
15409		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15410		assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
15411			open_channel_msg.common_fields.temporary_channel_id);
15412	}
15413
15414	#[test]
15415	fn test_0conf_limiting() {
15416		// Tests that we properly limit inbound channels when we have the manual-channel-acceptance
15417		// flag set and (sometimes) accept channels as 0conf.
15418		let chanmon_cfgs = create_chanmon_cfgs(2);
15419		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15420		let mut settings = test_default_channel_config();
15421		settings.manually_accept_inbound_channels = true;
15422		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
15423		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15424
15425		// Note that create_network connects the nodes together for us
15426
15427		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15428		let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15429
15430		// First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
15431		for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
15432			let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15433				&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15434			nodes[1].node.peer_connected(random_pk, &msgs::Init {
15435				features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15436			}, true).unwrap();
15437
15438			nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
15439			let events = nodes[1].node.get_and_clear_pending_events();
15440			match events[0] {
15441				Event::OpenChannelRequest { temporary_channel_id, .. } => {
15442					nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
15443				}
15444				_ => panic!("Unexpected event"),
15445			}
15446			get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
15447			open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
15448		}
15449
15450		// If we try to accept a channel from another peer non-0conf it will fail.
15451		let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
15452			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
15453		nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
15454			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15455		}, true).unwrap();
15456		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15457		let events = nodes[1].node.get_and_clear_pending_events();
15458		match events[0] {
15459			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15460				match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
15461					Err(APIError::APIMisuseError { err }) =>
15462						assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
15463					_ => panic!(),
15464				}
15465			}
15466			_ => panic!("Unexpected event"),
15467		}
15468		assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
15469			open_channel_msg.common_fields.temporary_channel_id);
15470
15471		// ...however if we accept the same channel 0conf it should work just fine.
15472		nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
15473		let events = nodes[1].node.get_and_clear_pending_events();
15474		match events[0] {
15475			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15476				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
15477			}
15478			_ => panic!("Unexpected event"),
15479		}
15480		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
15481	}
15482
15483	#[test]
15484	fn reject_excessively_underpaying_htlcs() {
15485		let chanmon_cfg = create_chanmon_cfgs(1);
15486		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15487		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
15488		let node = create_network(1, &node_cfg, &node_chanmgr);
15489		let sender_intended_amt_msat = 100;
15490		let extra_fee_msat = 10;
15491		let hop_data = msgs::InboundOnionPayload::Receive {
15492			sender_intended_htlc_amt_msat: 100,
15493			cltv_expiry_height: 42,
15494			payment_metadata: None,
15495			keysend_preimage: None,
15496			payment_data: Some(msgs::FinalOnionHopData {
15497				payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15498			}),
15499			custom_tlvs: Vec::new(),
15500		};
15501		// Check that if the amount we received + the penultimate hop extra fee is less than the sender
15502		// intended amount, we fail the payment.
15503		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15504		if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
15505			create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15506				sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
15507				current_height)
15508		{
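			// 19 is the BOLT 4 `final_incorrect_htlc_amount` failure code.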
15509			assert_eq!(err_code, 19);
15510		} else { panic!(); }
15511
15512		// If amt_received + extra_fee is equal to the sender intended amount, we're fine.
15513		let hop_data = msgs::InboundOnionPayload::Receive { // This is the same payload as above, InboundOnionPayload doesn't implement Clone
15514			sender_intended_htlc_amt_msat: 100,
15515			cltv_expiry_height: 42,
15516			payment_metadata: None,
15517			keysend_preimage: None,
15518			payment_data: Some(msgs::FinalOnionHopData {
15519				payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
15520			}),
15521			custom_tlvs: Vec::new(),
15522		};
15523		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15524		assert!(create_recv_pending_htlc_info(hop_data, [0; 32], PaymentHash([0; 32]),
15525			sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
15526			current_height).is_ok());
15527	}
15528
15529	#[test]
15530	fn test_final_incorrect_cltv(){
15531		let chanmon_cfg = create_chanmon_cfgs(1);
15532		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15533		let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
15534		let node = create_network(1, &node_cfg, &node_chanmgr);
15535
15536		let current_height: u32 = node[0].node.best_block.read().unwrap().height;
15537		let result = create_recv_pending_htlc_info(msgs::InboundOnionPayload::Receive {
15538			sender_intended_htlc_amt_msat: 100,
15539			cltv_expiry_height: 22,
15540			payment_metadata: None,
15541			keysend_preimage: None,
15542			payment_data: Some(msgs::FinalOnionHopData {
15543				payment_secret: PaymentSecret([0; 32]), total_msat: 100,
15544			}),
15545			custom_tlvs: Vec::new(),
15546		}, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);
15547
15548		// Should not return an error as this condition:
15549		// https://github.com/lightning/bolts/blob/4dcc377209509b13cf89a4b91fde7d478f5b46d8/04-onion-routing.md?plain=1#L334
15550		// is not satisfied.
15551		assert!(result.is_ok());
15552	}
15553
15554	#[test]
15555	fn test_inbound_anchors_manual_acceptance() {
15556		// Tests that inbound anchor channels are rejected by a node that hasn't enabled manual
15557		// channel acceptance, but can be accepted explicitly by a node that has.
15558		let mut anchors_cfg = test_default_channel_config();
15559		anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
15560
15561		let mut anchors_manual_accept_cfg = anchors_cfg.clone();
15562		anchors_manual_accept_cfg.manually_accept_inbound_channels = true;
15563
15564		let chanmon_cfgs = create_chanmon_cfgs(3);
15565		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
15566		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
15567			&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
15568		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
15569
15570		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
15571		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15572
15573		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15574		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
15575		let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
15576		match &msg_events[0] {
15577			MessageSendEvent::HandleError { node_id, action } => {
15578				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
15579				match action {
15580					ErrorAction::SendErrorMessage { msg } =>
15581						assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned()),
15582					_ => panic!("Unexpected error action"),
15583				}
15584			}
15585			_ => panic!("Unexpected event"),
15586		}
15587
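		// nodes[2] has manual channel acceptance enabled, so the same anchors open_channel
		// surfaces an OpenChannelRequest event which we can explicitly accept.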
15588		nodes[2].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15589		let events = nodes[2].node.get_and_clear_pending_events();
15590		match events[0] {
15591			Event::OpenChannelRequest { temporary_channel_id, .. } =>
15592				nodes[2].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23).unwrap(),
15593			_ => panic!("Unexpected event"),
15594		}
15595		get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
15596	}
15597
15598	#[test]
15599	fn test_anchors_zero_fee_htlc_tx_fallback() {
15600		// Tests that if both nodes support anchors, but the remote node does not want to accept
15601		// anchor channels at the moment, an error is sent to the local node such that it can retry
15602		// the channel without the anchors feature.
15603		let chanmon_cfgs = create_chanmon_cfgs(2);
15604		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
15605		let mut anchors_config = test_default_channel_config();
15606		anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
15607		anchors_config.manually_accept_inbound_channels = true;
15608		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
15609		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
15610		let error_message = "Channel force-closed";
15611
15612		nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None, None).unwrap();
15613		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15614		assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
15615
15616		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
15617		let events = nodes[1].node.get_and_clear_pending_events();
15618		match events[0] {
15619			Event::OpenChannelRequest { temporary_channel_id, .. } => {
15620				nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
15621			}
15622			_ => panic!("Unexpected event"),
15623		}
15624
15625		let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
15626		nodes[0].node.handle_error(nodes[1].node.get_our_node_id(), &error_msg);
15627
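		// Handling the error triggers an automatic retry of the channel open, this time without
		// the anchors feature.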
15628		let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
15629		assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
15630
15631		// Since nodes[1] should not have accepted the channel, it should
15632		// not have generated any events.
15633		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
15634	}
15635
15636	#[test]
15637	fn test_update_channel_config() {
15638		let chanmon_cfg = create_chanmon_cfgs(2);
15639		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15640		let mut user_config = test_default_channel_config();
15641		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
15642		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15643		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
15644		let channel = &nodes[0].node.list_channels()[0];
15645
15646		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
15647		let events = nodes[0].node.get_and_clear_pending_msg_events();
15648		assert_eq!(events.len(), 0);
15649
15650		user_config.channel_config.forwarding_fee_base_msat += 10;
15651		nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
15652		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
15653		let events = nodes[0].node.get_and_clear_pending_msg_events();
15654		assert_eq!(events.len(), 1);
15655		match &events[0] {
15656			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15657			_ => panic!("expected BroadcastChannelUpdate event"),
15658		}
15659
15660		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
15661		let events = nodes[0].node.get_and_clear_pending_msg_events();
15662		assert_eq!(events.len(), 0);
15663
15664		let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
15665		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
15666			cltv_expiry_delta: Some(new_cltv_expiry_delta),
15667			..Default::default()
15668		}).unwrap();
15669		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
15670		let events = nodes[0].node.get_and_clear_pending_msg_events();
15671		assert_eq!(events.len(), 1);
15672		match &events[0] {
15673			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15674			_ => panic!("expected BroadcastChannelUpdate event"),
15675		}
15676
15677		let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
15678		nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
15679			forwarding_fee_proportional_millionths: Some(new_fee),
15680			..Default::default()
15681		}).unwrap();
15682		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
15683		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
15684		let events = nodes[0].node.get_and_clear_pending_msg_events();
15685		assert_eq!(events.len(), 1);
15686		match &events[0] {
15687			MessageSendEvent::BroadcastChannelUpdate { .. } => {},
15688			_ => panic!("expected BroadcastChannelUpdate event"),
15689		}
15690
15691		// If we provide a channel_id not associated with the peer, we should get an error and no
15692		// updates should be applied, preserving the update atomicity specified in the API docs.
15693		let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
15694		let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
15695		let new_fee = current_fee + 100;
15696		assert!(
15697			matches!(
15698				nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
15699					forwarding_fee_proportional_millionths: Some(new_fee),
15700					..Default::default()
15701				}),
15702				Err(APIError::ChannelUnavailable { err: _ }),
15703			)
15704		);
15705		// Check that the fee hasn't changed for the channel that exists.
15706		assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
15707		let events = nodes[0].node.get_and_clear_pending_msg_events();
15708		assert_eq!(events.len(), 0);
15709	}
15710
15711	#[test]
15712	fn test_payment_display() {
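		// `PaymentId`, `PaymentHash`, and `PaymentPreimage` should all `Display` as the lower-case
		// hex encoding of their 32 bytes.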
15713		let payment_id = PaymentId([42; 32]);
15714		assert_eq!(format!("{}", &payment_id), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15715		let payment_hash = PaymentHash([42; 32]);
15716		assert_eq!(format!("{}", &payment_hash), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15717		let payment_preimage = PaymentPreimage([42; 32]);
15718		assert_eq!(format!("{}", &payment_preimage), "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a");
15719	}
15720
15721	#[test]
15722	fn test_trigger_lnd_force_close() {
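		// Tests the workaround for peers (such as lnd) that do not force close upon receiving an
		// `error` message alone: after force closing locally, the reconnecting peer is sent a bogus
		// `channel_reestablish` that prompts it to broadcast its latest commitment transaction.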
15723		let chanmon_cfg = create_chanmon_cfgs(2);
15724		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
15725		let user_config = test_default_channel_config();
15726		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
15727		let nodes = create_network(2, &node_cfg, &node_chanmgr);
15728		let error_message = "Channel force-closed";
15729
15730		// Open a channel, immediately disconnect each other, and broadcast Alice's latest state.
15731		let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
15732		nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
15733		nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
15734		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
15735		check_closed_broadcast(&nodes[0], 1, true);
15736		check_added_monitors(&nodes[0], 1);
15737		check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
15738		{
15739			let txn = nodes[0].tx_broadcaster.txn_broadcast();
15740			assert_eq!(txn.len(), 1);
15741			check_spends!(txn[0], funding_tx);
15742		}
15743
15744		// Since they're disconnected, Bob won't receive Alice's `Error` message. Reconnect them
15745		// such that Bob sends a `ChannelReestablish` to Alice since the channel is still open from
15746		// their side.
15747		nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
15748			features: nodes[1].node.init_features(), networks: None, remote_network_address: None
15749		}, true).unwrap();
15750		nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
15751			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
15752		}, false).unwrap();
15753		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
15754		let channel_reestablish = get_event_msg!(
15755			nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
15756		);
15757		nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);
15758
15759		// Alice should respond with an error since the channel isn't known, but a bogus
15760		// `ChannelReestablish` should be sent first, such that we actually trigger Bob to force
15761		// close even if it was an lnd node.
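		// The bogus message claims both commitment numbers are 0 (asserted below), which the
		// receiving node treats as an invalid, non-standard request to force close, as reflected in
		// the closure reason checked further down.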
15762		let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
15763		assert_eq!(msg_events.len(), 2);
15764		if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
15765			assert_eq!(*node_id, nodes[1].node.get_our_node_id());
15766			assert_eq!(msg.next_local_commitment_number, 0);
15767			assert_eq!(msg.next_remote_commitment_number, 0);
15768			nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
15769		} else { panic!() };
15770		check_closed_broadcast(&nodes[1], 1, true);
15771		check_added_monitors(&nodes[1], 1);
15772		let expected_close_reason = ClosureReason::ProcessingError {
15773			err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
15774		};
15775		check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
15776		{
15777			let txn = nodes[1].tx_broadcaster.txn_broadcast();
15778			assert_eq!(txn.len(), 1);
15779			check_spends!(txn[0], funding_tx);
15780		}
15781	}
15782
15783	#[test]
15784	fn test_malformed_forward_htlcs_ser() {
15785		// Ensure that `HTLCForwardInfo::FailMalformedHTLC`s are (de)serialized properly.
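		// Approach: build alternating `FailHTLC`/`FailMalformedHTLC` entries, inject them directly
		// into the `ChannelManager`'s `forward_htlcs` map, serialize and reload the node, then check
		// that the deserialized map matches the original exactly.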
15786		let chanmon_cfg = create_chanmon_cfgs(1);
15787		let node_cfg = create_node_cfgs(1, &chanmon_cfg);
15788		let persister;
15789		let chain_monitor;
15790		let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
15791		let deserialized_chanmgr;
15792		let mut nodes = create_network(1, &node_cfg, &chanmgrs);
15793
15794		let dummy_failed_htlc = |htlc_id| {
15795			HTLCForwardInfo::FailHTLC { htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }, }
15796		};
15797		let dummy_malformed_htlc = |htlc_id| {
15798			HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32] }
15799		};
15800
15801		let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
15802			if htlc_id % 2 == 0 {
15803				dummy_failed_htlc(htlc_id)
15804			} else {
15805				dummy_malformed_htlc(htlc_id)
15806			}
15807		}).collect();
15808
15809		let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10).map(|htlc_id| {
15810			if htlc_id % 2 == 1 {
15811				dummy_failed_htlc(htlc_id)
15812			} else {
15813				dummy_malformed_htlc(htlc_id)
15814			}
15815		}).collect();
15816
15817
15818		let (scid_1, scid_2) = (42, 43);
15819		let mut forward_htlcs = new_hash_map();
15820		forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
15821		forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());
15822
15823		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
15824		*chanmgr_fwd_htlcs = forward_htlcs.clone();
15825		core::mem::drop(chanmgr_fwd_htlcs);
15826
15827		reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);
15828
15829		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
15830		for scid in [scid_1, scid_2].iter() {
15831			let deserialized_htlcs = deserialized_fwd_htlcs.remove(scid).unwrap();
15832			assert_eq!(forward_htlcs.remove(scid).unwrap(), deserialized_htlcs);
15833		}
15834		assert!(deserialized_fwd_htlcs.is_empty());
15835		core::mem::drop(deserialized_fwd_htlcs);
15836
15837		expect_pending_htlcs_forwardable!(nodes[0]);
15838	}
15839}
15840
15841#[cfg(ldk_bench)]
15842pub mod bench {
15843	use crate::chain::Listen;
15844	use crate::chain::chainmonitor::{ChainMonitor, Persist};
15845	use crate::sign::{KeysManager, InMemorySigner};
15846	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
15847	use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
15848	use crate::ln::functional_test_utils::*;
15849	use crate::ln::msgs::{ChannelMessageHandler, Init};
15850	use crate::routing::gossip::NetworkGraph;
15851	use crate::routing::router::{PaymentParameters, RouteParameters};
15852	use crate::util::test_utils;
15853	use crate::util::config::{UserConfig, MaxDustHTLCExposure};
15854
15855	use bitcoin::amount::Amount;
15856	use bitcoin::locktime::absolute::LockTime;
15857	use bitcoin::hashes::Hash;
15858	use bitcoin::hashes::sha256::Hash as Sha256;
15859	use bitcoin::{Transaction, TxOut};
15860	use bitcoin::transaction::Version;
15861
15862	use crate::sync::{Arc, Mutex, RwLock};
15863
15864	use criterion::Criterion;
15865
15866	type Manager<'a, P> = ChannelManager<
15867		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
15868			&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
15869			&'a test_utils::TestLogger, &'a P>,
15870		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
15871		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
15872		&'a test_utils::TestMessageRouter<'a>, &'a test_utils::TestLogger>;
15873
15874	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
15875		node: &'node_cfg Manager<'chan_mon_cfg, P>,
15876	}
15877	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
15878		type CM = Manager<'chan_mon_cfg, P>;
15879		#[inline]
15880		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
15881		#[inline]
15882		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
15883	}
15884
15885	pub fn bench_sends(bench: &mut Criterion) {
15886		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
15887	}
15888
15889	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
15890		// Do a simple benchmark of sending a payment back and forth between two nodes.
15891		// Note that this is unrealistic as each payment send will require at least two fsync
15892		// calls per node.
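		// The setup below manually drives the open_channel/funding/channel_ready handshake between
		// two in-memory nodes before the timed payment loop starts.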
15893		let network = bitcoin::Network::Testnet;
15894		let genesis_block = bitcoin::constants::genesis_block(network);
15895
15896		let tx_broadcaster = test_utils::TestBroadcaster::new(network);
15897		let fee_estimator = test_utils::TestFeeEstimator::new(253);
15898		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
15899		let scorer = RwLock::new(test_utils::TestScorer::new());
15900		let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
15901		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
15902		let message_router = test_utils::TestMessageRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);
15903
15904		let mut config: UserConfig = Default::default();
15905		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
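		// A minimum depth of 1 lets the channel confirm with the single dummy block connected below.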
15906		config.channel_handshake_config.minimum_depth = 1;
15907
15908		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
15909		let seed_a = [1u8; 32];
15910		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
15911		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
15912			network,
15913			best_block: BestBlock::from_network(network),
15914		}, genesis_block.header.time);
15915		let node_a_holder = ANodeHolder { node: &node_a };
15916
15917		let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
15918		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
15919		let seed_b = [2u8; 32];
15920		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
15921		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
15922			network,
15923			best_block: BestBlock::from_network(network),
15924		}, genesis_block.header.time);
15925		let node_b_holder = ANodeHolder { node: &node_b };
15926
15927		node_a.peer_connected(node_b.get_our_node_id(), &Init {
15928			features: node_b.init_features(), networks: None, remote_network_address: None
15929		}, true).unwrap();
15930		node_b.peer_connected(node_a.get_our_node_id(), &Init {
15931			features: node_a.init_features(), networks: None, remote_network_address: None
15932		}, false).unwrap();
15933		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
15934		node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
15935		node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
15936
15937		let tx;
15938		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
15939			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
15940				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
15941			}]};
15942			node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
15943		} else { panic!(); }
15944
15945		node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
15946		let events_b = node_b.get_and_clear_pending_events();
15947		assert_eq!(events_b.len(), 1);
15948		match events_b[0] {
15949			Event::ChannelPending{ ref counterparty_node_id, .. } => {
15950				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
15951			},
15952			_ => panic!("Unexpected event"),
15953		}
15954
15955		node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
15956		let events_a = node_a.get_and_clear_pending_events();
15957		assert_eq!(events_a.len(), 1);
15958		match events_a[0] {
15959			Event::ChannelPending{ ref counterparty_node_id, .. } => {
15960				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15961			},
15962			_ => panic!("Unexpected event"),
15963		}
15964
15965		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
15966
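		// Mine the funding transaction in a dummy block and connect it to both nodes so the channel
		// reaches its one-confirmation minimum depth.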
15967		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
15968		Listen::block_connected(&node_a, &block, 1);
15969		Listen::block_connected(&node_b, &block, 1);
15970
15971		node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
15972		let msg_events = node_a.get_and_clear_pending_msg_events();
15973		assert_eq!(msg_events.len(), 2);
15974		match msg_events[0] {
15975			MessageSendEvent::SendChannelReady { ref msg, .. } => {
15976				node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
15977				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
15978			},
15979			_ => panic!(),
15980		}
15981		match msg_events[1] {
15982			MessageSendEvent::SendChannelUpdate { .. } => {},
15983			_ => panic!(),
15984		}
15985
15986		let events_a = node_a.get_and_clear_pending_events();
15987		assert_eq!(events_a.len(), 1);
15988		match events_a[0] {
15989			Event::ChannelReady{ ref counterparty_node_id, .. } => {
15990				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
15991			},
15992			_ => panic!("Unexpected event"),
15993		}
15994
15995		let events_b = node_b.get_and_clear_pending_events();
15996		assert_eq!(events_b.len(), 1);
15997		match events_b[0] {
15998			Event::ChannelReady{ ref counterparty_node_id, .. } => {
15999				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
16000			},
16001			_ => panic!("Unexpected event"),
16002		}
16003
16004		let mut payment_count: u64 = 0;
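		// One complete payment per invocation: a unique preimage is derived from `payment_count`,
		// the HTLC is added and committed, the recipient claims, and the fulfill is committed back,
		// ending with `expect_payment_sent!` on the sender.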
16005		macro_rules! send_payment {
16006			($node_a: expr, $node_b: expr) => {
16007				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
16008					.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
16009				let mut payment_preimage = PaymentPreimage([0; 32]);
16010				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
16011				payment_count += 1;
16012				let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
16013				let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
16014
16015				$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
16016					PaymentId(payment_hash.0),
16017					RouteParameters::from_payment_params_and_value(payment_params, 10_000),
16018					Retry::Attempts(0)).unwrap();
16019				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
16020				$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
16021				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg);
16022				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
16023				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
16024				$node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs);
16025				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
16026
16027				expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
16028				expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
16029				$node_b.claim_funds(payment_preimage);
16030				expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
16031
16032				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
16033					MessageSendEvent::UpdateHTLCs { node_id, updates } => {
16034						assert_eq!(node_id, $node_a.get_our_node_id());
16035						$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
16036						$node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed);
16037					},
16038					_ => panic!("Failed to generate claim event"),
16039				}
16040
16041				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
16042				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
16043				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs);
16044				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
16045
16046				expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
16047			}
16048		}
16049
16050		bench.bench_function(bench_name, |b| b.iter(|| {
16051			send_payment!(node_a, node_b);
16052			send_payment!(node_b, node_a);
16053		}));
16054	}
16055}