1use bitcoin::block::Header;
21use bitcoin::transaction::{Transaction, TxIn};
22use bitcoin::constants::ChainHash;
23use bitcoin::key::constants::SECRET_KEY_SIZE;
24use bitcoin::network::Network;
25
26use bitcoin::hashes::{Hash, HashEngine, HmacEngine};
27use bitcoin::hashes::hmac::Hmac;
28use bitcoin::hashes::sha256::Hash as Sha256;
29use bitcoin::hash_types::{BlockHash, Txid};
30
31use bitcoin::secp256k1::{SecretKey,PublicKey};
32use bitcoin::secp256k1::Secp256k1;
33use bitcoin::{secp256k1, Sequence, Weight};
34
35use crate::events::FundingInfo;
36use crate::blinded_path::message::{AsyncPaymentsContext, MessageContext, OffersContext};
37use crate::blinded_path::NodeIdLookUp;
38use crate::blinded_path::message::{BlindedMessagePath, MessageForwardNode};
39use crate::blinded_path::payment::{BlindedPaymentPath, Bolt12OfferContext, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
40use crate::chain;
41use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
42use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
43use crate::chain::channelmonitor::{Balance, ChannelMonitor, ChannelMonitorUpdate, WithChannelMonitor, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent};
44use crate::chain::transaction::{OutPoint, TransactionData};
45use crate::events::{self, Event, EventHandler, EventsProvider, InboundChannelFunds, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason, ReplayEvent};
46use crate::ln::inbound_payment;
49use crate::ln::types::ChannelId;
50use crate::types::payment::{PaymentHash, PaymentPreimage, PaymentSecret};
51use crate::ln::channel::{self, Channel, ChannelPhase, ChannelError, ChannelUpdateStatus, ShutdownResult, UpdateFulfillCommitFetch, OutboundV1Channel, InboundV1Channel, WithChannelContext, InteractivelyFunded as _};
52#[cfg(any(dual_funding, splicing))]
53use crate::ln::channel::InboundV2Channel;
54use crate::ln::channel_state::ChannelDetails;
55use crate::types::features::{Bolt12InvoiceFeatures, ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
56#[cfg(any(feature = "_test_utils", test))]
57use crate::types::features::Bolt11InvoiceFeatures;
58use crate::routing::router::{BlindedTail, FixedRouter, InFlightHtlcs, Path, Payee, PaymentParameters, Route, RouteParameters, Router};
59use crate::ln::onion_payment::{check_incoming_htlc_cltv, create_recv_pending_htlc_info, create_fwd_pending_htlc_info, decode_incoming_update_add_htlc_onion, InboundHTLCErr, NextPacketDetails};
60use crate::ln::msgs;
61use crate::ln::onion_utils;
62use crate::ln::onion_utils::{HTLCFailReason, INVALID_ONION_BLINDING};
63use crate::ln::msgs::{ChannelMessageHandler, CommitmentUpdate, DecodeError, LightningError};
64#[cfg(test)]
65use crate::ln::outbound_payment;
66use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment, RetryableInvoiceRequest, SendAlongPathArgs, StaleExpiration};
67use crate::offers::invoice::{Bolt12Invoice, DEFAULT_RELATIVE_EXPIRY, DerivedSigningPubkey, ExplicitSigningPubkey, InvoiceBuilder, UnsignedBolt12Invoice};
68use crate::offers::invoice_error::InvoiceError;
69use crate::offers::invoice_request::{InvoiceRequest, InvoiceRequestBuilder};
70use crate::offers::nonce::Nonce;
71use crate::offers::offer::{Offer, OfferBuilder};
72use crate::offers::parse::Bolt12SemanticError;
73use crate::offers::refund::{Refund, RefundBuilder};
74use crate::offers::signer;
75#[cfg(async_payments)]
76use crate::offers::static_invoice::StaticInvoice;
77use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
78use crate::onion_message::dns_resolution::HumanReadableName;
79use crate::onion_message::messenger::{Destination, MessageRouter, Responder, ResponseInstruction, MessageSendInstructions};
80use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
81use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
82use crate::sign::ecdsa::EcdsaChannelSigner;
83use crate::util::config::{UserConfig, ChannelConfig, ChannelConfigUpdate};
84use crate::util::wakers::{Future, Notifier};
85use crate::util::scid_utils::fake_scid;
86use crate::util::string::UntrustedString;
87use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
88use crate::util::ser::TransactionU16LenLimited;
89use crate::util::logger::{Level, Logger, WithContext};
90use crate::util::errors::APIError;
91
92#[cfg(feature = "dnssec")]
93use crate::blinded_path::message::DNSResolverContext;
94#[cfg(feature = "dnssec")]
95use crate::onion_message::dns_resolution::{DNSResolverMessage, DNSResolverMessageHandler, DNSSECQuery, DNSSECProof, OMNameResolver};
96
97#[cfg(not(c_bindings))]
98use {
99 crate::offers::offer::DerivedMetadata,
100 crate::onion_message::messenger::DefaultMessageRouter,
101 crate::routing::router::DefaultRouter,
102 crate::routing::gossip::NetworkGraph,
103 crate::routing::scoring::{ProbabilisticScorer, ProbabilisticScoringFeeParameters},
104 crate::sign::KeysManager,
105};
106#[cfg(c_bindings)]
107use {
108 crate::offers::offer::OfferWithDerivedMetadataBuilder,
109 crate::offers::refund::RefundMaybeWithDerivedMetadataBuilder,
110};
111
112use lightning_invoice::{Bolt11Invoice, Bolt11InvoiceDescription, CreationError, Currency, Description, InvoiceBuilder as Bolt11InvoiceBuilder, SignOrCreationError, DEFAULT_EXPIRY_TIME};
113
114use alloc::collections::{btree_map, BTreeMap};
115
116use crate::io;
117use crate::prelude::*;
118use core::{cmp, mem};
119use core::borrow::Borrow;
120use core::cell::RefCell;
121use crate::io::Read;
122use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
123use core::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
124use core::time::Duration;
125use core::ops::Deref;
126use bitcoin::hex::impl_fmt_traits;
127pub use crate::ln::outbound_payment::{Bolt12PaymentError, ProbeSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};
129#[cfg(test)]
130pub(crate) use crate::ln::outbound_payment::PaymentSendFailure;
131use crate::ln::script::ShutdownScript;
132
/// Routing info for an inbound HTLC onion: where the HTLC should go next.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub enum PendingHTLCRouting {
	/// An HTLC which should be forwarded on to another node.
	Forward {
		/// The onion to include in the forwarded HTLC for the next hop.
		onion_packet: msgs::OnionPacket,
		/// The SCID to forward over. The `blinded` field is set if this forward is part of a
		/// blinded path (see [`BlindedForward`]).
		short_channel_id: u64, blinded: Option<BlindedForward>,
		/// CLTV expiry of the inbound HTLC. `Option` — NOTE(review): presumably `None` only for
		/// HTLCs serialized before this field existed; confirm against the read path.
		incoming_cltv_expiry: Option<u32>,
	},
	/// An HTLC paying to us as part of a (possibly multi-part) payment using a payment secret.
	Receive {
		/// Final-hop onion data (carries the payment secret and sender-intended total).
		payment_data: msgs::FinalOnionHopData,
		/// Additional metadata the sender included in the onion, if any.
		payment_metadata: Option<Vec<u8>>,
		/// Context for a payment received over a blinded path, if any.
		payment_context: Option<PaymentContext>,
		/// CLTV expiry of the inbound HTLC.
		incoming_cltv_expiry: u32,
		/// Set if the HTLC was received via a phantom-node SCID (see `fake_scid` usage elsewhere).
		phantom_shared_secret: Option<[u8; 32]>,
		/// Custom TLVs from the onion, as raw (type, value) pairs.
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// If true, failures for this HTLC must be reported as originating from a blinded node
		/// (see [`PendingHTLCRouting::blinded_failure`]).
		requires_blinded_error: bool,
	},
	/// An HTLC paying to us where the sender included the preimage (keysend/spontaneous payment).
	ReceiveKeysend {
		/// Final-hop onion data, if the sender also included a payment secret.
		payment_data: Option<msgs::FinalOnionHopData>,
		/// The preimage the sender embedded, which lets us claim without an invoice.
		payment_preimage: PaymentPreimage,
		/// Additional metadata the sender included in the onion, if any.
		payment_metadata: Option<Vec<u8>>,
		/// CLTV expiry of the inbound HTLC.
		incoming_cltv_expiry: u32,
		/// Custom TLVs from the onion, as raw (type, value) pairs.
		custom_tlvs: Vec<(u64, Vec<u8>)>,
		/// If true, failures for this HTLC must be reported as originating from a blinded node.
		requires_blinded_error: bool,
		/// Whether the recipient created the payment secret for this keysend payment.
		has_recipient_created_payment_secret: bool,
	},
}
242
/// Information used to forward, or fail backwards, an HTLC that is part of a blinded path.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BlindedForward {
	/// The blinding point carried on the inbound `update_add_htlc`.
	pub inbound_blinding_point: PublicKey,
	/// Whether we (as intro node or blinded node) must conceal failure details; see
	/// [`BlindedFailure`].
	pub failure: BlindedFailure,
	/// If set, overrides the blinding point forwarded to the next hop.
	pub next_blinding_override: Option<PublicKey>,
}
258
259impl PendingHTLCRouting {
260 fn blinded_failure(&self) -> Option<BlindedFailure> {
262 match self {
263 Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
264 Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
265 Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
266 _ => None,
267 }
268 }
269
270 fn incoming_cltv_expiry(&self) -> Option<u32> {
271 match self {
272 Self::Forward { incoming_cltv_expiry, .. } => *incoming_cltv_expiry,
273 Self::Receive { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
274 Self::ReceiveKeysend { incoming_cltv_expiry, .. } => Some(*incoming_cltv_expiry),
275 }
276 }
277}
278
/// Fully-decoded information about an inbound HTLC, including where it should go next.
#[derive(Clone)] #[cfg_attr(test, derive(Debug, PartialEq))]
pub struct PendingHTLCInfo {
	/// Where the HTLC should go (forward or one of the receive variants).
	pub routing: PendingHTLCRouting,
	/// The onion shared secret for the inbound hop, used to encrypt failure packets back.
	pub incoming_shared_secret: [u8; 32],
	/// The payment hash of the HTLC.
	pub payment_hash: PaymentHash,
	/// Amount received on the inbound edge, if known.
	pub incoming_amt_msat: Option<u64>,
	/// Amount to forward (or deliver, for receives) on the outbound edge.
	pub outgoing_amt_msat: u64,
	/// CLTV value for the outbound edge.
	pub outgoing_cltv_value: u32,
	/// Fee skimmed off the inbound amount beyond the advertised fee, if any.
	pub skimmed_fee_msat: Option<u64>,
}
323
/// The failure message to send back for an HTLC we are rejecting.
#[derive(Clone)] pub(super) enum HTLCFailureMsg {
	// Standard encrypted failure.
	Relay(msgs::UpdateFailHTLC),
	// Failure for an onion we could not even parse.
	Malformed(msgs::UpdateFailMalformedHTLC),
}
329
/// The outcome of decoding an inbound HTLC onion: either proceed or fail it back.
#[derive(Clone)] pub(super) enum PendingHTLCStatus {
	Forward(PendingHTLCInfo),
	Fail(HTLCFailureMsg),
}
336
/// An inbound HTLC waiting to be forwarded, together with info about the channel it came from
/// (needed to fail it backwards if the forward fails).
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) struct PendingAddHTLCInfo {
	pub(super) forward_info: PendingHTLCInfo,

	// Fields below identify the inbound edge of this HTLC.
	prev_short_channel_id: u64,
	prev_htlc_id: u64,
	// Optional for backwards compatibility with older serialized data — NOTE(review): confirm.
	prev_counterparty_node_id: Option<PublicKey>,
	prev_channel_id: ChannelId,
	prev_funding_outpoint: OutPoint,
	prev_user_channel_id: u128,
}
354
/// An entry in the pending-forwards queue: either an HTLC to forward onward, or a failure to
/// relay backwards on the inbound channel.
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
pub(super) enum HTLCForwardInfo {
	AddHTLC(PendingAddHTLCInfo),
	/// Fail the inbound HTLC identified by `htlc_id` with an encrypted error packet.
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	/// Fail the inbound HTLC with an `update_fail_malformed_htlc`-style failure.
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}
368
/// Which node in a blinded path is responsible for converting HTLC failures into the form
/// required by the blinded-path spec (failures inside a blinded path must be concealed).
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub enum BlindedFailure {
	/// We are the introduction node of the blinded path.
	FromIntroductionNode,
	/// We are a node within the blinded path (after the introduction node).
	FromBlindedNode,
}
380
/// Identifies the inbound edge of an HTLC — everything needed to claim or fail it backwards.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
	short_channel_id: u64,
	// `Option`s below are presumably `None` only for data serialized before the field existed —
	// NOTE(review): confirm against the deserialization code.
	user_channel_id: Option<u128>,
	htlc_id: u64,
	// Onion shared secret for the inbound hop; used to encrypt failure packets.
	incoming_packet_shared_secret: [u8; 32],
	// Set if the HTLC arrived via a phantom-node SCID.
	phantom_shared_secret: Option<[u8; 32]>,
	// Set if the HTLC is part of a blinded path.
	blinded_failure: Option<BlindedFailure>,
	channel_id: ChannelId,

	// Funding outpoint of the inbound channel.
	outpoint: OutPoint,
	counterparty_node_id: Option<PublicKey>,
	// CLTV expiry of the inbound HTLC, where known.
	cltv_expiry: Option<u32>,
}
401
/// The payment-relevant part of a final-hop onion for a claimable HTLC.
#[derive(PartialEq, Eq)]
enum OnionPayload {
	/// An invoice-based (payment-secret) payment.
	Invoice {
		// Retained only for serialization compatibility — the leading underscore indicates it is
		// no longer read at runtime.
		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
	},
	/// A keysend payment carrying its preimage in the onion.
	Spontaneous(PaymentPreimage),
}
413
/// An HTLC paying to us which is ready to be claimed (or timed out) as part of a payment.
#[derive(PartialEq, Eq)]
struct ClaimableHTLC {
	// Inbound-edge identification, used to claim/fail backwards.
	prev_hop: HTLCPreviousHopData,
	cltv_expiry: u32,
	// The amount (msat) we actually received for this HTLC part.
	value: u64,
	// The amount the sender intended us to receive (may exceed `value` if fees were skimmed).
	sender_intended_value: u64,
	onion_payload: OnionPayload,
	// Timer ticks this HTLC has been waiting as part of an incomplete MPP payment.
	timer_ticks: u8,
	// Total received across all parts, once the full payment has arrived.
	total_value_received: Option<u64>,
	// The sender-claimed total for the whole (possibly multi-part) payment.
	total_msat: u64,
	// Fee our counterparty skimmed from this HTLC, if any.
	counterparty_skimmed_fee_msat: Option<u64>,
}
434
435impl From<&ClaimableHTLC> for events::ClaimedHTLC {
436 fn from(val: &ClaimableHTLC) -> Self {
437 events::ClaimedHTLC {
438 channel_id: val.prev_hop.channel_id,
439 user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0),
440 cltv_expiry: val.cltv_expiry,
441 value_msat: val.value,
442 counterparty_skimmed_fee_msat: val.counterparty_skimmed_fee_msat.unwrap_or(0),
443 }
444 }
445}
446
447impl PartialOrd for ClaimableHTLC {
448 fn partial_cmp(&self, other: &ClaimableHTLC) -> Option<cmp::Ordering> {
449 Some(self.cmp(other))
450 }
451}
452impl Ord for ClaimableHTLC {
453 fn cmp(&self, other: &ClaimableHTLC) -> cmp::Ordering {
454 let res = (self.prev_hop.channel_id, self.prev_hop.htlc_id).cmp(
455 &(other.prev_hop.channel_id, other.prev_hop.htlc_id)
456 );
457 if res.is_eq() {
458 debug_assert!(self == other, "ClaimableHTLCs from the same source should be identical");
459 }
460 res
461 }
462}
463
/// A trait for types that can authenticate themselves with an HMAC keyed by our
/// [`inbound_payment::ExpandedKey`], for use in offers payment flows.
pub trait Verification {
	/// Constructs an HMAC over `self` using the given nonce and expanded key.
	fn hmac_for_offer_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256>;

	/// Verifies that `hmac` was produced by [`Self::hmac_for_offer_payment`] with the same
	/// nonce and key; returns `Err(())` on mismatch.
	fn verify_for_offer_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()>;
}
477
478impl Verification for PaymentHash {
479 fn hmac_for_offer_payment(
482 &self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
483 ) -> Hmac<Sha256> {
484 signer::hmac_for_payment_hash(*self, nonce, expanded_key)
485 }
486
487 fn verify_for_offer_payment(
490 &self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
491 ) -> Result<(), ()> {
492 signer::verify_payment_hash(*self, hmac, nonce, expanded_key)
493 }
494}
495
496impl Verification for UnauthenticatedReceiveTlvs {
497 fn hmac_for_offer_payment(
498 &self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
499 ) -> Hmac<Sha256> {
500 signer::hmac_for_payment_tlvs(self, nonce, expanded_key)
501 }
502
503 fn verify_for_offer_payment(
504 &self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
505 ) -> Result<(), ()> {
506 signer::verify_payment_tlvs(self, hmac, nonce, expanded_key)
507 }
508}
509
/// A user-provided identifier for a payment, used to track it through its lifecycle.
/// Always [`PaymentId::LENGTH`] (32) bytes.
#[derive(Hash, Copy, Clone, PartialEq, Eq)]
pub struct PaymentId(pub [u8; Self::LENGTH]);
516
impl PaymentId {
	/// Number of bytes in a [`PaymentId`].
	pub const LENGTH: usize = 32;

	/// Constructs an HMAC over this payment id for async-payment authentication, delegating to
	/// [`signer::hmac_for_async_payment_id`].
	#[cfg(async_payments)]
	pub fn hmac_for_async_payment(
		&self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Hmac<Sha256> {
		signer::hmac_for_async_payment_id(*self, nonce, expanded_key)
	}

	/// Verifies an HMAC produced by [`Self::hmac_for_async_payment`]; `Err(())` on mismatch.
	#[cfg(async_payments)]
	pub fn verify_for_async_payment(
		&self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
	) -> Result<(), ()> {
		signer::verify_async_payment_id(*self, hmac, nonce, expanded_key)
	}
}
539
540impl Verification for PaymentId {
541 fn hmac_for_offer_payment(
544 &self, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
545 ) -> Hmac<Sha256> {
546 signer::hmac_for_offer_payment_id(*self, nonce, expanded_key)
547 }
548
549 fn verify_for_offer_payment(
552 &self, hmac: Hmac<Sha256>, nonce: Nonce, expanded_key: &inbound_payment::ExpandedKey,
553 ) -> Result<(), ()> {
554 signer::verify_offer_payment_id(*self, hmac, nonce, expanded_key)
555 }
556}
557
558impl PaymentId {
559 fn for_inbound_from_htlcs<I: Iterator<Item=(ChannelId, u64)>>(key: &[u8; 32], htlcs: I) -> PaymentId {
560 let mut prev_pair = None;
561 let mut hasher = HmacEngine::new(key);
562 for (channel_id, htlc_id) in htlcs {
563 hasher.input(&channel_id.0);
564 hasher.input(&htlc_id.to_le_bytes());
565 if let Some(prev) = prev_pair {
566 debug_assert!(prev < (channel_id, htlc_id), "HTLCs should be sorted");
567 }
568 prev_pair = Some((channel_id, htlc_id));
569 }
570 PaymentId(Hmac::<Sha256>::from_engine(hasher).to_byte_array())
571 }
572}
573
574impl Borrow<[u8]> for PaymentId {
575 fn borrow(&self) -> &[u8] {
576 &self.0[..]
577 }
578}
579
// Implements the formatting traits for `PaymentId` via rust-bitcoin's `impl_fmt_traits` helper
// macro (imported from `bitcoin::hex`), rendering the 32 bytes — presumably as hex; confirm
// against the macro's docs.
impl_fmt_traits! {
	impl fmt_traits for PaymentId {
		const LENGTH: usize = 32;
	}
}
585
586impl Writeable for PaymentId {
587 fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
588 self.0.write(w)
589 }
590}
591
592impl Readable for PaymentId {
593 fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
594 let buf: [u8; 32] = Readable::read(r)?;
595 Ok(PaymentId(buf))
596 }
597}
598
/// An identifier for an intercepted HTLC, 32 bytes.
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);
604
605impl Writeable for InterceptId {
606 fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
607 self.0.write(w)
608 }
609}
610
611impl Readable for InterceptId {
612 fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
613 let buf: [u8; 32] = Readable::read(r)?;
614 Ok(InterceptId(buf))
615 }
616}
617
/// A compact, hashable identifier for an outbound HTLC's source, derived from [`HTLCSource`].
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) enum SentHTLCId {
	/// An HTLC we are forwarding, identified by its inbound edge.
	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
	/// An HTLC for a payment we originated, identified by its session key bytes.
	OutboundRoute { session_priv: [u8; SECRET_KEY_SIZE] },
}
624impl SentHTLCId {
625 pub(crate) fn from_source(source: &HTLCSource) -> Self {
626 match source {
627 HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
628 short_channel_id: hop_data.short_channel_id,
629 htlc_id: hop_data.htlc_id,
630 },
631 HTLCSource::OutboundRoute { session_priv, .. } =>
632 Self::OutboundRoute { session_priv: session_priv.secret_bytes() },
633 }
634 }
635}
// TLV-based serialization for `SentHTLCId`; the leading numbers are stable wire type ids and
// must never be changed for already-released versions.
impl_writeable_tlv_based_enum!(SentHTLCId,
	(0, PreviousHopData) => {
		(0, short_channel_id, required),
		(2, htlc_id, required),
	},
	(2, OutboundRoute) => {
		(0, session_priv, required),
	},
);
645
646
/// The source of an outbound HTLC: either an inbound HTLC we are forwarding, or a payment we
/// originated ourselves.
// `Hash` is implemented manually below (hence the clippy allow for derive_hash_xor_eq).
#[allow(clippy::derive_hash_xor_eq)] #[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum HTLCSource {
	PreviousHopData(HTLCPreviousHopData),
	OutboundRoute {
		/// The path over which we sent this HTLC.
		path: Path,
		/// The ephemeral session key used to build the onion for this HTLC.
		session_priv: SecretKey,
		/// The msat amount of the HTLC on the first hop, used to match commitment outputs.
		first_hop_htlc_msat: u64,
		payment_id: PaymentId,
	},
}
// Manual `Hash` for `HTLCSource`. A variant discriminant byte (0 or 1) is hashed first, then
// each field, in a fixed order — do not reorder, as in-memory hash consistency with the derived
// `PartialEq` depends on covering exactly the compared fields.
#[allow(clippy::derive_hash_xor_eq)] impl core::hash::Hash for HTLCSource {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		match self {
			HTLCSource::PreviousHopData(prev_hop_data) => {
				0u8.hash(hasher);
				prev_hop_data.hash(hasher);
			},
			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
				1u8.hash(hasher);
				path.hash(hasher);
				// SecretKey does not implement Hash; hash its byte slice instead.
				session_priv[..].hash(hasher);
				payment_id.hash(hasher);
				first_hop_htlc_msat.hash(hasher);
			},
		}
	}
}
679impl HTLCSource {
680 #[cfg(all(ldk_test_vectors, test))]
681 pub fn dummy() -> Self {
682 assert!(cfg!(not(feature = "grind_signatures")));
683 HTLCSource::OutboundRoute {
684 path: Path { hops: Vec::new(), blinded_tail: None },
685 session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
686 first_hop_htlc_msat: 0,
687 payment_id: PaymentId([2; 32]),
688 }
689 }
690
691 #[cfg(debug_assertions)]
692 pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
695 if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
696 *first_hop_htlc_msat == htlc.amount_msat
697 } else {
698 true
700 }
701 }
702
703 pub(crate) fn inbound_htlc_expiry(&self) -> Option<u32> {
706 match self {
707 Self::PreviousHopData(HTLCPreviousHopData { cltv_expiry, .. }) => *cltv_expiry,
708 _ => None,
709 }
710 }
711}
712
/// The reason an HTLC should be failed back, mapped to a BOLT 4 failure code (see the
/// `Into<u16>`/`From` conversion below).
#[derive(Clone, Copy)]
pub enum FailureCode {
	/// A transient node-level failure.
	TemporaryNodeFailure,
	/// The payment requires a node feature we do not support.
	RequiredNodeFeatureMissing,
	/// Generic "unknown or incorrect payment details" failure.
	IncorrectOrUnknownPaymentDetails,
	/// The onion payload was invalid; optionally carries the offending (TLV type, offset) —
	/// NOTE(review): confirm the tuple semantics at the construction sites.
	InvalidOnionPayload(Option<(u64, u16)>),
}
737
738impl Into<u16> for FailureCode {
739 fn into(self) -> u16 {
740 match self {
741 FailureCode::TemporaryNodeFailure => 0x2000 | 2,
742 FailureCode::RequiredNodeFeatureMissing => 0x4000 | 0x2000 | 3,
743 FailureCode::IncorrectOrUnknownPaymentDetails => 0x4000 | 15,
744 FailureCode::InvalidOnionPayload(_) => 0x4000 | 22,
745 }
746 }
747}
748
/// Internal error type for message handling: the error to report to the peer, plus whether the
/// error closes the channel and any shutdown work to finish.
struct MsgHandleErrInternal {
	// The error (with its `ErrorAction`) to surface to the peer.
	err: msgs::LightningError,
	// Whether handling this error closes the channel.
	closes_channel: bool,
	// If the channel is being shut down, the shutdown result and a final `channel_update`.
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
	/// An error that sends an `error` message to the peer but does not close the channel.
	#[inline]
	fn send_err_msg_no_close(err: String, channel_id: ChannelId) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}
	/// Wraps a pre-built `LightningError` without closing any channel.
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
		Self { err, closes_channel: false, shutdown_finish: None }
	}
	/// An error that closes the channel, carrying the shutdown result and optional final
	/// `channel_update` to broadcast.
	#[inline]
	fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() };
		// If a monitor update is pending we disconnect the peer rather than just sending the
		// error message — NOTE(review): presumably to avoid racing further channel messages
		// against the in-flight update; confirm against the callers.
		let action = if shutdown_res.monitor_update.is_some() {
			msgs::ErrorAction::DisconnectPeer { msg: Some(err_msg) }
		} else {
			msgs::ErrorAction::SendErrorMessage { msg: err_msg }
		};
		Self {
			err: LightningError { err, action },
			closes_channel: true,
			shutdown_finish: Some((shutdown_res, channel_update)),
		}
	}
	/// Maps a `ChannelError` to the corresponding peer-visible action (warning message, ignored
	/// error, or error message) without closing the channel here.
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: ChannelId) -> Self {
		Self {
			err: match err {
				ChannelError::Warn(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id,
							data: msg
						},
						log_level: Level::Warn,
					},
				},
				ChannelError::Ignore(msg) => LightningError {
					err: msg,
					action: msgs::ErrorAction::IgnoreError,
				},
				ChannelError::Close((msg, _reason)) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
			},
			closes_channel: false,
			shutdown_finish: None,
		}
	}

	/// Whether handling this error closes the channel.
	fn closes_channel(&self) -> bool {
		self.closes_channel
	}
}
834
/// Minimum time, in milliseconds, an HTLC sits in the holding cell before being relayed —
/// NOTE(review): the enforcement site is elsewhere in this file; confirm usage there.
pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
840
/// The order in which a `commitment_signed` and `revoke_and_ack` pair should be (re)sent to a
/// counterparty.
#[derive(Clone, PartialEq, Debug)]
pub(super) enum RAACommitmentOrder {
	/// Send the `commitment_signed` first.
	CommitmentFirst,
	/// Send the `revoke_and_ack` first.
	RevokeAndACKFirst,
}
852
/// A payment we have begun claiming, held until the corresponding monitor updates complete and
/// the `PaymentClaimed` event can be generated.
#[derive(Clone, Debug, PartialEq, Eq)]
struct ClaimingPayment {
	amount_msat: u64,
	payment_purpose: events::PaymentPurpose,
	// The node id claiming the payment (ours, or the phantom node's for phantom receives).
	receiver_node_id: PublicKey,
	htlcs: Vec<events::ClaimedHTLC>,
	sender_intended_value: Option<u64>,
	onion_fields: Option<RecipientOnionFields>,
	payment_id: Option<PaymentId>,
}
// TLV serialization; numbers are stable wire type ids and must not change.
impl_writeable_tlv_based!(ClaimingPayment, {
	(0, amount_msat, required),
	(2, payment_purpose, required),
	(4, receiver_node_id, required),
	(5, htlcs, optional_vec),
	(7, sender_intended_value, option),
	(9, onion_fields, option),
	(11, payment_id, option),
});
873
/// A payment to us which is claimable: its purpose, the onion fields from the (first) HTLC, and
/// the set of HTLC parts received so far.
struct ClaimablePayment {
	purpose: events::PaymentPurpose,
	onion_fields: Option<RecipientOnionFields>,
	htlcs: Vec<ClaimableHTLC>,
}
879
880impl ClaimablePayment {
881 fn inbound_payment_id(&self, secret: &[u8; 32]) -> PaymentId {
882 PaymentId::for_inbound_from_htlcs(
883 secret,
884 self.htlcs.iter().map(|htlc| (htlc.prev_hop.channel_id, htlc.prev_hop.htlc_id))
885 )
886 }
887}
888
/// How a channel's funding transaction is provided: either the full transaction (which we may
/// broadcast), or just its outpoint when the user handles broadcast manually.
enum FundingType {
	/// The full funding transaction.
	Checked(Transaction),
	/// Only the funding outpoint; broadcast is the user's responsibility.
	Unchecked(OutPoint),
}
905
906impl FundingType {
907 fn txid(&self) -> Txid {
908 match self {
909 FundingType::Checked(tx) => tx.compute_txid(),
910 FundingType::Unchecked(outp) => outp.txid,
911 }
912 }
913
914 fn transaction_or_dummy(&self) -> Transaction {
915 match self {
916 FundingType::Checked(tx) => tx.clone(),
917 FundingType::Unchecked(_) => Transaction {
918 version: bitcoin::transaction::Version::TWO,
919 lock_time: bitcoin::absolute::LockTime::ZERO,
920 input: Vec::new(),
921 output: Vec::new(),
922 },
923 }
924 }
925
926 fn is_manual_broadcast(&self) -> bool {
927 match self {
928 FundingType::Checked(_) => false,
929 FundingType::Unchecked(_) => true,
930 }
931 }
932}
933
/// All payments to us which are (or are in the process of being) claimed.
struct ClaimablePayments {
	// Payments ready to be claimed (or failed back), keyed by payment hash.
	claimable_payments: HashMap<PaymentHash, ClaimablePayment>,
	// Payments whose claim has begun and is awaiting monitor-update completion.
	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
951
impl ClaimablePayments {
	/// Moves a payment from `claimable_payments` into `pending_claiming_payments`.
	///
	/// On success, returns the payment's HTLCs (to be claimed upstream) and the recorded
	/// [`ClaimingPayment`]. On failure, returns the HTLCs which should instead be failed back
	/// (empty if no payment with `payment_hash` was found).
	///
	/// A payment is rejected (HTLCs returned in `Err`) if it carries unknown even custom TLVs
	/// and `custom_tlvs_known` is false.
	fn begin_claiming_payment<L: Deref, S: Deref>(
		&mut self, payment_hash: PaymentHash, node_signer: &S, logger: &L,
		inbound_payment_id_secret: &[u8; 32], custom_tlvs_known: bool,
	) -> Result<(Vec<ClaimableHTLC>, ClaimingPayment), Vec<ClaimableHTLC>>
		where L::Target: Logger, S::Target: NodeSigner,
	{
		match self.claimable_payments.remove(&payment_hash) {
			Some(payment) => {
				let mut receiver_node_id = node_signer.get_node_id(Recipient::Node)
					.expect("Failed to get node_id for node recipient");
				// If any HTLC arrived via a phantom SCID, report the phantom node id as the
				// receiver instead of our own.
				for htlc in payment.htlcs.iter() {
					if htlc.prev_hop.phantom_shared_secret.is_some() {
						let phantom_pubkey = node_signer.get_node_id(Recipient::PhantomNode)
							.expect("Failed to get node_id for phantom node recipient");
						receiver_node_id = phantom_pubkey;
						break;
					}
				}

				// Even custom TLV types are "it's ok to be odd"-mandatory: unknown even types
				// must cause rejection unless the caller opted in to accepting them.
				if let Some(RecipientOnionFields { custom_tlvs, .. }) = &payment.onion_fields {
					if !custom_tlvs_known && custom_tlvs.iter().any(|(typ, _)| typ % 2 == 0) {
						log_info!(logger, "Rejecting payment with payment hash {} as we cannot accept payment with unknown even TLVs: {}",
							&payment_hash, log_iter!(custom_tlvs.iter().map(|(typ, _)| typ).filter(|typ| *typ % 2 == 0)));
						return Err(payment.htlcs);
					}
				}

				let payment_id = payment.inbound_payment_id(inbound_payment_id_secret);
				// A pending claim for this hash should never already exist; debug-assert (and
				// log in production) if it does, keeping the existing entry.
				let claiming_payment = self.pending_claiming_payments
					.entry(payment_hash)
					.and_modify(|_| {
						debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
						log_error!(logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
							&payment_hash);
					})
					.or_insert_with(|| {
						let htlcs = payment.htlcs.iter().map(events::ClaimedHTLC::from).collect();
						let sender_intended_value = payment.htlcs.first().map(|htlc| htlc.total_msat);
						ClaimingPayment {
							amount_msat: payment.htlcs.iter().map(|source| source.value).sum(),
							payment_purpose: payment.purpose,
							receiver_node_id,
							htlcs,
							sender_intended_value,
							onion_fields: payment.onion_fields,
							payment_id: Some(payment_id),
						}
					}).clone();

				Ok((payment.htlcs, claiming_payment))
			},
			None => Err(Vec::new())
		}
	}
}
1018
/// Work regenerated on startup (e.g. from persisted state) to be processed in the background.
#[derive(Debug)]
enum BackgroundEvent {
	/// A monitor update for an already-closed channel, to be replayed on startup.
	ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)),
	/// A monitor update for a live channel regenerated on startup.
	MonitorUpdateRegeneratedOnStartup {
		counterparty_node_id: PublicKey,
		funding_txo: OutPoint,
		channel_id: ChannelId,
		update: ChannelMonitorUpdate
	},
	/// Marks that all regenerated monitor updates for a channel have completed.
	MonitorUpdatesComplete {
		counterparty_node_id: PublicKey,
		channel_id: ChannelId,
	},
}
1060
/// Identifies a channel (and the RAA-blocking action on it) which should be unblocked once an
/// event has been handled.
#[derive(Debug)]
pub(crate) struct EventUnblockedChannel {
	counterparty_node_id: PublicKey,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	blocking_action: RAAMonitorUpdateBlockingAction,
}
1069
// Serialization; field order is the wire format and must match `MaybeReadable` below.
impl Writeable for EventUnblockedChannel {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		self.counterparty_node_id.write(writer)?;
		self.funding_txo.write(writer)?;
		self.channel_id.write(writer)?;
		self.blocking_action.write(writer)
	}
}
1078
1079impl MaybeReadable for EventUnblockedChannel {
1080 fn read<R: Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1081 let counterparty_node_id = Readable::read(reader)?;
1082 let funding_txo = Readable::read(reader)?;
1083 let channel_id = Readable::read(reader)?;
1084 let blocking_action = match RAAMonitorUpdateBlockingAction::read(reader)? {
1085 Some(blocking_action) => blocking_action,
1086 None => return Ok(None),
1087 };
1088 Ok(Some(EventUnblockedChannel {
1089 counterparty_node_id,
1090 funding_txo,
1091 channel_id,
1092 blocking_action,
1093 }))
1094 }
1095}
1096
/// Actions to run once a `ChannelMonitorUpdate` completes.
#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
	/// Surface a `PaymentClaimed` event for the given hash (tracking any in-progress MPP claim).
	PaymentClaimed {
		payment_hash: PaymentHash,
		pending_mpp_claim: Option<(PublicKey, ChannelId, PendingMPPClaimPointer)>,
	},
	/// Emit an event, then (optionally) unblock another channel that was waiting on it.
	EmitEventAndFreeOtherChannel {
		event: events::Event,
		downstream_counterparty_and_funding_outpoint: Option<EventUnblockedChannel>,
	},
	/// Immediately release an RAA-monitor-update block on a downstream channel.
	FreeOtherChannelImmediately {
		downstream_counterparty_node_id: PublicKey,
		downstream_funding_outpoint: OutPoint,
		blocking_action: RAAMonitorUpdateBlockingAction,
		downstream_channel_id: ChannelId,
	},
}
1141
// Upgradable TLV serialization; variant/field numbers are stable wire ids.
impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
	(0, PaymentClaimed) => {
		(0, payment_hash, required),
		// `static_value`: never actually (de)serialized — always `None` on read. The absurd type
		// number guarantees it can never collide with a real TLV.
		(9999999999, pending_mpp_claim, (static_value, None)),
	},
	(1, FreeOtherChannelImmediately) => {
		(0, downstream_counterparty_node_id, required),
		(2, downstream_funding_outpoint, required),
		(4, blocking_action, upgradable_required),
		// Defaults from the funding outpoint for data written before this field existed.
		(5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))),
	},
	(2, EmitEventAndFreeOtherChannel) => {
		(0, event, upgradable_required),
		(1, downstream_counterparty_and_funding_outpoint, upgradable_option),
	},
);
1167
/// Actions to run once a user-facing event has been handled.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) enum EventCompletionAction {
	/// Release a held RAA `ChannelMonitorUpdate` for the given channel.
	ReleaseRAAChannelMonitorUpdate {
		counterparty_node_id: PublicKey,
		channel_funding_outpoint: OutPoint,
		channel_id: ChannelId,
	},
}
// TLV serialization; numbers are stable wire ids. `channel_id` defaults from the funding
// outpoint for data written before the field existed.
impl_writeable_tlv_based_enum!(EventCompletionAction,
	(0, ReleaseRAAChannelMonitorUpdate) => {
		(0, channel_funding_outpoint, required),
		(2, counterparty_node_id, required),
		(3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))),
	}
);
1185
/// Identifies a specific HTLC (by channel and HTLC id) being claimed.
struct HTLCClaimSource {
	// Optional for backwards compatibility with older data — NOTE(review): confirm.
	counterparty_node_id: Option<PublicKey>,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	htlc_id: u64,
}
1198
1199impl From<&MPPClaimHTLCSource> for HTLCClaimSource {
1200 fn from(o: &MPPClaimHTLCSource) -> HTLCClaimSource {
1201 HTLCClaimSource {
1202 counterparty_node_id: Some(o.counterparty_node_id),
1203 funding_txo: o.funding_txo,
1204 channel_id: o.channel_id,
1205 htlc_id: o.htlc_id,
1206 }
1207 }
1208}
1209
/// Tracks the per-channel progress of an in-flight multi-part payment claim.
#[derive(Debug)]
pub(crate) struct PendingMPPClaim {
	channels_without_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
	channels_with_preimage: Vec<(PublicKey, OutPoint, ChannelId)>,
}
1215
/// Identifies one HTLC part of a multi-part payment being claimed.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct MPPClaimHTLCSource {
	counterparty_node_id: PublicKey,
	funding_txo: OutPoint,
	channel_id: ChannelId,
	htlc_id: u64,
}

// TLV serialization; numbers are stable wire ids.
impl_writeable_tlv_based!(MPPClaimHTLCSource, {
	(0, counterparty_node_id, required),
	(2, funding_txo, required),
	(4, channel_id, required),
	(6, htlc_id, required),
});
1233
/// The full set of HTLC parts and claim metadata for a payment being claimed, as persisted.
#[derive(Clone, Debug, PartialEq, Eq)]
pub(crate) struct PaymentClaimDetails {
	mpp_parts: Vec<MPPClaimHTLCSource>,
	claiming_payment: ClaimingPayment,
}

// TLV serialization; numbers are stable wire ids.
impl_writeable_tlv_based!(PaymentClaimDetails, {
	(0, mpp_parts, required_vec),
	(2, claiming_payment, required),
});
1250
/// A shared, cheaply-clonable handle to a [`PendingMPPClaim`]; equality is pointer identity.
#[derive(Clone)]
pub(crate) struct PendingMPPClaimPointer(Arc<Mutex<PendingMPPClaim>>);
1253
1254impl PartialEq for PendingMPPClaimPointer {
1255 fn eq(&self, o: &Self) -> bool { Arc::ptr_eq(&self.0, &o.0) }
1256}
1257impl Eq for PendingMPPClaimPointer {}
1258
1259impl core::fmt::Debug for PendingMPPClaimPointer {
1260 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
1261 self.0.lock().unwrap().fmt(f)
1262 }
1263}
1264
/// Reasons an RAA-triggered `ChannelMonitorUpdate` may be held back on a channel.
#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) enum RAAMonitorUpdateBlockingAction {
	/// A forwarded payment was claimed inbound on this channel; block until the claim settles.
	ForwardedPaymentInboundClaim {
		channel_id: ChannelId,
		htlc_id: u64,
	},
	/// Part of an MPP claim is still in flight; block until the shared claim completes.
	ClaimedMPPPayment {
		pending_claim: PendingMPPClaimPointer,
	}
}
1289
1290impl RAAMonitorUpdateBlockingAction {
1291 fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self {
1292 Self::ForwardedPaymentInboundClaim {
1293 channel_id: prev_hop.channel_id,
1294 htlc_id: prev_hop.htlc_id,
1295 }
1296 }
1297}
1298
// Upgradable TLV serialization. `ClaimedMPPPayment` is declared as an unread variant: it is
// never deserialized (reads as `None`), since its pointer cannot round-trip through storage.
impl_writeable_tlv_based_enum_upgradable!(RAAMonitorUpdateBlockingAction,
	(0, ForwardedPaymentInboundClaim) => { (0, channel_id, required), (2, htlc_id, required) },
	unread_variants: ClaimedMPPPayment
);
1303
1304impl Readable for Option<RAAMonitorUpdateBlockingAction> {
1305 fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
1306 Ok(RAAMonitorUpdateBlockingAction::read(reader)?)
1307 }
1308}
1309
/// Per-peer state, tracking all channels with (and pending messages to) a single peer.
pub(super) struct PeerState<SP: Deref> where SP::Target: SignerProvider {
	/// All channels with this peer, keyed by channel id, in all phases.
	pub(super) channel_by_id: HashMap<ChannelId, ChannelPhase<SP>>,
	/// Inbound channel open requests awaiting user acceptance.
	pub(super) inbound_channel_request_by_id: HashMap<ChannelId, InboundChannelRequest>,
	// The features the peer advertised in its latest `init` message.
	latest_features: InitFeatures,
	/// Messages queued for delivery to this peer.
	pub(super) pending_msg_events: Vec<MessageSendEvent>,
	// Monitor updates in flight per funding outpoint; must drain before peer state removal.
	in_flight_monitor_updates: BTreeMap<OutPoint, Vec<ChannelMonitorUpdate>>,
	// Completion actions blocked on in-flight monitor updates, per channel.
	monitor_update_blocked_actions: BTreeMap<ChannelId, Vec<MonitorUpdateCompletionAction>>,
	// Actions currently blocking RAA monitor updates, per channel.
	actions_blocking_raa_monitor_updates: BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
	// Latest monitor update ids for channels that have been closed.
	closed_channel_monitor_update_ids: BTreeMap<ChannelId, u64>,
	/// Whether the peer is currently connected.
	pub is_connected: bool,
}
1375
1376impl <SP: Deref> PeerState<SP> where SP::Target: SignerProvider {
1377 fn ok_to_remove(&self, require_disconnected: bool) -> bool {
1381 if require_disconnected && self.is_connected {
1382 return false
1383 }
1384 for (_, updates) in self.in_flight_monitor_updates.iter() {
1385 if !updates.is_empty() {
1386 return false;
1387 }
1388 }
1389 !self.channel_by_id.iter().any(|(_, phase)|
1390 match phase {
1391 ChannelPhase::Funded(_) | ChannelPhase::UnfundedOutboundV1(_) => true,
1392 ChannelPhase::UnfundedInboundV1(_) => false,
1393 ChannelPhase::UnfundedOutboundV2(_) => true,
1394 ChannelPhase::UnfundedInboundV2(_) => false,
1395 }
1396 )
1397 && self.monitor_update_blocked_actions.is_empty()
1398 && self.closed_channel_monitor_update_ids.is_empty()
1399 }
1400
1401 fn total_channel_count(&self) -> usize {
1403 self.channel_by_id.len() + self.inbound_channel_request_by_id.len()
1404 }
1405
1406 fn has_channel(&self, channel_id: &ChannelId) -> bool {
1408 self.channel_by_id.contains_key(channel_id) ||
1409 self.inbound_channel_request_by_id.contains_key(channel_id)
1410 }
1411}
1412
/// An owned `open_channel` message in either the V1 or (when the `dual_funding` cfg is set)
/// V2 protocol flavor.
#[derive(Clone)]
pub(super) enum OpenChannelMessage {
	V1(msgs::OpenChannel),
	#[cfg(dual_funding)]
	V2(msgs::OpenChannelV2),
}
1419
/// A borrowed counterpart of [`OpenChannelMessage`], referencing either message flavor.
pub(super) enum OpenChannelMessageRef<'a> {
	V1(&'a msgs::OpenChannel),
	#[cfg(dual_funding)]
	V2(&'a msgs::OpenChannelV2),
}
1425
/// An inbound channel open which has not yet been accepted (or rejected) by the user.
pub(super) struct InboundChannelRequest {
	/// The original `open_channel` message, kept so we can respond once a decision is made.
	pub open_channel_msg: OpenChannelMessage,
	/// Timer ticks remaining before the request is aged out; compared against
	/// [`UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS`] — confirm exact decrement site elsewhere
	/// in this file.
	pub ticks_remaining: i32,
}
1434
/// Number of timer ticks an unaccepted inbound channel request may live before being dropped.
const UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS: i32 = 2;

/// Number of blocks of feerate history we keep (144 blocks ≈ one day at 10 min/block);
/// see the `last_days_feerates` field on [`ChannelManager`].
pub(super) const FEERATE_TRACKING_BLOCKS: usize = 144;
1443
/// State for an inbound payment we expect to receive.
#[derive(Debug)]
struct PendingInboundPayment {
	/// The payment secret the sender must include for us to accept the payment.
	payment_secret: PaymentSecret,
	/// Expiry of this payment — presumably seconds since the UNIX epoch; confirm against the
	/// code that populates it.
	expiry_time: u64,
	/// An opaque id chosen by the user when registering the payment.
	user_payment_id: u64,
	/// The preimage, if we (rather than the sender) generated it.
	payment_preimage: Option<PaymentPreimage>,
	/// If set, the minimum amount (in millisatoshis) we will accept for this payment.
	min_value_msat: Option<u64>,
}
1465
/// A [`ChannelManager`] alias with all type parameters behind [`Arc`]s, using [`KeysManager`]
/// for all signing/entropy roles and the default router/scorer stack. Convenient when the
/// manager is shared across threads.
#[cfg(not(c_bindings))]
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
	Arc<M>,
	Arc<T>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<F>,
	Arc<DefaultRouter<
		Arc<NetworkGraph<Arc<L>>>,
		Arc<L>,
		Arc<KeysManager>,
		Arc<RwLock<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>,
		ProbabilisticScoringFeeParameters,
		ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>,
	>>,
	Arc<DefaultMessageRouter<
		Arc<NetworkGraph<Arc<L>>>,
		Arc<L>,
		Arc<KeysManager>,
	>>,
	Arc<L>
>;
1499
/// The reference-based counterpart of [`SimpleArcChannelManager`]: the same concrete stack but
/// borrowed rather than [`Arc`]-wrapped, for single-owner usage without refcounting.
#[cfg(not(c_bindings))]
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, M, T, F, L> =
	ChannelManager<
		&'a M,
		&'b T,
		&'c KeysManager,
		&'c KeysManager,
		&'c KeysManager,
		&'d F,
		&'e DefaultRouter<
			&'f NetworkGraph<&'g L>,
			&'g L,
			&'c KeysManager,
			&'h RwLock<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>,
			ProbabilisticScoringFeeParameters,
			ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>
		>,
		&'i DefaultMessageRouter<
			&'f NetworkGraph<&'g L>,
			&'g L,
			&'c KeysManager,
		>,
		&'g L
	>;
1535
/// A cover trait implemented for any [`ChannelManager`], abstracting over its many generic
/// parameters so utility code can simply take `C: AChannelManager`.
pub trait AChannelManager {
	/// A type implementing [`chain::Watch`].
	type Watch: chain::Watch<Self::Signer> + ?Sized;
	/// A type that may be dereferenced to [`Self::Watch`].
	type M: Deref<Target = Self::Watch>;
	/// A type implementing [`BroadcasterInterface`].
	type Broadcaster: BroadcasterInterface + ?Sized;
	/// A type that may be dereferenced to [`Self::Broadcaster`].
	type T: Deref<Target = Self::Broadcaster>;
	/// A type implementing [`EntropySource`].
	type EntropySource: EntropySource + ?Sized;
	/// A type that may be dereferenced to [`Self::EntropySource`].
	type ES: Deref<Target = Self::EntropySource>;
	/// A type implementing [`NodeSigner`].
	type NodeSigner: NodeSigner + ?Sized;
	/// A type that may be dereferenced to [`Self::NodeSigner`].
	type NS: Deref<Target = Self::NodeSigner>;
	/// A type implementing [`EcdsaChannelSigner`].
	type Signer: EcdsaChannelSigner + Sized;
	/// A type implementing [`SignerProvider`] for [`Self::Signer`].
	type SignerProvider: SignerProvider<EcdsaSigner= Self::Signer> + ?Sized;
	/// A type that may be dereferenced to [`Self::SignerProvider`].
	type SP: Deref<Target = Self::SignerProvider>;
	/// A type implementing [`FeeEstimator`].
	type FeeEstimator: FeeEstimator + ?Sized;
	/// A type that may be dereferenced to [`Self::FeeEstimator`].
	type F: Deref<Target = Self::FeeEstimator>;
	/// A type implementing [`Router`].
	type Router: Router + ?Sized;
	/// A type that may be dereferenced to [`Self::Router`].
	type R: Deref<Target = Self::Router>;
	/// A type implementing [`MessageRouter`].
	type MessageRouter: MessageRouter + ?Sized;
	/// A type that may be dereferenced to [`Self::MessageRouter`].
	type MR: Deref<Target = Self::MessageRouter>;
	/// A type implementing [`Logger`].
	type Logger: Logger + ?Sized;
	/// A type that may be dereferenced to [`Self::Logger`].
	type L: Deref<Target = Self::Logger>;
	/// Returns a reference to the actual [`ChannelManager`] object.
	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::MR, Self::L>;
}
1582
// Blanket implementation of the cover trait: each associated type is wired straight to the
// corresponding `ChannelManager` type parameter (or its `Deref` target), and `get_cm` is the
// identity.
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	type Watch = M::Target;
	type M = M;
	type Broadcaster = T::Target;
	type T = T;
	type EntropySource = ES::Target;
	type ES = ES;
	type NodeSigner = NS::Target;
	type NS = NS;
	type Signer = <SP::Target as SignerProvider>::EcdsaSigner;
	type SignerProvider = SP::Target;
	type SP = SP;
	type FeeEstimator = F::Target;
	type F = F;
	type Router = R::Target;
	type R = R;
	type MessageRouter = MR::Target;
	type MR = MR;
	type Logger = L::Target;
	type L = L;
	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, MR, L> { self }
}
1617
/// The core Lightning node state machine: tracks per-peer channel state, pending payments and
/// HTLC forwards, and the queues of messages and events to surface to the user.
///
/// Several fields are duplicated with `#[cfg(test)]`/`#[cfg(not(test))]` visibility so tests
/// can poke at otherwise-private state.
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Config applied to channels unless the user overrides it per-channel.
	default_configuration: UserConfig,
	/// Hash identifying the chain (network) this node operates on.
	chain_hash: ChainHash,
	/// Fee estimator, clamped to sane lower bounds.
	fee_estimator: LowerBoundedFeeEstimator<F>,
	/// The chain::Watch implementation which holds our ChannelMonitors.
	chain_monitor: M,
	/// Used to broadcast funding/closing/batch transactions.
	tx_broadcaster: T,
	/// Payment-path router.
	router: R,
	/// Onion-message router.
	message_router: MR,

	/// The best chain tip we are aware of.
	#[cfg(test)]
	pub(super) best_block: RwLock<BestBlock>,
	#[cfg(not(test))]
	best_block: RwLock<BestBlock>,
	/// secp256k1 context for signing and verification.
	secp_ctx: Secp256k1<secp256k1::All>,

	/// State of all payments we initiated.
	pending_outbound_payments: OutboundPayments,

	/// HTLCs queued for forwarding, keyed by (presumably) the outgoing short channel id —
	/// confirm the key's meaning at the insertion sites.
	#[cfg(test)]
	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	#[cfg(not(test))]
	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	/// HTLCs held for user interception, by intercept id.
	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,

	/// Received `update_add_htlc` messages awaiting onion decode, keyed by a `u64` scid-like id.
	decode_update_add_htlcs: Mutex<HashMap<u64, Vec<msgs::UpdateAddHTLC>>>,

	/// Payments we have received and can claim (or have begun claiming).
	claimable_payments: Mutex<ClaimablePayments>,

	/// Outbound SCID aliases currently in use, to avoid collisions.
	outbound_scid_aliases: Mutex<HashSet<u64>>,

	/// Maps funding outpoints to the counterparty node id owning the channel.
	#[cfg(not(test))]
	outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
	#[cfg(test)]
	pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,

	/// Maps short channel ids (real or alias) to (counterparty, channel id); maintained by
	/// `send_channel_ready!` and `locked_close_channel!`.
	#[cfg(test)]
	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,
	#[cfg(not(test))]
	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, ChannelId)>>,

	/// Our own node public key.
	our_network_pubkey: PublicKey,

	/// Key material for generating/verifying inbound payment secrets.
	inbound_payment_key: inbound_payment::ExpandedKey,

	/// Secret bytes used to deterministically derive fake SCIDs.
	fake_scid_rand_bytes: [u8; 32],

	/// Secret used to authenticate probe payments we originate.
	probing_cookie_secret: [u8; 32],

	/// Secret used when deriving inbound payment ids.
	inbound_payment_id_secret: [u8; 32],

	/// The highest block timestamp we have seen.
	highest_seen_timestamp: AtomicUsize,

	/// Per-peer state; see [`PeerState`]. The outer lock is held for reads on most operations
	/// and writes only for peer insertion/removal.
	#[cfg(not(any(test, feature = "_test_utils")))]
	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<SP>>>>,

	/// Events to surface to the user, each optionally paired with an action to run once the
	/// user has handled it.
	#[cfg(not(any(test, feature = "_test_utils")))]
	pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,

	/// Guards against concurrent event processing (see `process_events_body!`).
	pending_events_processor: AtomicBool,

	/// Background work regenerated on startup (e.g. replayed monitor updates).
	pending_background_events: Mutex<Vec<BackgroundEvent>>,
	/// Read-held by all normal operations; write-held to serialize the whole manager.
	total_consistency_lock: RwLock<()>,
	/// Per-batch-funding-tx state: (channel id, counterparty, monitor-update-complete) tuples.
	funding_batch_states: Mutex<BTreeMap<Txid, Vec<(ChannelId, PublicKey, bool)>>>,

	/// Whether startup background events have been processed at least once.
	background_events_processed_since_startup: AtomicBool,

	/// Woken when the user should call the event/persistence pipeline.
	event_persist_notifier: Notifier,
	/// Set when the manager needs to be re-persisted.
	needs_persist_flag: AtomicBool,

	/// Queued BOLT12 offers messages and their send instructions.
	#[cfg(not(any(test, feature = "_test_utils")))]
	pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(crate) pending_offers_messages: Mutex<Vec<(OffersMessage, MessageSendInstructions)>>,
	/// Queued async-payments onion messages.
	pending_async_payments_messages: Mutex<Vec<(AsyncPaymentsMessage, MessageSendInstructions)>>,

	/// Gossip messages to broadcast to all peers.
	pending_broadcast_messages: Mutex<Vec<MessageSendEvent>>,

	/// Recent feerate samples as (u32, u32) pairs; sized by [`FEERATE_TRACKING_BLOCKS`] —
	/// confirm the tuple's field meanings at the update site.
	last_days_feerates: Mutex<VecDeque<(u32, u32)>>,

	/// Resolver for BOLT12 human-readable names via DNSSEC onion messages.
	#[cfg(feature = "dnssec")]
	hrn_resolver: OMNameResolver,
	/// Queued DNSSEC-resolution onion messages.
	#[cfg(feature = "dnssec")]
	pending_dns_onion_messages: Mutex<Vec<(DNSResolverMessage, MessageSendInstructions)>>,

	/// Test-only override mapping names directly to offers.
	#[cfg(feature = "_test_utils")]
	pub testing_dnssec_proof_offer_resolution_override: Mutex<HashMap<HumanReadableName, Offer>>,

	/// Source of cryptographic randomness.
	#[cfg(test)]
	pub(super) entropy_source: ES,
	#[cfg(not(test))]
	entropy_source: ES,
	/// Signer for node-level operations (invoices, gossip, onion messages).
	node_signer: NS,
	/// Provider of per-channel signers.
	#[cfg(test)]
	pub(super) signer_provider: SP,
	#[cfg(not(test))]
	signer_provider: SP,

	logger: L,
}
2669
/// Chain-related parameters used when constructing a [`ChannelManager`].
#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
	/// The network we operate on (e.g. mainnet, testnet).
	pub network: Network,

	/// The best block known at construction time.
	pub best_block: BestBlock,
}
2685
/// The outcome a [`PersistenceNotifierGuard`] should apply when dropped: persist (and wake the
/// notifier), wake the notifier without persisting, or do nothing.
#[derive(Copy, Clone, PartialEq)]
#[must_use]
enum NotifyOption {
	/// Set the needs-persist flag and wake the event notifier.
	DoPersist,
	/// Wake the event notifier (events may need handling) but skip persistence.
	SkipPersistHandleEvents,
	/// Neither persist nor notify.
	SkipPersistNoEvents,
}
2693
/// RAII guard which holds a read lock on `total_consistency_lock` for its lifetime and, on
/// drop, runs `should_persist` to decide whether to flag persistence and/or wake the event
/// notifier (see the `Drop` impl below).
struct PersistenceNotifierGuard<'a, F: FnMut() -> NotifyOption> {
	event_persist_notifier: &'a Notifier,
	needs_persist_flag: &'a AtomicBool,
	should_persist: F,
	// Holds the consistency-lock read guard until the notifier guard is dropped.
	_read_guard: RwLockReadGuard<'a, ()>,
}
2711
2712impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { fn notify_on_drop<C: AChannelManager>(cm: &'a C) -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2720 Self::optionally_notify(cm, || -> NotifyOption { NotifyOption::DoPersist })
2721 }
2722
2723 fn optionally_notify<F: FnMut() -> NotifyOption, C: AChannelManager>(cm: &'a C, mut persist_check: F)
2724 -> PersistenceNotifierGuard<'a, impl FnMut() -> NotifyOption> {
2725 let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2726 let force_notify = cm.get_cm().process_background_events();
2727
2728 PersistenceNotifierGuard {
2729 event_persist_notifier: &cm.get_cm().event_persist_notifier,
2730 needs_persist_flag: &cm.get_cm().needs_persist_flag,
2731 should_persist: move || {
2732 let notify = persist_check();
2735 match (notify, force_notify) {
2736 (NotifyOption::DoPersist, _) => NotifyOption::DoPersist,
2737 (_, NotifyOption::DoPersist) => NotifyOption::DoPersist,
2738 (NotifyOption::SkipPersistHandleEvents, _) => NotifyOption::SkipPersistHandleEvents,
2739 (_, NotifyOption::SkipPersistHandleEvents) => NotifyOption::SkipPersistHandleEvents,
2740 _ => NotifyOption::SkipPersistNoEvents,
2741 }
2742 },
2743 _read_guard: read_guard,
2744 }
2745 }
2746
2747 fn optionally_notify_skipping_background_events<F: Fn() -> NotifyOption, C: AChannelManager>
2751 (cm: &'a C, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
2752 let read_guard = cm.get_cm().total_consistency_lock.read().unwrap();
2753
2754 PersistenceNotifierGuard {
2755 event_persist_notifier: &cm.get_cm().event_persist_notifier,
2756 needs_persist_flag: &cm.get_cm().needs_persist_flag,
2757 should_persist: persist_check,
2758 _read_guard: read_guard,
2759 }
2760 }
2761}
2762
2763impl<'a, F: FnMut() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
2764 fn drop(&mut self) {
2765 match (self.should_persist)() {
2766 NotifyOption::DoPersist => {
2767 self.needs_persist_flag.store(true, Ordering::Release);
2768 self.event_persist_notifier.notify()
2769 },
2770 NotifyOption::SkipPersistHandleEvents =>
2771 self.event_persist_notifier.notify(),
2772 NotifyOption::SkipPersistNoEvents => {},
2773 }
2774 }
2775}
2776
/// Delay (in blocks) we require before claiming our funds on-chain: 6 * 24 ≈ one day at
/// ~10 min/block.
pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
/// Upper bound on the above that we'll tolerate: 2 * 6 * 24 * 7 ≈ two weeks of blocks.
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;

/// The minimum `cltv_expiry_delta` we enforce for forwarded HTLCs: 6 * 7 = 42 blocks.
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
/// CLTV values further than this in the future (14 days of blocks) are considered unreasonable.
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;

/// The minimum final-hop CLTV delta: the fail-back buffer plus 3 blocks of slack.
pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;

// Compile-time sanity check: this u32 subtraction underflows (and fails the build) if
// MIN_CLTV_EXPIRY_DELTA cannot cover the grace periods, claim buffer and reorg delay.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;

// Second compile-time sanity check, against a grace period plus two claim buffers.
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;

/// Timer ticks to wait for all parts of an MPP payment before failing the received parts back.
pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;

/// Timer ticks a channel must stay unusable before we gossip it as disabled.
pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;

/// Timer ticks a channel must stay usable before we gossip it as re-enabled.
pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;

/// Maximum number of unfunded channels we allow any single peer to have open with us.
const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;

/// Maximum number of peers which may concurrently have unfunded channels with us.
const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;

/// Maximum number of connected peers with no funded channels we'll retain state for.
const MAX_NO_CHANNEL_PEERS: usize = 250;

/// Relative expiry at or below which a BOLT12 offer/invoice is considered short-lived: 24 hours.
pub const MAX_SHORT_LIVED_RELATIVE_EXPIRY: Duration = Duration::from_secs(60 * 60 * 24);
2865
/// A snapshot of the state of a recent outbound payment.
#[derive(Debug, PartialEq)]
pub enum RecentPaymentDetails {
	/// We requested a BOLT12 invoice and are waiting for it before paying.
	AwaitingInvoice {
		/// Our identifier for this payment.
		payment_id: PaymentId,
	},
	/// The payment is in flight (or being retried).
	Pending {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The hash the payment's HTLCs are locked to.
		payment_hash: PaymentHash,
		/// The total amount of the payment, in millisatoshis.
		total_msat: u64,
	},
	/// The payment completed; we received the preimage.
	Fulfilled {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The payment hash, if still known.
		payment_hash: Option<PaymentHash>,
	},
	/// The payment failed terminally and has been abandoned.
	Abandoned {
		/// Our identifier for this payment.
		payment_id: PaymentId,
		/// The hash the failed payment was locked to.
		payment_hash: PaymentHash,
	},
}
2919
/// Route-hint material for receiving to a "phantom" node: the real channels to route through,
/// the fake SCID for the phantom hop, and our real node id.
#[derive(Clone)]
pub struct PhantomRouteHints {
	/// The real channels the payment may be routed over.
	pub channels: Vec<ChannelDetails>,
	/// The fake short channel id identifying the phantom hop.
	pub phantom_scid: u64,
	/// The public key of the real node the hints route through.
	pub real_node_pubkey: PublicKey,
}
2933
// Maps a `Result<_, MsgHandleErrInternal>` into the `Result` we return to the peer handler.
// On a close-requiring error it finishes the channel shutdown, queues a broadcast
// `channel_update`, and queues a `HandleError` message event for the counterparty (unless the
// error action is `IgnoreError`). Must be called with no peer/event locks held — see the
// debug asserts.
macro_rules! handle_error {
	($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
		// These locks are taken below; holding them here would deadlock.
		debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);

		match $internal {
			Ok(msg) => Ok(msg),
			Err(MsgHandleErrInternal { err, shutdown_finish, .. }) => {
				let mut msg_event = None;

				if let Some((shutdown_res, update_option)) = shutdown_finish {
					let counterparty_node_id = shutdown_res.counterparty_node_id;
					let channel_id = shutdown_res.channel_id;
					let logger = WithContext::from(
						&$self.logger, Some(counterparty_node_id), Some(channel_id), None
					);
					log_error!(logger, "Force-closing channel: {}", err.err);

					$self.finish_close_channel(shutdown_res);
					if let Some(update) = update_option {
						// Let the network know the channel is gone.
						let mut pending_broadcast_messages = $self.pending_broadcast_messages.lock().unwrap();
						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
							msg: update
						});
					}
				} else {
					log_error!($self.logger, "Got non-closing error: {}", err.err);
				}

				if let msgs::ErrorAction::IgnoreError = err.action {
				} else {
					msg_event = Some(events::MessageSendEvent::HandleError {
						node_id: $counterparty_node_id,
						action: err.action.clone()
					});
				}

				if let Some(msg_event) = msg_event {
					let per_peer_state = $self.per_peer_state.read().unwrap();
					if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
						let mut peer_state = peer_state_mutex.lock().unwrap();
						peer_state.pending_msg_events.push(msg_event);
					}
				}

				// Return the error to be propagated to the peer handler.
				Err(err)
			},
		}
	} };
}
2987
// Cleans up `ChannelManager` maps for a channel being closed, while the relevant `PeerState`
// lock is already held: applies any final monitor update, records the closed channel's latest
// monitor update id (when we may still see monitor updates for it), and removes the channel
// from the outpoint and SCID maps.
macro_rules! locked_close_channel {
	($self: ident, $peer_state: expr, $channel_context: expr, $shutdown_res_mut: expr) => {{
		// Apply the shutdown's final monitor update, if any, before tearing down maps.
		if let Some((_, funding_txo, _, update)) = $shutdown_res_mut.monitor_update.take() {
			handle_new_monitor_update!($self, funding_txo, update, $peer_state,
				$channel_context, REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER);
		}
		let update_id = $channel_context.get_latest_monitor_update_id();
		// Track the closed channel's update id when the funding reached the chain, a
		// zero-conf channel was involved, or more than the initial update occurred.
		if $channel_context.get_funding_tx_confirmation_height().is_some() || $channel_context.minimum_depth() == Some(0) || update_id > 1 {
			let chan_id = $channel_context.channel_id();
			$peer_state.closed_channel_monitor_update_ids.insert(chan_id, update_id);
		}
		if let Some(outpoint) = $channel_context.get_funding_txo() {
			$self.outpoint_to_peer.lock().unwrap().remove(&outpoint);
		}
		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
		if let Some(short_id) = $channel_context.get_short_channel_id() {
			short_to_chan_info.remove(&short_id);
		} else {
			// No real SCID was ever assigned, so release the outbound alias for reuse.
			let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel_context.outbound_scid_alias());
			debug_assert!(alias_removed);
		}
		short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
	}}
}
3029
// Converts a `ChannelError` into a `(close_channel, MsgHandleErrInternal)` pair. `Warn` and
// `Ignore` never close the channel; `Close` force-shuts the channel down (running
// `locked_close_channel!`) and carries an optional `channel_update` to broadcast. The
// FUNDED/UNFUNDED arms choose whether such an update can exist; the phase arm dispatches on
// the `ChannelPhase` variant.
macro_rules! convert_chan_phase_err {
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, MANUAL_CHANNEL_UPDATE, $channel_update: expr) => {
		match $err {
			ChannelError::Warn(msg) => {
				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), *$channel_id))
			},
			ChannelError::Ignore(msg) => {
				(false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), *$channel_id))
			},
			ChannelError::Close((msg, reason)) => {
				let logger = WithChannelContext::from(&$self.logger, &$channel.context, None);
				log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg);
				let mut shutdown_res = $channel.context.force_shutdown(true, reason);
				locked_close_channel!($self, $peer_state, &$channel.context, &mut shutdown_res);
				let err =
					MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update);
				(true, err)
			},
		}
	};
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, FUNDED_CHANNEL) => {
		// Funded channels may have a broadcastable `channel_update` announcing the closure.
		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, { $self.get_channel_update_for_broadcast($channel).ok() })
	};
	($self: ident, $peer_state: expr, $err: expr, $channel: expr, $channel_id: expr, UNFUNDED_CHANNEL) => {
		// Unfunded channels were never announced, so there is nothing to broadcast.
		convert_chan_phase_err!($self, $peer_state, $err, $channel, $channel_id, MANUAL_CHANNEL_UPDATE, None)
	};
	($self: ident, $peer_state: expr, $err: expr, $channel_phase: expr, $channel_id: expr) => {
		match $channel_phase {
			ChannelPhase::Funded(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, FUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedOutboundV1(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedInboundV1(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedOutboundV2(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
			ChannelPhase::UnfundedInboundV2(channel) => {
				convert_chan_phase_err!($self, $peer_state, $err, channel, $channel_id, UNFUNDED_CHANNEL)
			},
		}
	};
}
3077
// Unwraps a channel-operation `Result` inside a loop: on error, converts it via
// `convert_chan_phase_err!`, removes the channel's map entry if the error closed it, and
// `break`s out of the enclosing loop with the converted error.
macro_rules! break_chan_phase_entry {
	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(e) => {
				let key = *$entry.key();
				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
				if drop {
					$entry.remove_entry();
				}
				break Err(res);
			}
		}
	}
}
3093
// Same as `break_chan_phase_entry!` but `return`s the converted error from the enclosing
// function instead of breaking a loop.
macro_rules! try_chan_phase_entry {
	($self: ident, $peer_state: expr, $res: expr, $entry: expr) => {
		match $res {
			Ok(res) => res,
			Err(e) => {
				let key = *$entry.key();
				let (drop, res) = convert_chan_phase_err!($self, $peer_state, e, $entry.get_mut(), &key);
				if drop {
					$entry.remove_entry();
				}
				return Err(res);
			}
		}
	}
}
3109
// Removes a channel's map entry and runs the `locked_close_channel!` cleanup, evaluating to
// the removed channel so the caller can finish tearing it down.
macro_rules! remove_channel_phase {
	($self: ident, $peer_state: expr, $entry: expr, $shutdown_res_mut: expr) => {
		{
			let channel = $entry.remove_entry().1;
			locked_close_channel!($self, $peer_state, &channel.context(), $shutdown_res_mut);
			channel
		}
	}
}
3119
// Queues a `channel_ready` message for the counterparty and registers the channel's SCIDs
// (outbound alias always; real SCID if assigned) in `short_to_chan_info`, asserting that no
// different channel already claimed either SCID.
macro_rules! send_channel_ready {
	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
			node_id: $channel.context.get_counterparty_node_id(),
			msg: $channel_ready_msg,
		});
		let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
		// Insert the outbound alias; re-insertion for the same channel is fine, a collision
		// with a different channel is a bug.
		let outbound_alias_insert = short_to_chan_info.insert($channel.context.outbound_scid_alias(), ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
		assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
			"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
		if let Some(real_scid) = $channel.context.get_short_channel_id() {
			let scid_insert = short_to_chan_info.insert(real_scid, ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()));
			assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.context.get_counterparty_node_id(), $channel.context.channel_id()),
				"SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
		}
	}}
}
// Pushes a `FundingTxBroadcastSafe` event at most once per channel, using the channel
// context's emitted-flag to deduplicate.
macro_rules! emit_funding_tx_broadcast_safe_event {
	($locked_events: expr, $channel: expr, $funding_txo: expr) => {
		if !$channel.context.funding_tx_broadcast_safe_event_emitted() {
			$locked_events.push_back((events::Event::FundingTxBroadcastSafe {
				channel_id: $channel.context.channel_id(),
				user_channel_id: $channel.context.get_user_id(),
				funding_txo: $funding_txo,
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				former_temporary_channel_id: $channel.context.temporary_channel_id()
					.expect("Unreachable: FundingTxBroadcastSafe event feature added to channel establishment process in LDK v0.0.124 where this should never be None."),
			}, None));
			$channel.context.set_funding_tx_broadcast_safe_event_emitted();
		}
	}
}
3154
// Pushes a `ChannelPending` event at most once per channel, gated and then marked via the
// channel context's event-emitted flag.
macro_rules! emit_channel_pending_event {
	($locked_events: expr, $channel: expr) => {
		if $channel.context.should_emit_channel_pending_event() {
			$locked_events.push_back((events::Event::ChannelPending {
				channel_id: $channel.context.channel_id(),
				former_temporary_channel_id: $channel.context.temporary_channel_id(),
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				user_channel_id: $channel.context.get_user_id(),
				funding_txo: $channel.context.get_funding_txo().unwrap().into_bitcoin_outpoint(),
				channel_type: Some($channel.context.get_channel_type().clone()),
			}, None));
			$channel.context.set_channel_pending_event_emitted();
		}
	}
}
3170
// Pushes a `ChannelReady` event at most once per channel. Asserts that `ChannelPending` was
// emitted first, since ready must follow pending.
macro_rules! emit_channel_ready_event {
	($locked_events: expr, $channel: expr) => {
		if $channel.context.should_emit_channel_ready_event() {
			debug_assert!($channel.context.channel_pending_event_emitted());
			$locked_events.push_back((events::Event::ChannelReady {
				channel_id: $channel.context.channel_id(),
				user_channel_id: $channel.context.get_user_id(),
				counterparty_node_id: $channel.context.get_counterparty_node_id(),
				channel_type: $channel.context.get_channel_type().clone(),
			}, None));
			$channel.context.set_channel_ready_event_emitted();
		}
	}
}
3185
// Runs once a channel's monitor update completes: resumes the channel (replaying held
// messages), drains its blocked completion actions, updates batch-funding state (possibly
// broadcasting the batch funding tx once all batch channels are ready), and finally forwards,
// finalizes, or fails back the HTLCs that were released. Drops the peer-state and
// per-peer-state locks partway through, before taking other locks.
macro_rules! handle_monitor_update_completion {
	($self: ident, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		// Ask the channel for everything that was held back while the update was in flight.
		let mut updates = $chan.monitor_updating_restored(&&logger,
			&$self.node_signer, $self.chain_hash, &$self.default_configuration,
			$self.best_block.read().unwrap().height);
		let counterparty_node_id = $chan.context.get_counterparty_node_id();
		// If the channel just became ready and is usable, send our channel_update directly.
		let channel_update = if updates.channel_ready.is_some() && $chan.context.is_usable() {
			if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
				Some(events::MessageSendEvent::SendChannelUpdate {
					node_id: counterparty_node_id,
					msg,
				})
			} else { None }
		} else { None };

		let update_actions = $peer_state.monitor_update_blocked_actions
			.remove(&$chan.context.channel_id()).unwrap_or(Vec::new());

		let (htlc_forwards, decode_update_add_htlcs) = $self.handle_channel_resumption(
			&mut $peer_state.pending_msg_events, $chan, updates.raa,
			updates.commitment_update, updates.order, updates.accepted_htlcs, updates.pending_update_adds,
			updates.funding_broadcastable, updates.channel_ready,
			updates.announcement_sigs, updates.tx_signatures);
		if let Some(upd) = channel_update {
			$peer_state.pending_msg_events.push(upd);
		}

		let channel_id = $chan.context.channel_id();
		let unbroadcasted_batch_funding_txid = $chan.context.unbroadcasted_batch_funding_txid();
		// Release the peer locks before touching funding_batch_states / per_peer_state below.
		core::mem::drop($peer_state_lock);
		core::mem::drop($per_peer_state_lock);

		if let Some(txid) = unbroadcasted_batch_funding_txid {
			// Mark this channel's slot in the batch complete and check whether the whole
			// batch is now ready to broadcast.
			let mut funding_batch_states = $self.funding_batch_states.lock().unwrap();
			let mut batch_completed = false;
			if let Some(batch_state) = funding_batch_states.get_mut(&txid) {
				let channel_state = batch_state.iter_mut().find(|(chan_id, pubkey, _)| (
					*chan_id == channel_id &&
					*pubkey == counterparty_node_id
				));
				if let Some(channel_state) = channel_state {
					channel_state.2 = true;
				} else {
					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
				}
				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
			} else {
				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
			}

			if batch_completed {
				// All channels in the batch are ready: mark each batch-ready, emit their
				// ChannelPending events, and broadcast the shared funding transaction.
				let removed_batch_state = funding_batch_states.remove(&txid).into_iter().flatten();
				let per_peer_state = $self.per_peer_state.read().unwrap();
				let mut batch_funding_tx = None;
				for (channel_id, counterparty_node_id, _) in removed_batch_state {
					if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
						let mut peer_state = peer_state_mutex.lock().unwrap();
						if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
							batch_funding_tx = batch_funding_tx.or_else(|| chan.context.unbroadcasted_funding());
							chan.set_batch_ready();
							let mut pending_events = $self.pending_events.lock().unwrap();
							emit_channel_pending_event!(pending_events, chan);
						}
					}
				}
				if let Some(tx) = batch_funding_tx {
					log_info!($self.logger, "Broadcasting batch funding transaction with txid {}", tx.compute_txid());
					$self.tx_broadcaster.broadcast_transactions(&[&tx]);
				}
			}
		}

		$self.handle_monitor_update_completion_actions(update_actions);

		if let Some(forwards) = htlc_forwards {
			$self.forward_htlcs(&mut [forwards][..]);
		}
		if let Some(decode) = decode_update_add_htlcs {
			$self.push_decode_update_add_htlcs(decode);
		}
		$self.finalize_claims(updates.finalized_claimed_htlcs);
		for failure in updates.failed_htlcs.drain(..) {
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
			$self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
		}
	} }
}
3283
// Applies a new `ChannelMonitorUpdate`, tracking it in the peer's in-flight list and reacting
// to the persistence status. Evaluates to `true` iff the update completed synchronously.
// Arms, outer-to-inner:
//  - `_internal`: maps a `ChannelMonitorUpdateStatus` to true/false, panicking on
//    `UnrecoverableError`.
//  - `INITIAL_MONITOR`: status came from persisting the initial monitor; on completion runs
//    `handle_monitor_update_completion!`.
//  - `_internal_outer`: dedups the update into `in_flight_monitor_updates`, then either calls
//    the chain monitor (normal operation) or queues a `BackgroundEvent` if startup background
//    events have not yet been processed.
//  - `REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER`: completion only pops the in-flight entry;
//    blocked actions are handled later by the caller.
//  - `POST_CHANNEL_CLOSE`: for updates after a channel closed; once no updates remain in
//    flight, drops the locks and runs the channel's completion actions.
//  - default arm: for a live channel; once no updates remain in flight and no blocked updates
//    are pending, runs `handle_monitor_update_completion!`.
macro_rules! handle_new_monitor_update {
	($self: ident, $update_res: expr, $logger: expr, $channel_id: expr, _internal, $completed: expr) => { {
		debug_assert!($self.background_events_processed_since_startup.load(Ordering::Acquire));
		match $update_res {
			ChannelMonitorUpdateStatus::UnrecoverableError => {
				// Monitor persistence failed permanently: we cannot safely continue.
				let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
				log_error!($logger, "{}", err_str);
				panic!("{}", err_str);
			},
			ChannelMonitorUpdateStatus::InProgress => {
				log_debug!($logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
					$channel_id);
				false
			},
			ChannelMonitorUpdateStatus::Completed => {
				$completed;
				true
			},
		}
	} };
	($self: ident, $update_res: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, INITIAL_MONITOR) => {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		handle_new_monitor_update!($self, $update_res, logger, $chan.context.channel_id(), _internal,
			handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan))
	};
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $logger: expr,
		$chan_id: expr, $counterparty_node_id: expr, $in_flight_updates: ident, $update_idx: ident,
		_internal_outer, $completed: expr
	) => { {
		$in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo)
			.or_insert_with(Vec::new);
		// Avoid tracking the same update twice if it is re-applied.
		$update_idx = $in_flight_updates.iter().position(|upd| upd == &$update)
			.unwrap_or_else(|| {
				$in_flight_updates.push($update);
				$in_flight_updates.len() - 1
			});
		if $self.background_events_processed_since_startup.load(Ordering::Acquire) {
			let update_res = $self.chain_monitor.update_channel($funding_txo, &$in_flight_updates[$update_idx]);
			handle_new_monitor_update!($self, update_res, $logger, $chan_id, _internal, $completed)
		} else {
			// Too early to hit the chain monitor directly — replay the update as a
			// background event once startup processing runs.
			let event = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
				counterparty_node_id: $counterparty_node_id,
				funding_txo: $funding_txo,
				channel_id: $chan_id,
				update: $in_flight_updates[$update_idx].clone(),
			};
			$self.pending_background_events.lock().unwrap().push(event);
			false
		}
	} };
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state: expr, $chan_context: expr,
		REMAIN_LOCKED_UPDATE_ACTIONS_PROCESSED_LATER
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan_context, None);
		let chan_id = $chan_context.channel_id();
		let counterparty_node_id = $chan_context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
			})
	} };
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
		$per_peer_state_lock: expr, $counterparty_node_id: expr, $channel_id: expr, POST_CHANNEL_CLOSE
	) => { {
		let logger = WithContext::from(&$self.logger, Some($counterparty_node_id), Some($channel_id), None);
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger,
			$channel_id, $counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() {
					let update_actions = $peer_state.monitor_update_blocked_actions
						.remove(&$channel_id).unwrap_or(Vec::new());

					// The completion actions may take other locks; release ours first.
					mem::drop($peer_state_lock);
					mem::drop($per_peer_state_lock);

					$self.handle_monitor_update_completion_actions(update_actions);
				}
			})
	} };
	(
		$self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr,
		$per_peer_state_lock: expr, $chan: expr
	) => { {
		let logger = WithChannelContext::from(&$self.logger, &$chan.context, None);
		let chan_id = $chan.context.channel_id();
		let counterparty_node_id = $chan.context.get_counterparty_node_id();
		let in_flight_updates;
		let idx;
		handle_new_monitor_update!($self, $funding_txo, $update, $peer_state, logger, chan_id,
			counterparty_node_id, in_flight_updates, idx, _internal_outer,
			{
				let _ = in_flight_updates.remove(idx);
				if in_flight_updates.is_empty() && $chan.blocked_monitor_updates_pending() == 0 {
					handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
				}
			})
	} };
}
3405
/// Shared body for the various `process_pending_events` entry points.
///
/// `$event_to_handle` is assigned each pending event in turn and `$handle_event` is the
/// expression which actually handles it, evaluating to a `Result` — on `Err` we stop handling
/// further events, leaving the unhandled ones (and the failed one) queued for a later call.
macro_rules! process_events_body {
	($self: expr, $event_to_handle: expr, $handle_event: expr) => {
		let mut handling_failed = false;
		let mut processed_all_events = false;
		// Loop until the handler fails or the pending-event queue is drained, re-checking for
		// events which may have been pushed while we were handling the previous batch.
		while !handling_failed && !processed_all_events {
			// Only one thread may process events at a time. If another thread is already doing
			// so it will observe any events we queued when it loops, so we can simply return.
			if $self.pending_events_processor.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
				return;
			}

			let mut result;

			{
				// Hold the read side of the consistency lock while running background /
				// monitor-event processing, matching other persistence-relevant operations.
				let _read_guard = $self.total_consistency_lock.read().unwrap();

				result = $self.process_background_events();

				// Monitor events may push new pending events, so process them before snapshotting.
				if $self.process_pending_monitor_events() {
					result = NotifyOption::DoPersist;
				}
			}

			// Snapshot (clone) the queue so we can handle events without holding the lock.
			let pending_events = $self.pending_events.lock().unwrap().clone();
			if !pending_events.is_empty() {
				result = NotifyOption::DoPersist;
			}

			let mut post_event_actions = Vec::new();

			// Count successfully-handled events so we only drain those from the real queue below.
			let mut num_handled_events = 0;
			for (event, action_opt) in pending_events {
				log_trace!($self.logger, "Handling event {:?}...", event);
				$event_to_handle = event;
				let event_handling_result = $handle_event;
				log_trace!($self.logger, "Done handling event, result: {:?}", event_handling_result);
				match event_handling_result {
					Ok(()) => {
						if let Some(action) = action_opt {
							post_event_actions.push(action);
						}
						num_handled_events += 1;
					}
					Err(_e) => {
						// The handler asked us to stop; the failed event stays queued for replay.
						handling_failed = true;
						break;
					}
				}
			}

			{
				// Remove only the events we actually handled; release the processor flag while
				// still holding the queue lock so no queued event can be missed by a racer.
				let mut pending_events = $self.pending_events.lock().unwrap();
				pending_events.drain(..num_handled_events);
				processed_all_events = pending_events.is_empty();
				$self.pending_events_processor.store(false, Ordering::Release);
			}

			if !post_event_actions.is_empty() {
				$self.handle_post_event_actions(post_event_actions);
				// Post-event actions may generate more events; take another loop iteration.
				processed_all_events = false;
			}

			match result {
				NotifyOption::DoPersist => {
					$self.needs_persist_flag.store(true, Ordering::Release);
					$self.event_persist_notifier.notify();
				},
				NotifyOption::SkipPersistHandleEvents =>
					$self.event_persist_notifier.notify(),
				NotifyOption::SkipPersistNoEvents => {},
			}
		}
	}
}
3489
3490impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
3491where
3492 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
3493 T::Target: BroadcasterInterface,
3494 ES::Target: EntropySource,
3495 NS::Target: NodeSigner,
3496 SP::Target: SignerProvider,
3497 F::Target: FeeEstimator,
3498 R::Target: Router,
3499 MR::Target: MessageRouter,
3500 L::Target: Logger,
3501{
	/// Constructs a new `ChannelManager` with no channels and no peer state.
	///
	/// `current_timestamp` is seconds since the UNIX epoch; it seeds the highest-seen-timestamp
	/// counter (and, with the `dnssec` feature, the HRN resolver).
	pub fn new(
		fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
		entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig,
		params: ChainParameters, current_timestamp: u32,
	) -> Self {
		let mut secp_ctx = Secp256k1::new();
		// Randomize the secp context for side-channel resistance.
		secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
		let expanded_inbound_key = node_signer.get_inbound_payment_key();
		ChannelManager {
			default_configuration: config.clone(),
			chain_hash: ChainHash::using_genesis_block(params.network),
			fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
			chain_monitor,
			tx_broadcaster,
			router,
			message_router,

			best_block: RwLock::new(params.best_block),

			// All channel/HTLC tracking maps start empty.
			outbound_scid_aliases: Mutex::new(new_hash_set()),
			pending_outbound_payments: OutboundPayments::new(new_hash_map()),
			forward_htlcs: Mutex::new(new_hash_map()),
			decode_update_add_htlcs: Mutex::new(new_hash_map()),
			claimable_payments: Mutex::new(ClaimablePayments { claimable_payments: new_hash_map(), pending_claiming_payments: new_hash_map() }),
			pending_intercepted_htlcs: Mutex::new(new_hash_map()),
			outpoint_to_peer: Mutex::new(new_hash_map()),
			short_to_chan_info: FairRwLock::new(new_hash_map()),

			our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
			secp_ctx,

			inbound_payment_key: expanded_inbound_key,
			// Fresh random secrets for fake SCID generation and probe/payment-id authentication.
			fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),

			probing_cookie_secret: entropy_source.get_secure_random_bytes(),
			inbound_payment_id_secret: entropy_source.get_secure_random_bytes(),

			highest_seen_timestamp: AtomicUsize::new(current_timestamp as usize),

			per_peer_state: FairRwLock::new(new_hash_map()),

			pending_events: Mutex::new(VecDeque::new()),
			pending_events_processor: AtomicBool::new(false),
			pending_background_events: Mutex::new(Vec::new()),
			total_consistency_lock: RwLock::new(()),
			background_events_processed_since_startup: AtomicBool::new(false),
			event_persist_notifier: Notifier::new(),
			needs_persist_flag: AtomicBool::new(false),
			funding_batch_states: Mutex::new(BTreeMap::new()),

			pending_offers_messages: Mutex::new(Vec::new()),
			pending_async_payments_messages: Mutex::new(Vec::new()),
			pending_broadcast_messages: Mutex::new(Vec::new()),

			last_days_feerates: Mutex::new(VecDeque::new()),

			entropy_source,
			node_signer,
			signer_provider,

			logger,

			#[cfg(feature = "dnssec")]
			hrn_resolver: OMNameResolver::new(current_timestamp, params.best_block.height),
			#[cfg(feature = "dnssec")]
			pending_dns_onion_messages: Mutex::new(Vec::new()),

			#[cfg(feature = "_test_utils")]
			testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
		}
	}
3590
	/// Returns a reference to the `UserConfig` this `ChannelManager` was constructed with,
	/// which is used as the default configuration for new channels.
	pub fn get_current_default_configuration(&self) -> &UserConfig {
		&self.default_configuration
	}
3595
	// Test-only accessor for `create_and_insert_outbound_scid_alias`.
	#[cfg(test)]
	pub fn create_and_insert_outbound_scid_alias_for_test(&self) -> u64 {
		self.create_and_insert_outbound_scid_alias()
	}
3600
	/// Generates a new, unused outbound SCID alias, inserts it into
	/// `self.outbound_scid_aliases`, and returns it.
	///
	/// Loops until an alias is found which is non-zero and not already in use; panics if we
	/// somehow fail to find one after a million attempts (indicating broken randomness).
	fn create_and_insert_outbound_scid_alias(&self) -> u64 {
		let height = self.best_block.read().unwrap().height;
		let mut outbound_scid_alias = 0;
		let mut i = 0;
		loop {
			// Under fuzzing, use a deterministic counter instead of (fuzz-controlled) "RNG".
			if cfg!(fuzzing) { outbound_scid_alias += 1;
			} else {
				outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
			}
			// `insert` returning true means the alias was not previously present.
			if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
				break;
			}
			i += 1;
			if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels"); }
		}
		outbound_scid_alias
	}
3619
3620 pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, temporary_channel_id: Option<ChannelId>, override_config: Option<UserConfig>) -> Result<ChannelId, APIError> {
3653 if channel_value_satoshis < 1000 {
3654 return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
3655 }
3656
3657 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
3658 debug_assert!(&self.total_consistency_lock.try_write().is_err());
3660
3661 let per_peer_state = self.per_peer_state.read().unwrap();
3662
3663 let peer_state_mutex = per_peer_state.get(&their_network_key)
3664 .ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
3665
3666 let mut peer_state = peer_state_mutex.lock().unwrap();
3667
3668 if let Some(temporary_channel_id) = temporary_channel_id {
3669 if peer_state.channel_by_id.contains_key(&temporary_channel_id) {
3670 return Err(APIError::APIMisuseError{ err: format!("Channel with temporary channel ID {} already exists!", temporary_channel_id)});
3671 }
3672 }
3673
3674 let mut channel = {
3675 let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
3676 let their_features = &peer_state.latest_features;
3677 let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
3678 match OutboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
3679 their_features, channel_value_satoshis, push_msat, user_channel_id, config,
3680 self.best_block.read().unwrap().height, outbound_scid_alias, temporary_channel_id, &*self.logger)
3681 {
3682 Ok(res) => res,
3683 Err(e) => {
3684 self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
3685 return Err(e);
3686 },
3687 }
3688 };
3689 let logger = WithChannelContext::from(&self.logger, &channel.context, None);
3690 let res = channel.get_open_channel(self.chain_hash, &&logger);
3691
3692 let temporary_channel_id = channel.context.channel_id();
3693 match peer_state.channel_by_id.entry(temporary_channel_id) {
3694 hash_map::Entry::Occupied(_) => {
3695 if cfg!(fuzzing) {
3696 return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
3697 } else {
3698 panic!("RNG is bad???");
3699 }
3700 },
3701 hash_map::Entry::Vacant(entry) => { entry.insert(ChannelPhase::UnfundedOutboundV1(channel)); }
3702 }
3703
3704 if let Some(msg) = res {
3705 peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
3706 node_id: their_network_key,
3707 msg,
3708 });
3709 }
3710 Ok(temporary_channel_id)
3711 }
3712
	/// Returns [`ChannelDetails`] for every funded channel whose `(channel_id, channel)` pair
	/// passes the provided filter `f`, across all peers.
	fn list_funded_channels_with_filter<Fn: FnMut(&(&ChannelId, &Channel<SP>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
		// short_to_chan_info's length approximates the number of funded channels.
		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
		{
			let best_block_height = self.best_block.read().unwrap().height;
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				// Only funded channels are considered; unfunded phases are filtered out first.
				res.extend(peer_state.channel_by_id.iter()
					.filter_map(|(chan_id, phase)| match phase {
						ChannelPhase::Funded(chan) => Some((chan_id, chan)),
						_ => None,
					})
					.filter(f)
					.map(|(_channel_id, channel)| {
						ChannelDetails::from_channel_context(&channel.context, best_block_height,
							peer_state.latest_features.clone(), &self.fee_estimator)
					})
				);
			}
		}
		res
	}
3743
	/// Returns [`ChannelDetails`] for every channel we know of, in any phase (funded or not),
	/// across all peers.
	pub fn list_channels(&self) -> Vec<ChannelDetails> {
		// short_to_chan_info's length approximates the number of channels.
		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
		{
			let best_block_height = self.best_block.read().unwrap().height;
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				// Unlike list_funded_channels_with_filter, this includes unfunded phases too.
				for context in peer_state.channel_by_id.iter().map(|(_, phase)| phase.context()) {
					let details = ChannelDetails::from_channel_context(context, best_block_height,
						peer_state.latest_features.clone(), &self.fee_estimator);
					res.push(details);
				}
			}
		}
		res
	}
3769
	/// Returns [`ChannelDetails`] for each funded channel which is currently usable
	/// (i.e. `is_live()` - the peer is connected and the channel is in a normal state).
	pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
		self.list_funded_channels_with_filter(|&(_, ref channel)| channel.context.is_live())
	}
3782
3783 pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
3785 let best_block_height = self.best_block.read().unwrap().height;
3786 let per_peer_state = self.per_peer_state.read().unwrap();
3787
3788 if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
3789 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3790 let peer_state = &mut *peer_state_lock;
3791 let features = &peer_state.latest_features;
3792 let context_to_details = |context| {
3793 ChannelDetails::from_channel_context(context, best_block_height, features.clone(), &self.fee_estimator)
3794 };
3795 return peer_state.channel_by_id
3796 .iter()
3797 .map(|(_, phase)| phase.context())
3798 .map(context_to_details)
3799 .collect();
3800 }
3801 vec![]
3802 }
3803
3804 pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
3813 self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
3814 .filter_map(|(payment_id, pending_outbound_payment)| match pending_outbound_payment {
3815 PendingOutboundPayment::AwaitingInvoice { .. }
3816 | PendingOutboundPayment::AwaitingOffer { .. }
3817 | PendingOutboundPayment::InvoiceReceived { .. } =>
3819 {
3820 Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3821 },
3822 PendingOutboundPayment::StaticInvoiceReceived { .. } => {
3823 Some(RecentPaymentDetails::AwaitingInvoice { payment_id: *payment_id })
3824 },
3825 PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
3826 Some(RecentPaymentDetails::Pending {
3827 payment_id: *payment_id,
3828 payment_hash: *payment_hash,
3829 total_msat: *total_msat,
3830 })
3831 },
3832 PendingOutboundPayment::Abandoned { payment_hash, .. } => {
3833 Some(RecentPaymentDetails::Abandoned { payment_id: *payment_id, payment_hash: *payment_hash })
3834 },
3835 PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
3836 Some(RecentPaymentDetails::Fulfilled { payment_id: *payment_id, payment_hash: *payment_hash })
3837 },
3838 PendingOutboundPayment::Legacy { .. } => None
3839 })
3840 .collect()
3841 }
3842
	/// Begins closing the given channel: for a funded channel, initiates cooperative shutdown
	/// (sending our `shutdown` message); for an unfunded channel, force-closes it immediately
	/// without broadcast. Any HTLCs which cannot complete are failed backwards.
	fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, override_shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
		let mut shutdown_result = None;

		{
			let per_peer_state = self.per_peer_state.read().unwrap();

			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;

			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;

			match peer_state.channel_by_id.entry(channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						// Cooperative close path: ask the channel for its shutdown message and
						// any HTLCs which must be failed back to their origin.
						let funding_txo_opt = chan.context.get_funding_txo();
						let their_features = &peer_state.latest_features;
						let (shutdown_msg, mut monitor_update_opt, htlcs) =
							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
						failed_htlcs = htlcs;

						peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
							node_id: *counterparty_node_id,
							msg: shutdown_msg,
						});

						debug_assert!(monitor_update_opt.is_none() || !chan.is_shutdown(),
							"We can't both complete shutdown and generate a monitor update");

						if let Some(monitor_update) = monitor_update_opt.take() {
							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
						}
					} else {
						// Unfunded channel: no cooperative close possible, shut it down locally
						// (no broadcast - there is nothing on-chain yet).
						let mut shutdown_res = chan_phase_entry.get_mut().context_mut()
							.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
						remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
						shutdown_result = Some(shutdown_res);
					}
				},
				hash_map::Entry::Vacant(_) => {
					return Err(APIError::ChannelUnavailable {
						err: format!(
							"Channel with id {} not found for the passed counterparty node_id {}",
							channel_id, counterparty_node_id,
						)
					});
				},
			}
		}

		// Locks are released above - now fail the dropped HTLCs back (0x4000|8:
		// permanent_channel_failure).
		for htlc_source in failed_htlcs.drain(..) {
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
		}

		if let Some(shutdown_result) = shutdown_result {
			self.finish_close_channel(shutdown_result);
		}

		Ok(())
	}
3913
	/// Begins the cooperative close of the given channel, using the default feerate and the
	/// shutdown script negotiated (or generated) at channel open.
	pub fn close_channel(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey) -> Result<(), APIError> {
		self.close_channel_internal(channel_id, counterparty_node_id, None, None)
	}
3940
	/// Begins the cooperative close of the given channel, optionally overriding the closing
	/// feerate (`target_feerate_sats_per_1000_weight`) and/or the script we close to.
	pub fn close_channel_with_feerate_and_script(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>, shutdown_script: Option<ShutdownScript>) -> Result<(), APIError> {
		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
	}
3973
	/// Applies a [`ChannelMonitorUpdate`] for a channel which may or may not still exist in our
	/// channel map (e.g. one generated while closing the channel).
	fn apply_post_close_monitor_update(
		&self, counterparty_node_id: PublicKey, channel_id: ChannelId, funding_txo: OutPoint,
		monitor_update: ChannelMonitorUpdate,
	) {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock = per_peer_state.get(&counterparty_node_id)
			.expect("We must always have a peer entry for a peer with which we have channels that have ChannelMonitors")
			.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(channel_id) {
			hash_map::Entry::Occupied(mut chan_phase) => {
				if let ChannelPhase::Funded(chan) = chan_phase.get_mut() {
					// The channel still exists - hand the update to the normal in-channel path.
					handle_new_monitor_update!(self, funding_txo,
						monitor_update, peer_state_lock, peer_state, per_peer_state, chan);
					return;
				} else {
					debug_assert!(false, "We shouldn't have an update for a non-funded channel");
				}
			},
			hash_map::Entry::Vacant(_) => {},
		}

		// No funded channel entry remains - apply via the post-channel-close macro arm, which
		// also runs any blocked monitor-update completion actions once updates drain.
		handle_new_monitor_update!(
			self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state,
			counterparty_node_id, channel_id, POST_CHANNEL_CLOSE
		);
	}
4004
	/// Completes channel closure after the channel has been removed from our maps: fails the
	/// dropped outbound HTLCs backwards, applies any final monitor update, runs completion
	/// actions, tears down any funding batch, and queues `ChannelClosed` / `DiscardFunding`
	/// events.
	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
		// We must not hold any peer-state locks here: we take them below and may also recurse
		// into ourselves for other channels in the same funding batch.
		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
		#[cfg(debug_assertions)]
		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
		}

		let logger = WithContext::from(
			&self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), None
		);

		log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail",
			shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len());
		// Fail each dropped HTLC back with 0x4000|8 (permanent_channel_failure).
		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
		}
		if let Some((_, funding_txo, _channel_id, monitor_update)) = shutdown_res.monitor_update {
			// Defensive fallback; the update is expected to have been applied already.
			debug_assert!(false, "This should have been handled in `locked_close_channel`");
			self.apply_post_close_monitor_update(shutdown_res.counterparty_node_id, shutdown_res.channel_id, funding_txo, monitor_update);
		}
		if self.background_events_processed_since_startup.load(Ordering::Acquire) {
			// If all in-flight monitor updates for this channel have drained, run any blocked
			// monitor-update completion actions now (dropping the locks first).
			if let Some(funding_txo) = shutdown_res.channel_funding_txo {
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mtx) = per_peer_state.get(&shutdown_res.counterparty_node_id) {
					let mut peer_state = peer_state_mtx.lock().unwrap();
					if peer_state.in_flight_monitor_updates.get(&funding_txo).map(|l| l.is_empty()).unwrap_or(true) {
						let update_actions = peer_state.monitor_update_blocked_actions
							.remove(&shutdown_res.channel_id).unwrap_or(Vec::new());

						mem::drop(peer_state);
						mem::drop(per_peer_state);

						self.handle_monitor_update_completion_actions(update_actions);
					}
				}
			}
		}
		let mut shutdown_results = Vec::new();
		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
			// This channel was part of a not-yet-broadcast batch funding transaction: the whole
			// batch must be abandoned, force-closing every sibling channel.
			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
			let per_peer_state = self.per_peer_state.read().unwrap();
			let mut has_uncompleted_channel = None;
			for (channel_id, counterparty_node_id, state) in affected_channels {
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state = peer_state_mutex.lock().unwrap();
					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
						let mut close_res = chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure);
						locked_close_channel!(self, &mut *peer_state, chan.context(), close_res);
						shutdown_results.push(close_res);
					}
				}
				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
			}
			debug_assert!(
				has_uncompleted_channel.unwrap_or(true),
				"Closing a batch where all channels have completed initial monitor update",
			);
		}

		{
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push_back((events::Event::ChannelClosed {
				channel_id: shutdown_res.channel_id,
				user_channel_id: shutdown_res.user_channel_id,
				reason: shutdown_res.closure_reason,
				counterparty_node_id: Some(shutdown_res.counterparty_node_id),
				channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis),
				channel_funding_txo: shutdown_res.channel_funding_txo,
				last_local_balance_msat: Some(shutdown_res.last_local_balance_msat),
			}, None));

			// If the funding transaction was never broadcast, tell the user to discard it.
			if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx {
				let funding_info = if shutdown_res.is_manual_broadcast {
					FundingInfo::OutPoint {
						outpoint: shutdown_res.channel_funding_txo
							.expect("We had an unbroadcasted funding tx, so should also have had a funding outpoint"),
					}
				} else {
					FundingInfo::Tx{ transaction }
				};
				pending_events.push_back((events::Event::DiscardFunding {
					channel_id: shutdown_res.channel_id, funding_info
				}, None));
			}
		}
		// Recurse to finish closing any sibling batch channels shut down above (all locks are
		// released at this point, satisfying the debug assertions on entry).
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
4108
	/// Force-closes the given channel (or removes a pending inbound channel request), returning
	/// the counterparty's node id on success.
	///
	/// `peer_msg` being `Some` indicates the close was triggered by a peer's error message;
	/// `broadcast` controls whether our latest commitment transaction is broadcast.
	fn force_close_channel_with_peer(&self, channel_id: &ChannelId, peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
	-> Result<PublicKey, APIError> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(peer_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
		let (update_opt, counterparty_node_id) = {
			let mut peer_state = peer_state_mutex.lock().unwrap();
			let closure_reason = if let Some(peer_msg) = peer_msg {
				ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) }
			} else {
				ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(broadcast) }
			};
			let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id), None);
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) {
				log_error!(logger, "Force-closing channel {}", channel_id);
				let (mut shutdown_res, update_opt) = match chan_phase_entry.get_mut() {
					ChannelPhase::Funded(ref mut chan) => {
						(
							chan.context.force_shutdown(broadcast, closure_reason),
							// Funded channels get a final channel_update broadcast (if we can
							// build one) so the network learns the channel is gone.
							self.get_channel_update_for_broadcast(&chan).ok(),
						)
					},
					ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
					ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => {
						// Unfunded channels have nothing on-chain; never broadcast.
						(chan_phase_entry.get_mut().context_mut().force_shutdown(false, closure_reason), None)
					},
				};
				let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
				// finish_close_channel requires that no peer-state locks are held.
				mem::drop(peer_state);
				mem::drop(per_peer_state);
				self.finish_close_channel(shutdown_res);
				(update_opt, chan_phase.context().get_counterparty_node_id())
			} else if peer_state.inbound_channel_request_by_id.remove(channel_id).is_some() {
				// The channel was only a pending inbound request; removing it suffices.
				log_error!(logger, "Force-closing channel {}", &channel_id);
				(None, *peer_node_id)
			} else {
				return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, peer_node_id) });
			}
		};
		if let Some(update) = update_opt {
			let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
			pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
				msg: update
			});
		}

		Ok(counterparty_node_id)
	}
4164
	/// Force-closes the given channel and queues an error message containing `error_message`
	/// to be sent to the peer.
	fn force_close_sending_error(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, broadcast: bool, error_message: String)
	-> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		log_debug!(self.logger,
			"Force-closing channel, The error message sent to the peer : {}", error_message);
		match self.force_close_channel_with_peer(channel_id, &counterparty_node_id, None, broadcast) {
			Ok(counterparty_node_id) => {
				// Close succeeded - if we still have peer state, queue the error message.
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state = peer_state_mutex.lock().unwrap();
					peer_state.pending_msg_events.push(
						events::MessageSendEvent::HandleError {
							node_id: counterparty_node_id,
							action: msgs::ErrorAction::SendErrorMessage {
								msg: msgs::ErrorMessage { channel_id: *channel_id, data: error_message }
							},
						}
					);
				}
				Ok(())
			},
			Err(e) => Err(e)
		}
	}
4189
	/// Force-closes the given channel, broadcasting our latest local commitment transaction,
	/// and sends `error_message` to the peer.
	pub fn force_close_broadcasting_latest_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
	-> Result<(), APIError> {
		self.force_close_sending_error(channel_id, counterparty_node_id, true, error_message)
	}
4202
	/// Force-closes the given channel WITHOUT broadcasting our latest local commitment
	/// transaction, and sends `error_message` to the peer.
	pub fn force_close_without_broadcasting_txn(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, error_message: String)
	-> Result<(), APIError> {
		self.force_close_sending_error(channel_id, counterparty_node_id, false, error_message)
	}
4217
4218 pub fn force_close_all_channels_broadcasting_latest_txn(&self, error_message: String) {
4224 for chan in self.list_channels() {
4225 let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4226 }
4227 }
4228
4229 pub fn force_close_all_channels_without_broadcasting_txn(&self, error_message: String) {
4235 for chan in self.list_channels() {
4236 let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id, error_message.clone());
4237 }
4238 }
4239
	/// Checks whether an incoming HTLC may be forwarded over the given outbound channel,
	/// returning `Err((failure_message, failure_code))` per BOLT 4 semantics if not.
	fn can_forward_htlc_to_outgoing_channel(
		&self, chan: &mut Channel<SP>, msg: &msgs::UpdateAddHTLC, next_packet: &NextPacketDetails
	) -> Result<(), (&'static str, u16)> {
		// Refuse forwards into unannounced channels unless explicitly allowed by config.
		if !chan.context.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
			return Err(("Refusing to forward to a private channel based on our config.", 0x4000 | 10));
		}
		// With scid_privacy, only the outbound SCID alias may be used to route to us.
		if chan.context.get_channel_type().supports_scid_privacy() && next_packet.outgoing_scid != chan.context.outbound_scid_alias() {
			return Err(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10));
		}

		if !chan.context.is_live() {
			// Distinguish "peer disconnected" from other not-ready states via is_enabled().
			// NOTE(review): the branch conditions look inverted at first glance - confirm
			// is_enabled() semantics when disconnected against `ChannelContext`.
			if !chan.context.is_enabled() {
				return Err(("Forwarding channel has been disconnected for some time.", 0x1000 | 20));
			} else {
				return Err(("Forwarding channel is not in a ready state.", 0x1000 | 7));
			}
		}
		if next_packet.outgoing_amt_msat < chan.context.get_counterparty_htlc_minimum_msat() { return Err(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11));
		}
		// Finally, enforce our own fee/CLTV-delta config for this channel.
		if let Err((err, code)) = chan.htlc_satisfies_config(msg, next_packet.outgoing_amt_msat, next_packet.outgoing_cltv_value) {
			return Err((err, code));
		}

		Ok(())
	}
4279
4280 fn do_funded_channel_callback<X, C: Fn(&mut Channel<SP>) -> X>(
4283 &self, scid: u64, callback: C,
4284 ) -> Option<X> {
4285 let (counterparty_node_id, channel_id) = match self.short_to_chan_info.read().unwrap().get(&scid).cloned() {
4286 None => return None,
4287 Some((cp_id, id)) => (cp_id, id),
4288 };
4289 let per_peer_state = self.per_peer_state.read().unwrap();
4290 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4291 if peer_state_mutex_opt.is_none() {
4292 return None;
4293 }
4294 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4295 let peer_state = &mut *peer_state_lock;
4296 match peer_state.channel_by_id.get_mut(&channel_id).and_then(
4297 |chan_phase| if let ChannelPhase::Funded(chan) = chan_phase { Some(chan) } else { None }
4298 ) {
4299 None => None,
4300 Some(chan) => Some(callback(chan)),
4301 }
4302 }
4303
	/// Checks whether we can forward the given HTLC onwards: validates against the outgoing
	/// channel's constraints (or accepts intercept/phantom fake SCIDs), then checks the CLTV
	/// values. Returns `Err((failure_message, failure_code))` on rejection.
	fn can_forward_htlc(
		&self, msg: &msgs::UpdateAddHTLC, next_packet_details: &NextPacketDetails
	) -> Result<(), (&'static str, u16)> {
		match self.do_funded_channel_callback(next_packet_details.outgoing_scid, |chan: &mut Channel<SP>| {
			self.can_forward_htlc_to_outgoing_channel(chan, msg, next_packet_details)
		}) {
			Some(Ok(())) => {},
			Some(Err(e)) => return Err(e),
			None => {
				// No such funded channel. The forward is still acceptable if the SCID is one of
				// our fake SCIDs: an intercept SCID (when interception is enabled) or a phantom
				// node SCID. Otherwise fail with 0x4000|10 (unknown_next_peer).
				if (self.default_configuration.accept_intercept_htlcs &&
					fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)) ||
					fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, next_packet_details.outgoing_scid, &self.chain_hash)
				{} else {
					return Err(("Don't have available channel for forwarding as requested.", 0x4000 | 10));
				}
			}
		}

		// Regardless of channel, the HTLC's CLTV values must leave us enough margin.
		let cur_height = self.best_block.read().unwrap().height + 1;
		if let Err((err_msg, err_code)) = check_incoming_htlc_cltv(
			cur_height, next_packet_details.outgoing_cltv_value, msg.cltv_expiry
		) {
			return Err((err_msg, err_code));
		}

		Ok(())
	}
4333
	/// Builds the failure message for an HTLC we are rejecting at `update_add_htlc` time,
	/// choosing between a `Malformed` response (for blinded HTLCs where we are not the intro
	/// node) and an encrypted `Relay` onion failure.
	fn htlc_failure_from_update_add_err(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, err_msg: &'static str,
		err_code: u16, is_intro_node_blinded_forward: bool,
		shared_secret: &[u8; 32]
	) -> HTLCFailureMsg {
		// Failure data buffer; sized for the small fixed fields written below.
		let mut res = VecWriter(Vec::with_capacity(8 + 2));
		if err_code & 0x1000 == 0x1000 {
			// UPDATE-class errors carry extra data per BOLT 4: the offending amount for
			// amount_below_minimum/fee_insufficient, the expiry for incorrect_cltv_expiry,
			// and a flags field for channel_disabled.
			if err_code == 0x1000 | 11 || err_code == 0x1000 | 12 {
				msg.amount_msat.write(&mut res).expect("Writes cannot fail");
			}
			else if err_code == 0x1000 | 13 {
				msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
			}
			else if err_code == 0x1000 | 20 {
				// channel_disabled flags field.
				0u16.write(&mut res).expect("Writes cannot fail");
			}
			// Trailing 0u16 - presumably a zero-length channel_update; confirm against BOLT 4.
			(0u16).write(&mut res).expect("Writes cannot fail");
		}

		log_info!(
			WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash)),
			"Failed to accept/forward incoming HTLC: {}", err_msg
		);
		// A blinding point on the incoming HTLC means it was blinded-received; respond with
		// update_fail_malformed_htlc carrying INVALID_ONION_BLINDING.
		if msg.blinding_point.is_some() {
			return HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
				channel_id: msg.channel_id,
				htlc_id: msg.htlc_id,
				sha256_of_onion: [0; 32],
				failure_code: INVALID_ONION_BLINDING,
			});
		}

		// As the intro node of a blinded path we must not leak the real error; substitute
		// INVALID_ONION_BLINDING with all-zero data.
		let (err_code, err_data) = if is_intro_node_blinded_forward {
			(INVALID_ONION_BLINDING, &[0; 32][..])
		} else {
			(err_code, &res.0[..])
		};
		HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
			channel_id: msg.channel_id,
			htlc_id: msg.htlc_id,
			reason: HTLCFailReason::reason(err_code, err_data.to_vec())
				.get_encrypted_failure_packet(shared_secret, &None),
		})
	}
4382
	/// Decodes the onion of an incoming `update_add_htlc`, returning the decoded hop, the onion
	/// shared secret, and (for forwards) the next packet's ephemeral pubkey. On failure, returns
	/// the failure message that should be sent back to the counterparty.
	fn decode_update_add_htlc_onion(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey,
	) -> Result<
		(onion_utils::Hop, [u8; 32], Option<Result<PublicKey, secp256k1::Error>>), HTLCFailureMsg
	> {
		let (next_hop, shared_secret, next_packet_details_opt) = decode_incoming_update_add_htlc_onion(
			msg, &*self.node_signer, &*self.logger, &self.secp_ctx
		)?;

		// No next-packet details means this HTLC terminates at us; there is no forwarding to
		// validate.
		let next_packet_details = match next_packet_details_opt {
			Some(next_packet_details) => next_packet_details,
			None => return Ok((next_hop, shared_secret, None)),
		};

		// This is a forward: validate the outgoing channel and CLTV now, mapping any rejection
		// into the appropriate (possibly blinded-path-aware) failure message.
		self.can_forward_htlc(&msg, &next_packet_details).map_err(|e| {
			let (err_msg, err_code) = e;
			self.htlc_failure_from_update_add_err(
				msg, counterparty_node_id, err_msg, err_code,
				next_hop.is_intro_node_blinded_forward(), &shared_secret
			)
		})?;

		Ok((next_hop, shared_secret, Some(next_packet_details.next_packet_pubkey)))
	}
4410
	/// Decides what to do with an incoming HTLC whose onion has already been decoded: either
	/// queue it ([`PendingHTLCStatus::Forward`]) or fail it back ([`PendingHTLCStatus::Fail`]).
	fn construct_pending_htlc_status<'a>(
		&self, msg: &msgs::UpdateAddHTLC, counterparty_node_id: &PublicKey, shared_secret: [u8; 32],
		decoded_hop: onion_utils::Hop, allow_underpay: bool,
		next_packet_pubkey_opt: Option<Result<PublicKey, secp256k1::Error>>,
	) -> PendingHTLCStatus {
		// Builds the failure status for an error. HTLCs with a blinding point set must be failed
		// via update_fail_malformed_htlc with the invalid_onion_blinding code.
		macro_rules! return_err {
			($msg: expr, $err_code: expr, $data: expr) => {
				{
					let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), Some(msg.payment_hash));
					log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
					if msg.blinding_point.is_some() {
						return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
							msgs::UpdateFailMalformedHTLC {
								channel_id: msg.channel_id,
								htlc_id: msg.htlc_id,
								sha256_of_onion: [0; 32],
								failure_code: INVALID_ONION_BLINDING,
							}
						))
					}
					return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
						channel_id: msg.channel_id,
						htlc_id: msg.htlc_id,
						reason: HTLCFailReason::reason($err_code, $data.to_vec())
							.get_encrypted_failure_packet(&shared_secret, &None),
					}));
				}
			}
		}
		match decoded_hop {
			// We are the final recipient of this HTLC.
			onion_utils::Hop::Receive(next_hop_data) => {
				let current_height: u32 = self.best_block.read().unwrap().height;
				match create_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash,
					msg.amount_msat, msg.cltv_expiry, None, allow_underpay, msg.skimmed_fee_msat,
					current_height)
				{
					Ok(info) => {
						PendingHTLCStatus::Forward(info)
					},
					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
				}
			},
			// We are an intermediate hop: build the info needed to forward the onion onward.
			onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
				match create_fwd_pending_htlc_info(msg, next_hop_data, next_hop_hmac,
					new_packet_bytes, shared_secret, next_packet_pubkey_opt) {
					Ok(info) => PendingHTLCStatus::Forward(info),
					Err(InboundHTLCErr { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
				}
			}
		}
	}
4467
4468 fn get_channel_update_for_broadcast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4479 if !chan.context.should_announce() {
4480 return Err(LightningError {
4481 err: "Cannot broadcast a channel_update for a private channel".to_owned(),
4482 action: msgs::ErrorAction::IgnoreError
4483 });
4484 }
4485 if chan.context.get_short_channel_id().is_none() {
4486 return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
4487 }
4488 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4489 log_trace!(logger, "Attempting to generate broadcast channel update for channel {}", &chan.context.channel_id());
4490 self.get_channel_update_for_unicast(chan)
4491 }
4492
4493 fn get_channel_update_for_unicast(&self, chan: &Channel<SP>) -> Result<msgs::ChannelUpdate, LightningError> {
4505 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4506 log_trace!(logger, "Attempting to generate channel update for channel {}", chan.context.channel_id());
4507 let short_channel_id = match chan.context.get_short_channel_id().or(chan.context.latest_inbound_scid_alias()) {
4508 None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
4509 Some(id) => id,
4510 };
4511
4512 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
4513 log_trace!(logger, "Generating channel update for channel {}", chan.context.channel_id());
4514 let were_node_one = self.our_network_pubkey.serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
4515 let enabled = chan.context.is_enabled();
4516
4517 let unsigned = msgs::UnsignedChannelUpdate {
4518 chain_hash: self.chain_hash,
4519 short_channel_id,
4520 timestamp: chan.context.get_update_time_counter(),
4521 message_flags: 1, channel_flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
4523 cltv_expiry_delta: chan.context.get_cltv_expiry_delta(),
4524 htlc_minimum_msat: chan.context.get_counterparty_htlc_minimum_msat(),
4525 htlc_maximum_msat: chan.context.get_announced_htlc_max_msat(),
4526 fee_base_msat: chan.context.get_outbound_forwarding_fee_base_msat(),
4527 fee_proportional_millionths: chan.context.get_fee_proportional_millionths(),
4528 excess_data: Vec::new(),
4529 };
4530 let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
4535
4536 Ok(msgs::ChannelUpdate {
4537 signature: sig,
4538 contents: unsigned
4539 })
4540 }
4541
4542 #[cfg(test)]
4543 pub(crate) fn test_send_payment_along_path(&self, path: &Path, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
4544 let _lck = self.total_consistency_lock.read().unwrap();
4545 self.send_payment_along_path(SendAlongPathArgs {
4546 path, payment_hash, recipient_onion: &recipient_onion, total_value,
4547 cur_height, payment_id, keysend_preimage, invoice_request: None, session_priv_bytes
4548 })
4549 }
4550
	/// Sends an HTLC for `total_value` along the single `path`, committing it on the first-hop
	/// channel. Errors if the first hop cannot be found, is not live, or rejects the HTLC.
	fn send_payment_along_path(&self, args: SendAlongPathArgs) -> Result<(), APIError> {
		let SendAlongPathArgs {
			path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage,
			invoice_request, session_priv_bytes
		} = args;
		// Callers must already hold the total_consistency_lock (read).
		debug_assert!(self.total_consistency_lock.try_write().is_err());
		let prng_seed = self.entropy_source.get_secure_random_bytes();
		let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");

		// Construct the full onion packet for this path up front.
		let (onion_packet, htlc_msat, htlc_cltv) = onion_utils::create_payment_onion(
			&self.secp_ctx, &path, &session_priv, total_value, recipient_onion, cur_height,
			payment_hash, keysend_preimage, invoice_request, prng_seed
		).map_err(|e| {
			let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
			log_error!(logger, "Failed to build an onion for path for payment hash {}", payment_hash);
			e
		})?;

		// The `loop` only exists as a break target for `break_chan_phase_entry!`; the success
		// path returns from inside the loop body, failures break out with `err` set.
		let err: Result<(), _> = loop {
			// Resolve the first hop's SCID to a (peer, channel id) pair.
			let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.hops.first().unwrap().short_channel_id) {
				None => {
					let logger = WithContext::from(&self.logger, Some(path.hops.first().unwrap().pubkey), None, Some(*payment_hash));
					log_error!(logger, "Failed to find first-hop for payment hash {}", payment_hash);
					return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()})
				},
				Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
			};

			let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(id), Some(*payment_hash));
			log_trace!(logger,
				"Attempting to send payment with payment hash {} along path with next hop {}",
				payment_hash, path.hops.first().unwrap().short_channel_id);

			// Lock order: per_peer_state read lock, then the individual peer's mutex.
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
				.ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(id) {
				match chan_phase_entry.get_mut() {
					ChannelPhase::Funded(chan) => {
						if !chan.context.is_live() {
							return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
						}
						let funding_txo = chan.context.get_funding_txo().unwrap();
						let logger = WithChannelContext::from(&self.logger, &chan.context, Some(*payment_hash));
						// Add the HTLC and (if possible) build a commitment update in one step.
						let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(),
							htlc_cltv, HTLCSource::OutboundRoute {
								path: path.clone(),
								session_priv: session_priv.clone(),
								first_hop_htlc_msat: htlc_msat,
								payment_id,
							}, onion_packet, None, &self.fee_estimator, &&logger);
						match break_chan_phase_entry!(self, peer_state, send_res, chan_phase_entry) {
							Some(monitor_update) => {
								match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) {
									false => {
										// The monitor update did not complete synchronously; the
										// HTLC goes out once it does.
										return Err(APIError::MonitorUpdateInProgress);
									},
									true => {},
								}
							},
							None => {},
						}
					},
					_ => return Err(APIError::ChannelUnavailable{err: "Channel to first hop is unfunded".to_owned()}),
				};
			} else {
				return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
			}
			return Ok(());
		};
		// A channel error broke out of the loop above: handle it (possibly force-closing) and
		// map it into an API error for the caller.
		match handle_error!(self, err, path.hops.first().unwrap().pubkey) {
			Ok(_) => unreachable!(),
			Err(e) => {
				Err(APIError::ChannelUnavailable { err: e.err })
			},
		}
	}
4641
4642 pub fn send_payment_with_route(
4647 &self, mut route: Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields,
4648 payment_id: PaymentId
4649 ) -> Result<(), RetryableSendFailure> {
4650 let best_block_height = self.best_block.read().unwrap().height;
4651 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4652 let route_params = route.route_params.clone().unwrap_or_else(|| {
4653 let (payee_node_id, cltv_delta) = route.paths.first()
4655 .and_then(|path| path.hops.last().map(|hop| (hop.pubkey, hop.cltv_expiry_delta as u32)))
4656 .unwrap_or_else(|| (PublicKey::from_slice(&[2; 32]).unwrap(), MIN_FINAL_CLTV_EXPIRY_DELTA as u32));
4657 let dummy_payment_params = PaymentParameters::from_node_id(payee_node_id, cltv_delta);
4658 RouteParameters::from_payment_params_and_value(dummy_payment_params, route.get_total_amount())
4659 });
4660 if route.route_params.is_none() { route.route_params = Some(route_params.clone()); }
4661 let router = FixedRouter::new(route);
4662 self.pending_outbound_payments
4663 .send_payment(payment_hash, recipient_onion, payment_id, Retry::Attempts(0),
4664 route_params, &&router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4665 &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4666 &self.pending_events, |args| self.send_payment_along_path(args))
4667 }
4668
4669 pub fn send_payment(
4704 &self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId,
4705 route_params: RouteParameters, retry_strategy: Retry
4706 ) -> Result<(), RetryableSendFailure> {
4707 let best_block_height = self.best_block.read().unwrap().height;
4708 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4709 self.pending_outbound_payments
4710 .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
4711 &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
4712 &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
4713 &self.pending_events, |args| self.send_payment_along_path(args))
4714 }
4715
	/// Test-only: sends over an explicit route with caller-provided per-path onion session keys,
	/// bypassing route-finding.
	#[cfg(test)]
	pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion,
			keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer,
			best_block_height, |args| self.send_payment_along_path(args))
	}
4724
	/// Test-only: registers a new pending outbound payment for `route`, returning the per-path
	/// onion session private keys that would be used to send it.
	#[cfg(test)]
	pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
		let best_block_height = self.best_block.read().unwrap().height;
		self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
	}
4730
	/// Test-only: overwrites the payment metadata recorded for a pending outbound payment.
	#[cfg(test)]
	pub(crate) fn test_set_payment_metadata(&self, payment_id: PaymentId, new_payment_metadata: Option<Vec<u8>>) {
		self.pending_outbound_payments.test_set_payment_metadata(payment_id, new_payment_metadata);
	}
4735
4736 pub fn send_payment_for_bolt12_invoice(
4757 &self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
4758 ) -> Result<(), Bolt12PaymentError> {
4759 match self.verify_bolt12_invoice(invoice, context) {
4760 Ok(payment_id) => self.send_payment_for_verified_bolt12_invoice(invoice, payment_id),
4761 Err(()) => Err(Bolt12PaymentError::UnexpectedInvoice),
4762 }
4763 }
4764
	/// Verifies that `invoice` was produced in response to one of our outbound BOLT 12 flows,
	/// returning the [`PaymentId`] it corresponds to on success.
	fn verify_bolt12_invoice(
		&self, invoice: &Bolt12Invoice, context: Option<&OffersContext>,
	) -> Result<PaymentId, ()> {
		let secp_ctx = &self.secp_ctx;
		let expanded_key = &self.inbound_payment_key;

		match context {
			// A refund invoice without blinded paths is authenticated via the payer metadata we
			// embedded in it.
			None if invoice.is_for_refund_without_paths() => {
				invoice.verify_using_metadata(expanded_key, secp_ctx)
			},
			// Invoices solicited through an outbound-payment context are authenticated using the
			// payment id and nonce we stashed in that context.
			Some(&OffersContext::OutboundPayment { payment_id, nonce, .. }) => {
				invoice.verify_using_payer_data(payment_id, nonce, expanded_key, secp_ctx)
			},
			// Any other combination is not something we asked for.
			_ => Err(()),
		}
	}
4781
4782 fn send_payment_for_verified_bolt12_invoice(&self, invoice: &Bolt12Invoice, payment_id: PaymentId) -> Result<(), Bolt12PaymentError> {
4783 let best_block_height = self.best_block.read().unwrap().height;
4784 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4785 let features = self.bolt12_invoice_features();
4786 self.pending_outbound_payments
4787 .send_payment_for_bolt12_invoice(
4788 invoice, payment_id, &self.router, self.list_usable_channels(), features,
4789 || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
4790 &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
4791 |args| self.send_payment_along_path(args)
4792 )
4793 }
4794
	/// Records receipt of a [`StaticInvoice`] for a pending async payment and queues
	/// [`HeldHtlcAvailable`] onion messages to the invoice's message paths so the recipient side
	/// can respond via our authenticated reply paths.
	#[cfg(async_payments)]
	fn initiate_async_payment(
		&self, invoice: &StaticInvoice, payment_id: PaymentId
	) -> Result<(), Bolt12PaymentError> {
		let mut res = Ok(());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let best_block_height = self.best_block.read().unwrap().height;
			let features = self.bolt12_invoice_features();
			// Record the invoice against the pending outbound payment.
			let outbound_pmts_res = self.pending_outbound_payments.static_invoice_received(
				invoice, payment_id, features, best_block_height, &*self.entropy_source,
				&self.pending_events
			);
			match outbound_pmts_res {
				Ok(()) => {},
				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
					// These errors imply nothing changed; skip persistence.
					res = outbound_pmts_res.map(|_| ());
					return NotifyOption::SkipPersistNoEvents
				},
				Err(e) => {
					res = Err(e);
					return NotifyOption::DoPersist
				}
			};

			// Build blinded reply paths carrying an HMAC-authenticated context so replies can be
			// tied back to this payment id.
			let nonce = Nonce::from_entropy_source(&*self.entropy_source);
			let hmac = payment_id.hmac_for_async_payment(nonce, &self.inbound_payment_key);
			let reply_paths = match self.create_blinded_paths(
				MessageContext::AsyncPayments(
					AsyncPaymentsContext::OutboundPayment { payment_id, nonce, hmac }
				)
			) {
				Ok(paths) => paths,
				Err(()) => {
					// Without reply paths the payment cannot proceed; abandon it.
					self.abandon_payment_with_reason(payment_id, PaymentFailureReason::BlindedPathCreationFailed);
					res = Err(Bolt12PaymentError::BlindedPathCreationFailed);
					return NotifyOption::DoPersist
				}
			};

			// Queue one HeldHtlcAvailable message per (invoice path, reply path) pair, capped at
			// HTLC_AVAILABLE_LIMIT messages total.
			let mut pending_async_payments_messages = self.pending_async_payments_messages.lock().unwrap();
			const HTLC_AVAILABLE_LIMIT: usize = 10;
			reply_paths
				.iter()
				.flat_map(|reply_path| invoice.message_paths().iter().map(move |invoice_path| (invoice_path, reply_path)))
				.take(HTLC_AVAILABLE_LIMIT)
				.for_each(|(invoice_path, reply_path)| {
					let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
						destination: Destination::BlindedPath(invoice_path.clone()),
						reply_path: reply_path.clone(),
					};
					let message = AsyncPaymentsMessage::HeldHtlcAvailable(HeldHtlcAvailable {});
					pending_async_payments_messages.push((message, instructions));
				});

			NotifyOption::DoPersist
		});

		res
	}
4854
	/// Sends the payment whose static invoice was previously recorded by
	/// [`Self::initiate_async_payment`].
	#[cfg(async_payments)]
	fn send_payment_for_static_invoice(
		&self, payment_id: PaymentId
	) -> Result<(), Bolt12PaymentError> {
		let best_block_height = self.best_block.read().unwrap().height;
		let mut res = Ok(());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let outbound_pmts_res = self.pending_outbound_payments.send_payment_for_static_invoice(
				payment_id, &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
				&self.entropy_source, &self.node_signer, &self, &self.secp_ctx, best_block_height,
				&self.logger, &self.pending_events, |args| self.send_payment_along_path(args)
			);
			match outbound_pmts_res {
				Err(Bolt12PaymentError::UnexpectedInvoice) | Err(Bolt12PaymentError::DuplicateInvoice) => {
					// These errors imply nothing changed; skip persistence.
					res = outbound_pmts_res.map(|_| ());
					NotifyOption::SkipPersistNoEvents
				},
				other_res => {
					res = other_res;
					NotifyOption::DoPersist
				}
			}
		});
		res
	}
4880
	/// Marks the outbound payment with the given [`PaymentId`] as abandoned by the user; the
	/// outbound-payment tracker will stop retrying it.
	pub fn abandon_payment(&self, payment_id: PaymentId) {
		self.abandon_payment_with_reason(payment_id, PaymentFailureReason::UserAbandoned)
	}
4909
	/// Abandons the payment, recording `reason` so it can be surfaced via the pending events
	/// queue.
	fn abandon_payment_with_reason(&self, payment_id: PaymentId, reason: PaymentFailureReason) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.pending_outbound_payments.abandon_payment(payment_id, reason, &self.pending_events);
	}
4914
4915 pub fn send_spontaneous_payment(
4933 &self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields,
4934 payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry
4935 ) -> Result<PaymentHash, RetryableSendFailure> {
4936 let best_block_height = self.best_block.read().unwrap().height;
4937 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4938 self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
4939 payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
4940 || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
4941 &self.logger, &self.pending_events, |args| self.send_payment_along_path(args))
4942 }
4943
4944 pub fn send_probe(&self, path: Path) -> Result<(PaymentHash, PaymentId), ProbeSendFailure> {
4948 let best_block_height = self.best_block.read().unwrap().height;
4949 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
4950 self.pending_outbound_payments.send_probe(path, self.probing_cookie_secret,
4951 &self.entropy_source, &self.node_signer, best_block_height,
4952 |args| self.send_payment_along_path(args))
4953 }
4954
	/// Test-only: checks whether the (hash, id) pair matches a probe derived from this node's
	/// probing cookie secret.
	#[cfg(test)]
	pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
		outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
	}
4961
4962 pub fn send_spontaneous_preflight_probes(
4967 &self, node_id: PublicKey, amount_msat: u64, final_cltv_expiry_delta: u32,
4968 liquidity_limit_multiplier: Option<u64>,
4969 ) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
4970 let payment_params =
4971 PaymentParameters::from_node_id(node_id, final_cltv_expiry_delta);
4972
4973 let route_params = RouteParameters::from_payment_params_and_value(payment_params, amount_msat);
4974
4975 self.send_preflight_probes(route_params, liquidity_limit_multiplier)
4976 }
4977
	/// Finds a route for `route_params` and sends a probe along each of its paths (after trimming
	/// likely-unannounced trailing hops), returning the (hash, id) pair for every probe sent.
	pub fn send_preflight_probes(
		&self, route_params: RouteParameters, liquidity_limit_multiplier: Option<u64>,
	) -> Result<Vec<(PaymentHash, PaymentId)>, ProbeSendFailure> {
		// Require `multiplier`x the probed value in first-hop outbound capacity (default 3x).
		let liquidity_limit_multiplier = liquidity_limit_multiplier.unwrap_or(3);

		let payer = self.get_our_node_id();
		let usable_channels = self.list_usable_channels();
		let first_hops = usable_channels.iter().collect::<Vec<_>>();
		let inflight_htlcs = self.compute_inflight_htlcs();

		let route = self
			.router
			.find_route(&payer, &route_params, Some(&first_hops), inflight_htlcs)
			.map_err(|e| {
				log_error!(self.logger, "Failed to find path for payment probe: {:?}", e);
				ProbeSendFailure::RouteNotFound
			})?;

		// Cumulative liquidity this call would consume per first-hop channel.
		let mut used_liquidity_map = hash_map_with_capacity(first_hops.len());

		let mut res = Vec::new();

		for mut path in route.paths {
			// Pop trailing hops whose channels are likely unannounced, folding each removed
			// hop's value into the fee of the new last hop.
			while let Some(last_path_hop) = path.hops.last() {
				if last_path_hop.maybe_announced_channel {
					break;
				} else {
					log_debug!(
						self.logger,
						"Avoided sending payment probe all the way to last hop {} as it is likely unannounced.",
						last_path_hop.short_channel_id
					);
					let final_value_msat = path.final_value_msat();
					path.hops.pop();
					if let Some(new_last) = path.hops.last_mut() {
						new_last.fee_msat += final_value_msat;
					}
				}
			}

			// After trimming, a probe needs at least two hops to be worth sending.
			if path.hops.len() < 2 {
				log_debug!(
					self.logger,
					"Skipped sending payment probe over path with less than two hops."
				);
				continue;
			}

			if let Some(first_path_hop) = path.hops.first() {
				if let Some(first_hop) = first_hops.iter().find(|h| {
					h.get_outbound_payment_scid() == Some(first_path_hop.short_channel_id)
				}) {
					let path_value = path.final_value_msat() + path.fee_msat();
					let used_liquidity =
						used_liquidity_map.entry(first_path_hop.short_channel_id).or_insert(0);

					// Skip the probe if the first hop lacks `multiplier`x headroom for the
					// cumulative probed value over this channel.
					if first_hop.next_outbound_htlc_limit_msat
						< (*used_liquidity + path_value) * liquidity_limit_multiplier
					{
						log_debug!(self.logger, "Skipped sending payment probe to avoid putting channel {} under the liquidity limit.", first_path_hop.short_channel_id);
						continue;
					} else {
						*used_liquidity += path_value;
					}
				}
			}

			// Probes are sent one path at a time; the first send failure aborts the batch.
			res.push(self.send_probe(path).map_err(|e| {
				log_error!(self.logger, "Failed to send pre-flight probe: {:?}", e);
				e
			})?);
		}

		Ok(res)
	}
5071
5072 fn funding_transaction_generated_intern<FundingOutput: FnMut(&OutboundV1Channel<SP>) -> Result<OutPoint, &'static str>>(
5075 &self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, is_batch_funding: bool,
5076 mut find_funding_output: FundingOutput, is_manual_broadcast: bool,
5077 ) -> Result<(), APIError> {
5078 let per_peer_state = self.per_peer_state.read().unwrap();
5079 let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
5080 .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
5081
5082 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5083 let peer_state = &mut *peer_state_lock;
5084 let funding_txo;
5085 let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(&temporary_channel_id) {
5086 Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => {
5087 macro_rules! close_chan { ($err: expr, $api_err: expr, $chan: expr) => { {
5088 let counterparty;
5089 let err = if let ChannelError::Close((msg, reason)) = $err {
5090 let channel_id = $chan.context.channel_id();
5091 counterparty = chan.context.get_counterparty_node_id();
5092 let shutdown_res = $chan.context.force_shutdown(false, reason);
5093 MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)
5094 } else { unreachable!(); };
5095
5096 mem::drop(peer_state_lock);
5097 mem::drop(per_peer_state);
5098 let _: Result<(), _> = handle_error!(self, Err(err), counterparty);
5099 Err($api_err)
5100 } } }
5101 match find_funding_output(&chan) {
5102 Ok(found_funding_txo) => funding_txo = found_funding_txo,
5103 Err(err) => {
5104 let chan_err = ChannelError::close(err.to_owned());
5105 let api_err = APIError::APIMisuseError { err: err.to_owned() };
5106 return close_chan!(chan_err, api_err, chan);
5107 },
5108 }
5109
5110 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
5111 let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger);
5112 match funding_res {
5113 Ok(funding_msg) => (chan, funding_msg),
5114 Err((mut chan, chan_err)) => {
5115 let api_err = APIError::ChannelUnavailable { err: "Signer refused to sign the initial commitment transaction".to_owned() };
5116 return close_chan!(chan_err, api_err, chan);
5117 }
5118 }
5119 },
5120 Some(phase) => {
5121 peer_state.channel_by_id.insert(temporary_channel_id, phase);
5122 return Err(APIError::APIMisuseError {
5123 err: format!(
5124 "Channel with id {} for the passed counterparty node_id {} is not an unfunded, outbound V1 channel",
5125 temporary_channel_id, counterparty_node_id),
5126 })
5127 },
5128 None => return Err(APIError::ChannelUnavailable {err: format!(
5129 "Channel with id {} not found for the passed counterparty node_id {}",
5130 temporary_channel_id, counterparty_node_id),
5131 }),
5132 };
5133
5134 if let Some(msg) = msg_opt {
5135 peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
5136 node_id: chan.context.get_counterparty_node_id(),
5137 msg,
5138 });
5139 }
5140 if is_manual_broadcast {
5141 chan.context.set_manual_broadcast();
5142 }
5143 match peer_state.channel_by_id.entry(chan.context.channel_id()) {
5144 hash_map::Entry::Occupied(_) => {
5145 panic!("Generated duplicate funding txid?");
5146 },
5147 hash_map::Entry::Vacant(e) => {
5148 let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
5149 match outpoint_to_peer.entry(funding_txo) {
5150 hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); },
5151 hash_map::Entry::Occupied(o) => {
5152 let err = format!(
5153 "An existing channel using outpoint {} is open with peer {}",
5154 funding_txo, o.get()
5155 );
5156 mem::drop(outpoint_to_peer);
5157 mem::drop(peer_state_lock);
5158 mem::drop(per_peer_state);
5159 let reason = ClosureReason::ProcessingError { err: err.clone() };
5160 self.finish_close_channel(chan.context.force_shutdown(true, reason));
5161 return Err(APIError::ChannelUnavailable { err });
5162 }
5163 }
5164 e.insert(ChannelPhase::UnfundedOutboundV1(chan));
5165 }
5166 }
5167 Ok(())
5168 }
5169
	/// Test-only: like [`Self::funding_transaction_generated`] but trusts the caller-provided
	/// output index instead of matching the funding output by script and value.
	#[cfg(test)]
	pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
		let txid = funding_transaction.compute_txid();
		self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, false, |_| {
			Ok(OutPoint { txid, index: output_index })
		}, false)
	}
5177
	/// Call upon creation of a funding transaction for the given channel. Equivalent to
	/// [`Self::batch_funding_transaction_generated`] with a single-element batch.
	pub fn funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
		self.batch_funding_transaction_generated(&[(&temporary_channel_id, &counterparty_node_id)], funding_transaction)
	}
5211
5212
5213 pub fn unsafe_manual_funding_transaction_generated(&self, temporary_channel_id: ChannelId, counterparty_node_id: PublicKey, funding: OutPoint) -> Result<(), APIError> {
5241 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5242
5243 let temporary_channels = &[(&temporary_channel_id, &counterparty_node_id)];
5244 return self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Unchecked(funding));
5245
5246 }
5247
	/// Call upon creation of a (possibly batch) funding transaction funding the given channels.
	/// The transaction is validated before being handed to the per-channel funding logic.
	pub fn batch_funding_transaction_generated(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding_transaction: Transaction) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.batch_funding_transaction_generated_intern(temporary_channels, FundingType::Checked(funding_transaction))
	}
5262
	/// Shared implementation for (batch) funding-transaction handling.
	///
	/// For `FundingType::Checked` the provided transaction is sanity-checked; for
	/// `FundingType::Unchecked` the caller supplies the funding outpoint directly and no
	/// transaction validation is performed. On any error, every channel in the batch is
	/// force-closed and the counterparty is sent an error message.
	fn batch_funding_transaction_generated_intern(&self, temporary_channels: &[(&ChannelId, &PublicKey)], funding: FundingType) -> Result<(), APIError> {
		// Errors are accumulated via `result.and(...)` so that validation continues and the
		// cleanup path at the bottom runs exactly once for whichever error came first.
		let mut result = Ok(());
		if let FundingType::Checked(funding_transaction) = &funding {
			// Coinbase transactions have no witness-bearing inputs to check.
			if !funding_transaction.is_coinbase() {
				for inp in funding_transaction.input.iter() {
					// An empty witness implies a non-Segwit (or unsigned) input, which we
					// do not support for funding.
					if inp.witness.is_empty() {
						result = result.and(Err(APIError::APIMisuseError {
							err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
						}));
					}
				}
			}

			// Funding output indices are stored as u16, so more outputs cannot be addressed.
			if funding_transaction.output.len() > u16::max_value() as usize {
				result = result.and(Err(APIError::APIMisuseError {
					err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
				}));
			}
			let height = self.best_block.read().unwrap().height;
			// Reject a height-based absolute locktime that is not yet final (more than one
			// block ahead of our current chain tip), unless all inputs opt out of locktime
			// enforcement by using Sequence::MAX.
			if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) &&
				funding_transaction.lock_time.is_block_height() &&
				funding_transaction.lock_time.to_consensus_u32() > height + 1
			{
				result = result.and(Err(APIError::APIMisuseError {
					err: "Funding transaction absolute timelock is non-final".to_owned()
				}));
			}
		}

		let txid = funding.txid();
		// Only multi-channel fundings need batch-completion tracking.
		let is_batch_funding = temporary_channels.len() > 1;
		let mut funding_batch_states = if is_batch_funding {
			Some(self.funding_batch_states.lock().unwrap())
		} else {
			None
		};
		// Register the batch under its txid; a pre-existing entry for the same txid is an
		// API-misuse error (the same batch funding must not be submitted twice).
		let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
			match states.entry(txid) {
				btree_map::Entry::Occupied(_) => {
					result = result.clone().and(Err(APIError::APIMisuseError {
						err: "Batch funding transaction with the same txid already exists".to_owned()
					}));
					None
				},
				btree_map::Entry::Vacant(vacant) => Some(vacant.insert(Vec::new())),
			}
		});
		let is_manual_broadcast = funding.is_manual_broadcast();
		for &(temporary_channel_id, counterparty_node_id) in temporary_channels {
			// `and_then` short-circuits: once one channel fails, no further channels are
			// funded and the cleanup below tears the whole batch down.
			result = result.and_then(|_| self.funding_transaction_generated_intern(
				*temporary_channel_id,
				*counterparty_node_id,
				funding.transaction_or_dummy(),
				is_batch_funding,
				|chan| {
					// Locate the channel's funding output: it must match both the expected
					// P2WSH script and the channel value, and must be unique.
					let mut output_index = None;
					let expected_spk = chan.context.get_funding_redeemscript().to_p2wsh();
					let outpoint = match &funding {
						FundingType::Checked(tx) => {
							for (idx, outp) in tx.output.iter().enumerate() {
								if outp.script_pubkey == expected_spk && outp.value.to_sat() == chan.context.get_value_satoshis() {
									if output_index.is_some() {
										return Err("Multiple outputs matched the expected script and value");
									}
									output_index = Some(idx as u16);
								}
							}
							if output_index.is_none() {
								return Err("No output matched the script_pubkey and value in the FundingGenerationReady event");
							}
							OutPoint { txid, index: output_index.unwrap() }
						},
						// Unchecked funding carries its outpoint directly.
						FundingType::Unchecked(outpoint) => outpoint.clone(),
					};
					if let Some(funding_batch_state) = funding_batch_state.as_mut() {
						// Track (channel, peer, completed=false) so batch completion can be
						// detected later.
						funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false));
					}
					Ok(outpoint)
				},
				is_manual_broadcast)
			);
		}
		if let Err(ref e) = result {
			// Something went wrong: remove the whole batch and force-close every channel
			// involved (both those recorded in the batch state and those still only known
			// by their temporary IDs).
			let e = format!("Error in transaction funding: {:?}", e);
			let mut channels_to_remove = Vec::new();
			channels_to_remove.extend(funding_batch_states.as_mut()
				.and_then(|states| states.remove(&txid))
				.into_iter().flatten()
				.map(|(chan_id, node_id, _state)| (chan_id, node_id))
			);
			channels_to_remove.extend(temporary_channels.iter()
				.map(|(&chan_id, &node_id)| (chan_id, node_id))
			);
			let mut shutdown_results = Vec::new();
			{
				let per_peer_state = self.per_peer_state.read().unwrap();
				for (channel_id, counterparty_node_id) in channels_to_remove {
					per_peer_state.get(&counterparty_node_id)
						.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
						.and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id).map(|chan| (chan, peer_state)))
						.map(|(mut chan, mut peer_state)| {
							let closure_reason = ClosureReason::ProcessingError { err: e.clone() };
							let mut close_res = chan.context_mut().force_shutdown(false, closure_reason);
							locked_close_channel!(self, peer_state, chan.context(), close_res);
							shutdown_results.push(close_res);
							// Tell the peer the channel is gone so it can clean up too.
							peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
								node_id: counterparty_node_id,
								action: msgs::ErrorAction::SendErrorMessage {
									msg: msgs::ErrorMessage {
										channel_id,
										data: "Failed to fund channel".to_owned(),
									}
								},
							});
						});
				}
			}
			// Drop the funding_batch_states lock before finish_close_channel, which may
			// take other locks.
			mem::drop(funding_batch_states);
			for shutdown_result in shutdown_results.drain(..) {
				self.finish_close_channel(shutdown_result);
			}
		}
		result
	}
5395
	/// Atomically applies a partial channel-config update to the given channels with the
	/// given counterparty.
	///
	/// All `channel_ids` are checked for existence before any update is applied, so either
	/// every channel is found or no config is changed. For funded channels whose update
	/// actually changed something, a fresh `channel_update` is queued for broadcast (public
	/// channels) or unicast to the counterparty (private channels).
	pub fn update_partial_channel_config(
		&self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config_update: &ChannelConfigUpdate,
	) -> Result<(), APIError> {
		// Reject CLTV deltas below the enforced minimum up front.
		if config_update.cltv_expiry_delta.map(|delta| delta < MIN_CLTV_EXPIRY_DELTA).unwrap_or(false) {
			return Err(APIError::APIMisuseError {
				err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
			});
		}

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;

		// First pass: validate that every requested channel exists before mutating any,
		// so the update is all-or-nothing. The peer lock is held across both passes.
		for channel_id in channel_ids {
			if !peer_state.has_channel(channel_id) {
				return Err(APIError::ChannelUnavailable {
					err: format!("Channel with id {} not found for the passed counterparty node_id {}", channel_id, counterparty_node_id),
				});
			};
		}
		// Second pass: apply the update to each channel.
		for channel_id in channel_ids {
			if let Some(channel_phase) = peer_state.channel_by_id.get_mut(channel_id) {
				// Overlay the partial update on the channel's current config.
				let mut config = channel_phase.context().config();
				config.apply(config_update);
				// If nothing actually changed, skip re-announcing.
				if !channel_phase.context_mut().update_config(&config) {
					continue;
				}
				if let ChannelPhase::Funded(channel) = channel_phase {
					// Prefer a broadcastable update (announced channels); fall back to a
					// direct SendChannelUpdate for unannounced ones.
					if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
						let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
						pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
					} else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
						peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
							node_id: channel.context.get_counterparty_node_id(),
							msg,
						});
					}
				}
				continue;
			} else {
				// We checked existence above while holding the same lock, so this should
				// be unreachable.
				debug_assert!(false);
				return Err(APIError::ChannelUnavailable {
					err: format!(
						"Channel with ID {} for passed counterparty_node_id {} disappeared after we confirmed its existence - this should not be reachable!",
						channel_id, counterparty_node_id),
				});
			};
		}
		Ok(())
	}
5472
5473 pub fn update_channel_config(
5496 &self, counterparty_node_id: &PublicKey, channel_ids: &[ChannelId], config: &ChannelConfig,
5497 ) -> Result<(), APIError> {
5498 return self.update_partial_channel_config(counterparty_node_id, channel_ids, &(*config).into());
5499 }
5500
	/// Forwards a previously-intercepted HTLC identified by `intercept_id` over the channel
	/// `next_hop_channel_id` with peer `next_node_id`, forwarding `amt_to_forward_msat`
	/// millisatoshis onward (any difference from the inbound amount is recorded as a
	/// skimmed fee).
	pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &ChannelId, next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Resolve the outbound channel to an SCID while holding the peer lock; the lock is
		// released at the end of this scope, before we touch pending_intercepted_htlcs.
		let next_hop_scid = {
			let peer_state_lock = self.per_peer_state.read().unwrap();
			let peer_state_mutex = peer_state_lock.get(&next_node_id)
				.ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.get(next_hop_channel_id) {
				Some(ChannelPhase::Funded(chan)) => {
					if !chan.context.is_usable() {
						return Err(APIError::ChannelUnavailable {
							err: format!("Channel with id {} not fully established", next_hop_channel_id)
						})
					}
					// Prefer the real SCID; for unannounced channels fall back to the
					// outbound SCID alias.
					chan.context.get_short_channel_id().unwrap_or(chan.context.outbound_scid_alias())
				},
				// A channel exists but is not yet funded/usable for forwarding.
				Some(_) => return Err(APIError::ChannelUnavailable {
					err: format!("Channel with id {} for the passed counterparty node_id {} is still opening.",
						next_hop_channel_id, next_node_id)
				}),
				None => {
					let error = format!("Channel with id {} not found for the passed counterparty node_id {}",
						next_hop_channel_id, next_node_id);
					let logger = WithContext::from(&self.logger, Some(next_node_id), Some(*next_hop_channel_id), None);
					log_error!(logger, "{} when attempting to forward intercepted HTLC", error);
					return Err(APIError::ChannelUnavailable {
						err: error
					})
				}
			}
		};

		// Remove the pending intercept; an unknown id is API misuse (already forwarded,
		// failed, or never existed).
		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
			.ok_or_else(|| APIError::APIMisuseError {
				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
			})?;

		// Rebuild the forward routing with the resolved outbound SCID. Intercepted HTLCs
		// are only ever stored with Forward routing, hence the unreachable!().
		let routing = match payment.forward_info.routing {
			PendingHTLCRouting::Forward { onion_packet, blinded, incoming_cltv_expiry, .. } => {
				PendingHTLCRouting::Forward {
					onion_packet, blinded, incoming_cltv_expiry, short_channel_id: next_hop_scid,
				}
			},
			_ => unreachable!() };
		// Anything withheld from the inbound amount is treated as our skimmed fee.
		let skimmed_fee_msat =
			payment.forward_info.outgoing_amt_msat.saturating_sub(amt_to_forward_msat);
		let pending_htlc_info = PendingHTLCInfo {
			skimmed_fee_msat: if skimmed_fee_msat == 0 { None } else { Some(skimmed_fee_msat) },
			outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
		};

		// Queue the rebuilt forward keyed by its original (previous-hop) channel data.
		let mut per_source_pending_forward = [(
			payment.prev_short_channel_id,
			payment.prev_counterparty_node_id,
			payment.prev_funding_outpoint,
			payment.prev_channel_id,
			payment.prev_user_channel_id,
			vec![(pending_htlc_info, payment.prev_htlc_id)]
		)];
		self.forward_htlcs(&mut per_source_pending_forward);
		Ok(())
	}
5591
	/// Fails a previously-intercepted HTLC back to its sender with an `unknown_next_peer`
	/// (0x4000|10) error.
	pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Remove the pending intercept; an unknown id is API misuse.
		let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
			.ok_or_else(|| APIError::APIMisuseError {
				err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
			})?;

		// Intercepted HTLCs are only ever stored with Forward routing; any other variant
		// here would be a bug, hence the unreachable!() below.
		if let PendingHTLCRouting::Forward { short_channel_id, incoming_cltv_expiry, .. } = payment.forward_info.routing {
			// Reconstruct the previous-hop data needed to fail the HTLC backwards.
			let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
				short_channel_id: payment.prev_short_channel_id,
				user_channel_id: Some(payment.prev_user_channel_id),
				outpoint: payment.prev_funding_outpoint,
				channel_id: payment.prev_channel_id,
				counterparty_node_id: payment.prev_counterparty_node_id,
				htlc_id: payment.prev_htlc_id,
				incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
				phantom_shared_secret: None,
				blinded_failure: payment.forward_info.routing.blinded_failure(),
				cltv_expiry: incoming_cltv_expiry,
			});

			// 0x4000 | 10 == PERM|10, unknown_next_peer per BOLT 4.
			let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
			let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
			self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
		} else { unreachable!() } Ok(())
	}
5628
	/// Processes the queued, not-yet-decoded `update_add_htlc` messages: decodes each
	/// onion, checks that the HTLC can be accepted and (where applicable) forwarded, then
	/// either queues it as a pending forward or queues a failure back on the inbound
	/// channel, generating an `HTLCHandlingFailed` event for each failure.
	fn process_pending_update_add_htlcs(&self) {
		// Take the whole pending map in one swap so the lock is not held while processing.
		let mut decode_update_add_htlcs = new_hash_map();
		mem::swap(&mut decode_update_add_htlcs, &mut self.decode_update_add_htlcs.lock().unwrap());

		// Maps a failure to the most specific HTLCDestination we can describe: the
		// resolved next-hop channel if known, an unknown-SCID otherwise, or a failed
		// payment when there was no outgoing SCID at all (we were the recipient).
		let get_failed_htlc_destination = |outgoing_scid_opt: Option<u64>, payment_hash: PaymentHash| {
			if let Some(outgoing_scid) = outgoing_scid_opt {
				match self.short_to_chan_info.read().unwrap().get(&outgoing_scid) {
					Some((outgoing_counterparty_node_id, outgoing_channel_id)) =>
						HTLCDestination::NextHopChannel {
							node_id: Some(*outgoing_counterparty_node_id),
							channel_id: *outgoing_channel_id,
						},
					None => HTLCDestination::UnknownNextHop {
						requested_forward_scid: outgoing_scid,
					},
				}
			} else {
				HTLCDestination::FailedPayment { payment_hash }
			}
		};

		'outer_loop: for (incoming_scid, update_add_htlcs) in decode_update_add_htlcs {
			// Snapshot the inbound channel's details once per SCID.
			let incoming_channel_details_opt = self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
				let counterparty_node_id = chan.context.get_counterparty_node_id();
				let channel_id = chan.context.channel_id();
				let funding_txo = chan.context.get_funding_txo().unwrap();
				let user_channel_id = chan.context.get_user_id();
				let accept_underpaying_htlcs = chan.context.config().accept_underpaying_htlcs;
				(counterparty_node_id, channel_id, funding_txo, user_channel_id, accept_underpaying_htlcs)
			});
			let (
				incoming_counterparty_node_id, incoming_channel_id, incoming_funding_txo,
				incoming_user_channel_id, incoming_accept_underpaying_htlcs
			) = if let Some(incoming_channel_details) = incoming_channel_details_opt {
				incoming_channel_details
			} else {
				// The inbound channel is gone (not funded / closed); drop its queued HTLCs.
				continue;
			};

			let mut htlc_forwards = Vec::new();
			let mut htlc_fails = Vec::new();
			for update_add_htlc in &update_add_htlcs {
				// Step 1: decode the onion. A decode failure is an invalid-onion failure.
				let (next_hop, shared_secret, next_packet_details_opt) = match decode_incoming_update_add_htlc_onion(
					&update_add_htlc, &*self.node_signer, &*self.logger, &self.secp_ctx
				) {
					Ok(decoded_onion) => decoded_onion,
					Err(htlc_fail) => {
						htlc_fails.push((htlc_fail, HTLCDestination::InvalidOnion));
						continue;
					},
				};

				let is_intro_node_blinded_forward = next_hop.is_intro_node_blinded_forward();
				let outgoing_scid_opt = next_packet_details_opt.as_ref().map(|d| d.outgoing_scid);

				// Step 2: re-check the inbound channel can still accept this HTLC.
				match self.do_funded_channel_callback(incoming_scid, |chan: &mut Channel<SP>| {
					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(update_add_htlc.payment_hash));
					chan.can_accept_incoming_htlc(
						update_add_htlc, &self.fee_estimator, &logger,
					)
				}) {
					Some(Ok(_)) => {},
					Some(Err((err, code))) => {
						let htlc_fail = self.htlc_failure_from_update_add_err(
							&update_add_htlc, &incoming_counterparty_node_id, err, code,
							is_intro_node_blinded_forward, &shared_secret,
						);
						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
						htlc_fails.push((htlc_fail, htlc_destination));
						continue;
					},
					// The inbound channel disappeared mid-processing; abandon this SCID's
					// entire remaining batch.
					None => continue 'outer_loop,
				}

				// Step 3: if this is a forward, check the outbound constraints too.
				if let Some(next_packet_details) = next_packet_details_opt.as_ref() {
					if let Err((err, code)) = self.can_forward_htlc(
						&update_add_htlc, next_packet_details
					) {
						let htlc_fail = self.htlc_failure_from_update_add_err(
							&update_add_htlc, &incoming_counterparty_node_id, err, code,
							is_intro_node_blinded_forward, &shared_secret,
						);
						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
						htlc_fails.push((htlc_fail, htlc_destination));
						continue;
					}
				}

				// Step 4: build the final pending-HTLC status (forward or receive).
				match self.construct_pending_htlc_status(
					&update_add_htlc, &incoming_counterparty_node_id, shared_secret, next_hop,
					incoming_accept_underpaying_htlcs, next_packet_details_opt.map(|d| d.next_packet_pubkey),
				) {
					PendingHTLCStatus::Forward(htlc_forward) => {
						htlc_forwards.push((htlc_forward, update_add_htlc.htlc_id));
					},
					PendingHTLCStatus::Fail(htlc_fail) => {
						let htlc_destination = get_failed_htlc_destination(outgoing_scid_opt, update_add_htlc.payment_hash);
						htlc_fails.push((htlc_fail, htlc_destination));
					},
				}
			}

			// Hand successful decodes to the forwarding pipeline (without generating a
			// separate forward event here).
			let pending_forwards = (
				incoming_scid, Some(incoming_counterparty_node_id), incoming_funding_txo,
				incoming_channel_id, incoming_user_channel_id, htlc_forwards.drain(..).collect()
			);
			self.forward_htlcs_without_forward_event(&mut [pending_forwards]);
			// Queue each failure back on the inbound channel and surface a
			// HTLCHandlingFailed event for it.
			for (htlc_fail, htlc_destination) in htlc_fails.drain(..) {
				let failure = match htlc_fail {
					HTLCFailureMsg::Relay(fail_htlc) => HTLCForwardInfo::FailHTLC {
						htlc_id: fail_htlc.htlc_id,
						err_packet: fail_htlc.reason,
					},
					HTLCFailureMsg::Malformed(fail_malformed_htlc) => HTLCForwardInfo::FailMalformedHTLC {
						htlc_id: fail_malformed_htlc.htlc_id,
						sha256_of_onion: fail_malformed_htlc.sha256_of_onion,
						failure_code: fail_malformed_htlc.failure_code,
					},
				};
				self.forward_htlcs.lock().unwrap().entry(incoming_scid).or_default().push(failure);
				self.pending_events.lock().unwrap().push_back((events::Event::HTLCHandlingFailed {
					prev_channel_id: incoming_channel_id,
					failed_next_destination: htlc_destination,
				}, None));
			}
		}
	}
5762
5763 pub fn process_pending_htlc_forwards(&self) {
5768 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
5769
5770 self.process_pending_update_add_htlcs();
5771
5772 let mut new_events = VecDeque::new();
5773 let mut failed_forwards = Vec::new();
5774 let mut phantom_receives: Vec<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
5775 {
5776 let mut forward_htlcs = new_hash_map();
5777 mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
5778
5779 for (short_chan_id, mut pending_forwards) in forward_htlcs {
5780 if short_chan_id != 0 {
5781 let mut forwarding_counterparty = None;
5782 macro_rules! forwarding_channel_not_found {
5783 ($forward_infos: expr) => {
5784 for forward_info in $forward_infos {
5785 match forward_info {
5786 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5787 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5788 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5789 routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
5790 outgoing_cltv_value, ..
5791 }
5792 }) => {
5793 let cltv_expiry = routing.incoming_cltv_expiry();
5794 macro_rules! failure_handler {
5795 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
5796 let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id), Some(payment_hash));
5797 log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg);
5798
5799 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5800 short_channel_id: prev_short_channel_id,
5801 user_channel_id: Some(prev_user_channel_id),
5802 channel_id: prev_channel_id,
5803 outpoint: prev_funding_outpoint,
5804 counterparty_node_id: prev_counterparty_node_id,
5805 htlc_id: prev_htlc_id,
5806 incoming_packet_shared_secret: incoming_shared_secret,
5807 phantom_shared_secret: $phantom_ss,
5808 blinded_failure: routing.blinded_failure(),
5809 cltv_expiry,
5810 });
5811
5812 let reason = if $next_hop_unknown {
5813 HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
5814 } else {
5815 HTLCDestination::FailedPayment{ payment_hash }
5816 };
5817
5818 failed_forwards.push((htlc_source, payment_hash,
5819 HTLCFailReason::reason($err_code, $err_data),
5820 reason
5821 ));
5822 continue;
5823 }
5824 }
5825 macro_rules! fail_forward {
5826 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5827 {
5828 failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
5829 }
5830 }
5831 }
5832 macro_rules! failed_payment {
5833 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
5834 {
5835 failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
5836 }
5837 }
5838 }
5839 if let PendingHTLCRouting::Forward { ref onion_packet, .. } = routing {
5840 let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
5841 if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.chain_hash) {
5842 let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
5843 let next_hop = match onion_utils::decode_next_payment_hop(
5844 phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac,
5845 payment_hash, None, &*self.node_signer
5846 ) {
5847 Ok(res) => res,
5848 Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
5849 let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).to_byte_array();
5850 failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
5855 },
5856 Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
5857 failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
5858 },
5859 };
5860 match next_hop {
5861 onion_utils::Hop::Receive(hop_data) => {
5862 let current_height: u32 = self.best_block.read().unwrap().height;
5863 match create_recv_pending_htlc_info(hop_data,
5864 incoming_shared_secret, payment_hash, outgoing_amt_msat,
5865 outgoing_cltv_value, Some(phantom_shared_secret), false, None,
5866 current_height)
5867 {
5868 Ok(info) => phantom_receives.push((
5869 prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
5870 prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)]
5871 )),
5872 Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
5873 }
5874 },
5875 _ => panic!(),
5876 }
5877 } else {
5878 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5879 }
5880 } else {
5881 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
5882 }
5883 },
5884 HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
5885 }
5890 }
5891 }
5892 }
5893 }
5894 let chan_info_opt = self.short_to_chan_info.read().unwrap().get(&short_chan_id).cloned();
5895 let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
5896 Some((cp_id, chan_id)) => (cp_id, chan_id),
5897 None => {
5898 forwarding_channel_not_found!(pending_forwards.drain(..));
5899 continue;
5900 }
5901 };
5902 forwarding_counterparty = Some(counterparty_node_id);
5903 let per_peer_state = self.per_peer_state.read().unwrap();
5904 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
5905 if peer_state_mutex_opt.is_none() {
5906 forwarding_channel_not_found!(pending_forwards.drain(..));
5907 continue;
5908 }
5909 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
5910 let peer_state = &mut *peer_state_lock;
5911 let mut draining_pending_forwards = pending_forwards.drain(..);
5912 while let Some(forward_info) = draining_pending_forwards.next() {
5913 let queue_fail_htlc_res = match forward_info {
5914 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5915 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
5916 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
5917 incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
5918 routing: PendingHTLCRouting::Forward {
5919 ref onion_packet, blinded, incoming_cltv_expiry, ..
5920 }, skimmed_fee_msat, ..
5921 },
5922 }) => {
5923 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5924 short_channel_id: prev_short_channel_id,
5925 user_channel_id: Some(prev_user_channel_id),
5926 counterparty_node_id: prev_counterparty_node_id,
5927 channel_id: prev_channel_id,
5928 outpoint: prev_funding_outpoint,
5929 htlc_id: prev_htlc_id,
5930 incoming_packet_shared_secret: incoming_shared_secret,
5931 phantom_shared_secret: None,
5933 blinded_failure: blinded.map(|b| b.failure),
5934 cltv_expiry: incoming_cltv_expiry,
5935 });
5936 let next_blinding_point = blinded.and_then(|b| {
5937 b.next_blinding_override.or_else(|| {
5938 let encrypted_tlvs_ss = self.node_signer.ecdh(
5939 Recipient::Node, &b.inbound_blinding_point, None
5940 ).unwrap().secret_bytes();
5941 onion_utils::next_hop_pubkey(
5942 &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
5943 ).ok()
5944 })
5945 });
5946
5947 let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
5952 ChannelPhase::Funded(chan) => {
5953 let balances = chan.context.get_available_balances(&self.fee_estimator);
5954 if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
5955 outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
5956 chan.context.is_usable() {
5957 Some((chan, balances))
5958 } else {
5959 None
5960 }
5961 },
5962 _ => None,
5963 }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
5964 let optimal_channel = match maybe_optimal_channel {
5965 Some(chan) => chan,
5966 None => {
5967 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5969 chan
5970 } else {
5971 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
5972 break;
5973 }
5974 }
5975 };
5976
5977 let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
5978 let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
5979 "specified"
5980 } else {
5981 "alternate"
5982 };
5983 log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
5984 prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
5985 if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
5986 payment_hash, outgoing_cltv_value, htlc_source.clone(),
5987 onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
5988 &&logger)
5989 {
5990 if let ChannelError::Ignore(msg) = e {
5991 log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
5992 } else {
5993 panic!("Stated return value requirements in send_htlc() were not met");
5994 }
5995
5996 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
5997 let failure_code = 0x1000|7;
5998 let data = self.get_htlc_inbound_temp_fail_data(failure_code);
5999 failed_forwards.push((htlc_source, payment_hash,
6000 HTLCFailReason::reason(failure_code, data),
6001 HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
6002 ));
6003 } else {
6004 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6005 break;
6006 }
6007 }
6008 None
6009 },
6010 HTLCForwardInfo::AddHTLC { .. } => {
6011 panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
6012 },
6013 HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
6014 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6015 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6016 log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6017 Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
6018 } else {
6019 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6020 break;
6021 }
6022 },
6023 HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
6024 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6025 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6026 log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
6027 let res = chan.queue_fail_malformed_htlc(
6028 htlc_id, failure_code, sha256_of_onion, &&logger
6029 );
6030 Some((res, htlc_id))
6031 } else {
6032 forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
6033 break;
6034 }
6035 },
6036 };
6037 if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
6038 if let Err(e) = queue_fail_htlc_res {
6039 if let ChannelError::Ignore(msg) = e {
6040 if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
6041 let logger = WithChannelContext::from(&self.logger, &chan.context, None);
6042 log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
6043 }
6044 } else {
6045 panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
6046 }
6047 }
6051 }
6052 }
6053 } else {
6054 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
6055 match forward_info {
6056 HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
6057 prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
6058 prev_user_channel_id, prev_counterparty_node_id, forward_info: PendingHTLCInfo {
6059 routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat,
6060 skimmed_fee_msat, ..
6061 }
6062 }) => {
6063 let blinded_failure = routing.blinded_failure();
6064 let (
6065 cltv_expiry, onion_payload, payment_data, payment_context, phantom_shared_secret,
6066 mut onion_fields, has_recipient_created_payment_secret
6067 ) = match routing {
6068 PendingHTLCRouting::Receive {
6069 payment_data, payment_metadata, payment_context,
6070 incoming_cltv_expiry, phantom_shared_secret, custom_tlvs,
6071 requires_blinded_error: _
6072 } => {
6073 let _legacy_hop_data = Some(payment_data.clone());
6074 let onion_fields = RecipientOnionFields { payment_secret: Some(payment_data.payment_secret),
6075 payment_metadata, custom_tlvs };
6076 (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
6077 Some(payment_data), payment_context, phantom_shared_secret, onion_fields,
6078 true)
6079 },
6080 PendingHTLCRouting::ReceiveKeysend {
6081 payment_data, payment_preimage, payment_metadata,
6082 incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _,
6083 has_recipient_created_payment_secret,
6084 } => {
6085 let onion_fields = RecipientOnionFields {
6086 payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
6087 payment_metadata,
6088 custom_tlvs,
6089 };
6090 (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage),
6091 payment_data, None, None, onion_fields, has_recipient_created_payment_secret)
6092 },
6093 _ => {
6094 panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
6095 }
6096 };
6097 let claimable_htlc = ClaimableHTLC {
6098 prev_hop: HTLCPreviousHopData {
6099 short_channel_id: prev_short_channel_id,
6100 user_channel_id: Some(prev_user_channel_id),
6101 counterparty_node_id: prev_counterparty_node_id,
6102 channel_id: prev_channel_id,
6103 outpoint: prev_funding_outpoint,
6104 htlc_id: prev_htlc_id,
6105 incoming_packet_shared_secret: incoming_shared_secret,
6106 phantom_shared_secret,
6107 blinded_failure,
6108 cltv_expiry: Some(cltv_expiry),
6109 },
6110 value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
6114 sender_intended_value: outgoing_amt_msat,
6115 timer_ticks: 0,
6116 total_value_received: None,
6117 total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
6118 cltv_expiry,
6119 onion_payload,
6120 counterparty_skimmed_fee_msat: skimmed_fee_msat,
6121 };
6122
6123 let mut committed_to_claimable = false;
6124
6125 macro_rules! fail_htlc {
6126 ($htlc: expr, $payment_hash: expr) => {
6127 debug_assert!(!committed_to_claimable);
6128 let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
6129 htlc_msat_height_data.extend_from_slice(
6130 &self.best_block.read().unwrap().height.to_be_bytes(),
6131 );
6132 failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
6133 short_channel_id: $htlc.prev_hop.short_channel_id,
6134 user_channel_id: $htlc.prev_hop.user_channel_id,
6135 counterparty_node_id: $htlc.prev_hop.counterparty_node_id,
6136 channel_id: prev_channel_id,
6137 outpoint: prev_funding_outpoint,
6138 htlc_id: $htlc.prev_hop.htlc_id,
6139 incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
6140 phantom_shared_secret,
6141 blinded_failure,
6142 cltv_expiry: Some(cltv_expiry),
6143 }), payment_hash,
6144 HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
6145 HTLCDestination::FailedPayment { payment_hash: $payment_hash },
6146 ));
6147 continue 'next_forwardable_htlc;
6148 }
6149 }
6150 let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
6151 let mut receiver_node_id = self.our_network_pubkey;
6152 if phantom_shared_secret.is_some() {
6153 receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
6154 .expect("Failed to get node_id for phantom node recipient");
6155 }
6156
6157 macro_rules! check_total_value {
6158 ($purpose: expr) => {{
6159 let mut payment_claimable_generated = false;
6160 let is_keysend = $purpose.is_keysend();
6161 let mut claimable_payments = self.claimable_payments.lock().unwrap();
6162 if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
6163 fail_htlc!(claimable_htlc, payment_hash);
6164 }
6165 let ref mut claimable_payment = claimable_payments.claimable_payments
6166 .entry(payment_hash)
6167 .or_insert_with(|| {
6169 committed_to_claimable = true;
6170 ClaimablePayment {
6171 purpose: $purpose.clone(), htlcs: Vec::new(), onion_fields: None,
6172 }
6173 });
6174 if $purpose != claimable_payment.purpose {
6175 let log_keysend = |keysend| if keysend { "keysend" } else { "non-keysend" };
6176 log_trace!(self.logger, "Failing new {} HTLC with payment_hash {} as we already had an existing {} HTLC with the same payment hash", log_keysend(is_keysend), &payment_hash, log_keysend(!is_keysend));
6177 fail_htlc!(claimable_htlc, payment_hash);
6178 }
6179 if let Some(earlier_fields) = &mut claimable_payment.onion_fields {
6180 if earlier_fields.check_merge(&mut onion_fields).is_err() {
6181 fail_htlc!(claimable_htlc, payment_hash);
6182 }
6183 } else {
6184 claimable_payment.onion_fields = Some(onion_fields);
6185 }
6186 let mut total_value = claimable_htlc.sender_intended_value;
6187 let mut earliest_expiry = claimable_htlc.cltv_expiry;
6188 for htlc in claimable_payment.htlcs.iter() {
6189 total_value += htlc.sender_intended_value;
6190 earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
6191 if htlc.total_msat != claimable_htlc.total_msat {
6192 log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
6193 &payment_hash, claimable_htlc.total_msat, htlc.total_msat);
6194 total_value = msgs::MAX_VALUE_MSAT;
6195 }
6196 if total_value >= msgs::MAX_VALUE_MSAT { break; }
6197 }
6198 if total_value >= msgs::MAX_VALUE_MSAT {
6201 fail_htlc!(claimable_htlc, payment_hash);
6202 } else if total_value - claimable_htlc.sender_intended_value >= claimable_htlc.total_msat {
6203 log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
6204 &payment_hash);
6205 fail_htlc!(claimable_htlc, payment_hash);
6206 } else if total_value >= claimable_htlc.total_msat {
6207 #[allow(unused_assignments)] {
6208 committed_to_claimable = true;
6209 }
6210 claimable_payment.htlcs.push(claimable_htlc);
6211 let amount_msat =
6212 claimable_payment.htlcs.iter().map(|htlc| htlc.value).sum();
6213 claimable_payment.htlcs.iter_mut()
6214 .for_each(|htlc| htlc.total_value_received = Some(amount_msat));
6215 let counterparty_skimmed_fee_msat = claimable_payment.htlcs.iter()
6216 .map(|htlc| htlc.counterparty_skimmed_fee_msat.unwrap_or(0)).sum();
6217 debug_assert!(total_value.saturating_sub(amount_msat) <=
6218 counterparty_skimmed_fee_msat);
6219 claimable_payment.htlcs.sort();
6220 let payment_id =
6221 claimable_payment.inbound_payment_id(&self.inbound_payment_id_secret);
6222 new_events.push_back((events::Event::PaymentClaimable {
6223 receiver_node_id: Some(receiver_node_id),
6224 payment_hash,
6225 purpose: $purpose,
6226 amount_msat,
6227 counterparty_skimmed_fee_msat,
6228 via_channel_id: Some(prev_channel_id),
6229 via_user_channel_id: Some(prev_user_channel_id),
6230 claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
6231 onion_fields: claimable_payment.onion_fields.clone(),
6232 payment_id: Some(payment_id),
6233 }, None));
6234 payment_claimable_generated = true;
6235 } else {
6236 claimable_payment.htlcs.push(claimable_htlc);
6240 #[allow(unused_assignments)] {
6241 committed_to_claimable = true;
6242 }
6243 }
6244 payment_claimable_generated
6245 }}
6246 }
6247
6248 let payment_preimage = if has_recipient_created_payment_secret {
6255 if let Some(ref payment_data) = payment_data {
6256 let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
6257 Ok(result) => result,
6258 Err(()) => {
6259 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", &payment_hash);
6260 fail_htlc!(claimable_htlc, payment_hash);
6261 }
6262 };
6263 if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
6264 let expected_min_expiry_height = (self.current_best_block().height + min_final_cltv_expiry_delta as u32) as u64;
6265 if (cltv_expiry as u64) < expected_min_expiry_height {
6266 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
6267 &payment_hash, cltv_expiry, expected_min_expiry_height);
6268 fail_htlc!(claimable_htlc, payment_hash);
6269 }
6270 }
6271 payment_preimage
6272 } else { fail_htlc!(claimable_htlc, payment_hash); }
6273 } else { None };
6274 match claimable_htlc.onion_payload {
6275 OnionPayload::Invoice { .. } => {
6276 let payment_data = payment_data.unwrap();
6277 let purpose = events::PaymentPurpose::from_parts(
6278 payment_preimage,
6279 payment_data.payment_secret,
6280 payment_context,
6281 );
6282 check_total_value!(purpose);
6283 },
6284 OnionPayload::Spontaneous(preimage) => {
6285 let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
6286 check_total_value!(purpose);
6287 }
6288 }
6289 },
6290 HTLCForwardInfo::FailHTLC { .. } | HTLCForwardInfo::FailMalformedHTLC { .. } => {
6291 panic!("Got pending fail of our own HTLC");
6292 }
6293 }
6294 }
6295 }
6296 }
6297 }
6298
6299 let best_block_height = self.best_block.read().unwrap().height;
6300 self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
6301 || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
6302 &self.pending_events, &self.logger, |args| self.send_payment_along_path(args));
6303
6304 for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
6305 self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
6306 }
6307 self.forward_htlcs(&mut phantom_receives);
6308
6309 self.check_free_holding_cells();
6314
6315 if new_events.is_empty() { return }
6316 let mut events = self.pending_events.lock().unwrap();
6317 events.append(&mut new_events);
6318 }
6319
/// Drains `pending_background_events` and applies each queued event.
///
/// Background events are queued when work (largely `ChannelMonitorUpdate`s regenerated on
/// startup) could not be applied inline. Returns [`NotifyOption::SkipPersistNoEvents`] when
/// the queue was empty, otherwise [`NotifyOption::DoPersist`].
fn process_background_events(&self) -> NotifyOption {
	// Callers are required to already hold the `total_consistency_lock` (read or write).
	debug_assert_ne!(self.total_consistency_lock.held_by_thread(), LockHeldState::NotHeldByThread);

	self.background_events_processed_since_startup.store(true, Ordering::Release);

	// Swap the whole queue out in one shot so we don't hold its lock while handling events.
	let mut background_events = Vec::new();
	mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
	if background_events.is_empty() {
		return NotifyOption::SkipPersistNoEvents;
	}

	for event in background_events.drain(..) {
		match event {
			BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, _channel_id, update)) => {
				// The result is intentionally discarded (`let _`) — the channel this update
				// belongs to is already closed, so there is no channel state to unblock.
				let _ = self.chain_monitor.update_channel(funding_txo, &update);
			},
			BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => {
				self.apply_post_close_monitor_update(counterparty_node_id, channel_id, funding_txo, update);
			},
			BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
				let per_peer_state = self.per_peer_state.read().unwrap();
				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
					let peer_state = &mut *peer_state_lock;
					if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) {
						// Only resume the channel if no further monitor updates are blocked.
						if chan.blocked_monitor_updates_pending() == 0 {
							handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
						}
					} else {
						// No funded channel remains for this id: run any blocked actions
						// directly. Drop both locks first, as the completion actions may
						// re-acquire them.
						let update_actions = peer_state.monitor_update_blocked_actions
							.remove(&channel_id).unwrap_or(Vec::new());
						mem::drop(peer_state_lock);
						mem::drop(per_peer_state);
						self.handle_monitor_update_completion_actions(update_actions);
					}
				}
			},
		}
	}
	NotifyOption::DoPersist
}
6366
/// Test-only wrapper: takes the consistency (read) lock and runs
/// [`Self::process_background_events`], discarding the persistence result.
#[cfg(any(test, feature = "_test_utils"))]
pub fn test_process_background_events(&self) {
	// Bound to a name (not `_`) so the read guard lives for the whole call.
	let _lck = self.total_consistency_lock.read().unwrap();
	let _ = self.process_background_events();
}
6373
/// Checks whether `chan`'s committed feerate should move to `new_feerate` and, if so, queues
/// an `update_fee` on the channel.
///
/// Returns [`NotifyOption::DoPersist`] only when an update was actually queued.
fn update_channel_fee(&self, chan_id: &ChannelId, chan: &mut Channel<SP>, new_feerate: u32) -> NotifyOption {
	// We only update fees on channels where we are the opener (outbound side).
	if !chan.context.is_outbound() { return NotifyOption::SkipPersistNoEvents; }

	let logger = WithChannelContext::from(&self.logger, &chan.context, None);

	// Hysteresis: skip the update unless the target feerate rose above the current one, or
	// fell to half (or less) of it — small downward drift isn't worth an update round-trip.
	if new_feerate <= chan.context.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.context.get_feerate_sat_per_1000_weight() {
		return NotifyOption::SkipPersistNoEvents;
	}
	// A channel that isn't "live" (e.g. peer disconnected) can't take an update_fee now.
	if !chan.context.is_live() {
		log_trace!(logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
			chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);
		return NotifyOption::SkipPersistNoEvents;
	}
	log_trace!(logger, "Channel {} qualifies for a feerate change from {} to {}.",
		&chan_id, chan.context.get_feerate_sat_per_1000_weight(), new_feerate);

	chan.queue_update_fee(new_feerate, &self.fee_estimator, &&logger);
	NotifyOption::DoPersist
}
6394
/// Re-checks the current fee estimates against every funded channel's feerate, queuing
/// `update_fee` where needed. Only compiled for fuzzing builds.
#[cfg(fuzzing)]
pub fn maybe_update_chan_fees(&self) {
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		// Query both confirmation targets once; each channel picks the one matching its type.
		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		let per_peer_state = self.per_peer_state.read().unwrap();
		for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			// Only funded channels have a feerate to maintain.
			for (chan_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
				|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
			) {
				let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
					anchor_feerate
				} else {
					non_anchor_feerate
				};
				let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
				if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
			}
		}

		should_persist
	});
}
6427
/// Performs periodic channel maintenance, intended to be called on a regular timer tick:
/// feerate updates, closing-negotiation timeouts, staged gossip enable/disable of channels,
/// expiry of unfunded channels and unaccepted inbound requests, removal of peers with no
/// remaining state, timing out incomplete MPP receives, and expiring stale outbound payments.
pub fn timer_tick_occurred(&self) {
	PersistenceNotifierGuard::optionally_notify(self, || {
		let mut should_persist = NotifyOption::SkipPersistNoEvents;

		let non_anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let anchor_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee);

		// Work collected while per-peer locks are held; all of it is processed after those
		// locks are released, avoiding lock-order issues.
		let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
		let mut timed_out_mpp_htlcs = Vec::new();
		let mut pending_peers_awaiting_removal = Vec::new();
		let mut shutdown_channels = Vec::new();

		// Shared per-tick handler for all unfunded channel phases. Evaluates to `false`
		// (i.e. remove from `channel_by_id`) when the channel timed out waiting to be
		// established, after queueing an error message and the shutdown result.
		macro_rules! process_unfunded_channel_tick {
			($peer_state: expr, $chan: expr, $pending_msg_events: expr) => { {
				let context = &mut $chan.context;
				context.maybe_expire_prev_config();
				if $chan.unfunded_context.should_expire_unfunded_channel() {
					let logger = WithChannelContext::from(&self.logger, context, None);
					log_error!(logger,
						"Force-closing pending channel with ID {} for not establishing in a timely manner",
						context.channel_id());
					let mut close_res = context.force_shutdown(false, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) });
					locked_close_channel!(self, $peer_state, context, close_res);
					shutdown_channels.push(close_res);
					$pending_msg_events.push(MessageSendEvent::HandleError {
						node_id: context.get_counterparty_node_id(),
						action: msgs::ErrorAction::SendErrorMessage {
							msg: msgs::ErrorMessage {
								channel_id: context.channel_id(),
								data: "Force-closing pending channel due to timeout awaiting establishment handshake".to_owned(),
							},
						},
					});
					false
				} else {
					true
				}
			} }
		}

		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				let counterparty_node_id = *counterparty_node_id;
				peer_state.channel_by_id.retain(|chan_id, phase| {
					match phase {
						ChannelPhase::Funded(chan) => {
							// Maybe queue an update_fee against the estimator target
							// matching this channel's type.
							let new_feerate = if chan.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
								anchor_feerate
							} else {
								non_anchor_feerate
							};
							let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
							if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }

							if let Err(e) = chan.timer_check_closing_negotiation_progress() {
								let (needs_close, err) = convert_chan_phase_err!(self, peer_state, e, chan, chan_id, FUNDED_CHANNEL);
								handle_errors.push((Err(err), counterparty_node_id));
								if needs_close { return false; }
							}

							// Gossip-status state machine: a channel must remain (not) live
							// for several consecutive ticks (staged states count up to
							// DISABLE_GOSSIP_TICKS / ENABLE_GOSSIP_TICKS) before we
							// broadcast a disabled/enabled channel_update, avoiding gossip
							// churn on brief disconnections.
							match chan.channel_update_status() {
								ChannelUpdateStatus::Enabled if !chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
								ChannelUpdateStatus::Disabled if chan.context.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
								ChannelUpdateStatus::DisabledStaged(_) if chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
								ChannelUpdateStatus::EnabledStaged(_) if !chan.context.is_live()
									=> chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
								ChannelUpdateStatus::DisabledStaged(mut n) if !chan.context.is_live() => {
									n += 1;
									if n >= DISABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
									}
								},
								ChannelUpdateStatus::EnabledStaged(mut n) if chan.context.is_live() => {
									n += 1;
									if n >= ENABLE_GOSSIP_TICKS {
										chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										should_persist = NotifyOption::DoPersist;
									} else {
										chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
									}
								},
								_ => {},
							}

							chan.context.maybe_expire_prev_config();

							if chan.should_disconnect_peer_awaiting_response() {
								let logger = WithChannelContext::from(&self.logger, &chan.context, None);
								log_debug!(logger, "Disconnecting peer {} due to not making any progress on channel {}",
									counterparty_node_id, chan_id);
								pending_msg_events.push(MessageSendEvent::HandleError {
									node_id: counterparty_node_id,
									action: msgs::ErrorAction::DisconnectPeerWithWarning {
										msg: msgs::WarningMessage {
											channel_id: *chan_id,
											data: "Disconnecting due to timeout awaiting response".to_owned(),
										},
									},
								});
							}

							true
						},
						ChannelPhase::UnfundedInboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV1(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
						ChannelPhase::UnfundedOutboundV2(chan) => {
							process_unfunded_channel_tick!(peer_state, chan, pending_msg_events)
						},
					}
				});

				// Count down unaccepted inbound channel requests, erroring out any the user
				// has not accepted in time.
				for (chan_id, req) in peer_state.inbound_channel_request_by_id.iter_mut() {
					if { req.ticks_remaining -= 1 ; req.ticks_remaining } <= 0 {
						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*chan_id), None);
						log_error!(logger, "Force-closing unaccepted inbound channel {} for not accepting in a timely manner", &chan_id);
						peer_state.pending_msg_events.push(
							events::MessageSendEvent::HandleError {
								node_id: counterparty_node_id,
								action: msgs::ErrorAction::SendErrorMessage {
									msg: msgs::ErrorMessage { channel_id: chan_id.clone(), data: "Channel force-closed".to_owned() }
								},
							}
						);
					}
				}
				peer_state.inbound_channel_request_by_id.retain(|_, req| req.ticks_remaining > 0);

				if peer_state.ok_to_remove(true) {
					pending_peers_awaiting_removal.push(counterparty_node_id);
				}
			}
		}

		// Remove the peers flagged above. This needs the per_peer_state *write* lock, so
		// `ok_to_remove` is re-checked under it in case anything changed in between.
		if pending_peers_awaiting_removal.len() > 0 {
			let mut per_peer_state = self.per_peer_state.write().unwrap();
			for counterparty_node_id in pending_peers_awaiting_removal {
				match per_peer_state.entry(counterparty_node_id) {
					hash_map::Entry::Occupied(entry) => {
						let remove_entry = {
							let peer_state = entry.get().lock().unwrap();
							peer_state.ok_to_remove(true)
						};
						if remove_entry {
							entry.remove_entry();
						}
					},
					hash_map::Entry::Vacant(_) => { }
				}
			}
		}

		// Time out incomplete MPP receives whose parts have waited too many ticks.
		self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
			if payment.htlcs.is_empty() {
				// An empty-HTLC payment entry should never exist; drop it defensively.
				debug_assert!(false);
				return false;
			}
			if let OnionPayload::Invoice { .. } = payment.htlcs[0].onion_payload {
				// Fully-received payments (parts sum to at least total_msat) are awaiting
				// claim, not more parts — never time them out here.
				if payment.htlcs[0].total_msat <= payment.htlcs.iter()
					.fold(0, |total, htlc| total + htlc.sender_intended_value)
				{
					return true;
				} else if payment.htlcs.iter_mut().any(|htlc| {
					htlc.timer_ticks += 1;
					return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
				}) {
					timed_out_mpp_htlcs.extend(payment.htlcs.drain(..)
						.map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
					return false;
				}
			}
			true
		});

		for htlc_source in timed_out_mpp_htlcs.drain(..) {
			let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
			// Failure code 23 is `mpp_timeout` per BOLT 4.
			let reason = HTLCFailReason::from_failure_code(23);
			let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
			self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
		}

		for (err, counterparty_node_id) in handle_errors.drain(..) {
			let _ = handle_error!(self, err, counterparty_node_id);
		}

		for shutdown_res in shutdown_channels {
			self.finish_close_channel(shutdown_res);
		}

		// Without `std` there is no wall clock; approximate "now" from the highest block
		// timestamp seen, minus 7200s (presumably to account for block timestamps being
		// allowed to run ahead of real time — TODO confirm).
		#[cfg(feature = "std")]
		let duration_since_epoch = std::time::SystemTime::now()
			.duration_since(std::time::SystemTime::UNIX_EPOCH)
			.expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
		#[cfg(not(feature = "std"))]
		let duration_since_epoch = Duration::from_secs(
			self.highest_seen_timestamp.load(Ordering::Acquire).saturating_sub(7200) as u64
		);

		self.pending_outbound_payments.remove_stale_payments(
			duration_since_epoch, &self.pending_events
		);

		if self.check_free_holding_cells() {
			should_persist = NotifyOption::DoPersist;
		}

		should_persist
	});
}
6701
/// Fails all HTLCs pending for the payment with the given hash back to their previous hops,
/// using the default [`FailureCode::IncorrectOrUnknownPaymentDetails`] reason.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
	self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
}
6718
6719 pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
6724 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
6725
6726 let removed_source = self.claimable_payments.lock().unwrap().claimable_payments.remove(payment_hash);
6727 if let Some(payment) = removed_source {
6728 for htlc in payment.htlcs {
6729 let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
6730 let source = HTLCSource::PreviousHopData(htlc.prev_hop);
6731 let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
6732 self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
6733 }
6734 }
6735 }
6736
6737 fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
6739 match failure_code {
6740 FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code.into()),
6741 FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code.into()),
6742 FailureCode::IncorrectOrUnknownPaymentDetails => {
6743 let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
6744 htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
6745 HTLCFailReason::reason(failure_code.into(), htlc_msat_height_data)
6746 },
6747 FailureCode::InvalidOnionPayload(data) => {
6748 let fail_data = match data {
6749 Some((typ, offset)) => [BigSize(typ).encode(), offset.encode()].concat(),
6750 None => Vec::new(),
6751 };
6752 HTLCFailReason::reason(failure_code.into(), fail_data)
6753 }
6754 }
6755 }
6756
/// Builds the failure data payload for a temporary channel-level failure (an error code with
/// the 0x1000 "update" bit set) on an inbound HTLC.
fn get_htlc_inbound_temp_fail_data(&self, err_code: u16) -> Vec<u8> {
	// Only "update"-class codes may come through here...
	debug_assert_eq!(err_code & 0x1000, 0x1000);
	// ...and these three codes are explicitly excluded (they carry different payloads).
	debug_assert_ne!(err_code, 0x1000|11);
	debug_assert_ne!(err_code, 0x1000|12);
	debug_assert_ne!(err_code, 0x1000|13);
	let mut enc = VecWriter(Vec::with_capacity(4));
	if err_code == 0x1000 | 20 {
		// NOTE(review): this leading u16 appears to be the extra `flags`-style field that
		// code 0x1000|20 (channel_disabled) carries before the common trailer — confirm
		// against BOLT 4.
		0u16.write(&mut enc).expect("Writes cannot fail");
	}
	// Trailing zero u16 — presumably a zero-length `channel_update` field, since no update
	// message is serialized here — TODO confirm against BOLT 4.
	(0u16).write(&mut enc).expect("Writes cannot fail");
	enc.0
}
6778
6779 fn fail_holding_cell_htlcs(
6783 &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: ChannelId,
6784 counterparty_node_id: &PublicKey
6785 ) {
6786 let (failure_code, onion_failure_data) = {
6787 let per_peer_state = self.per_peer_state.read().unwrap();
6788 if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
6789 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6790 let peer_state = &mut *peer_state_lock;
6791 match peer_state.channel_by_id.entry(channel_id) {
6792 hash_map::Entry::Occupied(chan_phase_entry) => {
6793 if let ChannelPhase::Funded(_chan) = chan_phase_entry.get() {
6794 let failure_code = 0x1000|7;
6795 let data = self.get_htlc_inbound_temp_fail_data(failure_code);
6796 (failure_code, data)
6797 } else {
6798 debug_assert!(false);
6800 (0x4000|10, Vec::new())
6801 }
6802 },
6803 hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
6804 }
6805 } else { (0x4000|10, Vec::new()) }
6806 };
6807
6808 for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
6809 let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
6810 let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
6811 self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
6812 }
6813 }
6814
/// Fails an HTLC back to its source and, when the inner call reports the forwarding queues
/// were previously empty, wakes the forward-processing machinery so the failure propagates.
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
	let push_forward_event = self.fail_htlc_backwards_internal_without_forward_event(source, payment_hash, onion_error, destination);
	if push_forward_event { self.push_pending_forwards_ev(); }
}
6819
/// Core HTLC fail-backwards logic. Returns `true` when the caller should generate a pending
/// forwards-processing event (i.e. the relevant queues were empty before we pushed into
/// them); callers that batch failures use this to emit a single wakeup.
fn fail_htlc_backwards_internal_without_forward_event(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) -> bool {
	// Lock-order check (debug only): no per-peer state lock may be held by this thread, as
	// the paths below take other locks that must come after it.
	#[cfg(debug_assertions)]
	for (_, peer) in self.per_peer_state.read().unwrap().iter() {
		debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
	}

	let mut push_forward_event;
	match source {
		// An HTLC we originated: hand the failure to the outbound-payments tracker.
		HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
			push_forward_event = self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
				session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
				&self.pending_events, &self.logger);
		},
		// A forwarded HTLC: queue a failure back toward the previous hop.
		HTLCSource::PreviousHopData(HTLCPreviousHopData {
			ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret,
			ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, ..
		}) => {
			log_trace!(
				WithContext::from(&self.logger, None, Some(*channel_id), Some(*payment_hash)),
				"Failing {}HTLC with payment_hash {} backwards from us: {:?}",
				if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error
			);
			// For blinded paths the real error is hidden: both blinded cases substitute
			// `invalid_onion_blinding` with zeroed data rather than `onion_error`.
			let failure = match blinded_failure {
				Some(BlindedFailure::FromIntroductionNode) => {
					let blinded_onion_error = HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32]);
					let err_packet = blinded_onion_error.get_encrypted_failure_packet(
						incoming_packet_shared_secret, phantom_shared_secret
					);
					HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
				},
				Some(BlindedFailure::FromBlindedNode) => {
					HTLCForwardInfo::FailMalformedHTLC {
						htlc_id: *htlc_id,
						failure_code: INVALID_ONION_BLINDING,
						sha256_of_onion: [0; 32]
					}
				},
				None => {
					let err_packet = onion_error.get_encrypted_failure_packet(
						incoming_packet_shared_secret, phantom_shared_secret
					);
					HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }
				}
			};

			// Only ask for a wakeup if both queues were empty before this push — otherwise
			// one is already pending/being processed.
			push_forward_event = self.decode_update_add_htlcs.lock().unwrap().is_empty();
			let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
			push_forward_event &= forward_htlcs.is_empty();
			match forward_htlcs.entry(*short_channel_id) {
				hash_map::Entry::Occupied(mut entry) => {
					entry.get_mut().push(failure);
				},
				hash_map::Entry::Vacant(entry) => {
					entry.insert(vec!(failure));
				}
			}
			// Drop the forward_htlcs lock before taking pending_events.
			mem::drop(forward_htlcs);
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push_back((events::Event::HTLCHandlingFailed {
				prev_channel_id: *channel_id,
				failed_next_destination: destination,
			}, None));
		},
	}
	push_forward_event
}
6900
/// Claims all claimable HTLCs for the payment whose hash matches `payment_preimage`,
/// rejecting payments carrying custom TLVs the caller has not indicated it understands
/// (see [`Self::claim_funds_with_known_custom_tlvs`] for the opt-in variant).
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
	self.claim_payment_internal(payment_preimage, false);
}
6928
/// Like [`Self::claim_funds`], but signals that any custom TLVs on the payment are known to
/// (and accepted by) the caller.
pub fn claim_funds_with_known_custom_tlvs(&self, payment_preimage: PaymentPreimage) {
	self.claim_payment_internal(payment_preimage, true);
}
6941
/// Shared implementation of [`Self::claim_funds`] and
/// [`Self::claim_funds_with_known_custom_tlvs`]: claims every HTLC of the payment whose hash
/// matches `payment_preimage`, or fails them all backwards if the claim cannot proceed.
fn claim_payment_internal(&self, payment_preimage: PaymentPreimage, custom_tlvs_known: bool) {
	// The payment hash is, by construction, SHA256 of the preimage.
	let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).to_byte_array());

	let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

	// Move the payment from the claimable set into the pending-claiming set. On `Err` we
	// get the HTLCs back and fail them backwards instead (presumably e.g. when the payment
	// carries custom TLVs and `custom_tlvs_known` is false — see `begin_claiming_payment`).
	let (sources, claiming_payment) = {
		let res = self.claimable_payments.lock().unwrap().begin_claiming_payment(
			payment_hash, &self.node_signer, &self.logger, &self.inbound_payment_id_secret,
			custom_tlvs_known,
		);

		match res {
			Ok((htlcs, payment_info)) => (htlcs, payment_info),
			Err(htlcs) => {
				for htlc in htlcs {
					let reason = self.get_htlc_fail_reason_from_failure_code(FailureCode::InvalidOnionPayload(None), &htlc);
					let source = HTLCSource::PreviousHopData(htlc.prev_hop);
					let receiver = HTLCDestination::FailedPayment { payment_hash };
					self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
				}
				return;
			}
		}
	};
	debug_assert!(!sources.is_empty());

	// Sanity-check the MPP parts: every part must agree on total_msat and
	// total_value_received, and the sum of part values must match the expected total.
	let mut claimable_amt_msat = 0;
	let mut prev_total_msat = None;
	let mut expected_amt_msat = None;
	let mut valid_mpp = true;
	let mut errs = Vec::new();
	let per_peer_state = self.per_peer_state.read().unwrap();
	for htlc in sources.iter() {
		if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
			log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
			debug_assert!(false);
			valid_mpp = false;
			break;
		}
		prev_total_msat = Some(htlc.total_msat);

		if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
			log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
			debug_assert!(false);
			valid_mpp = false;
			break;
		}
		expected_amt_msat = htlc.total_value_received;
		claimable_amt_msat += htlc.value;
	}
	mem::drop(per_peer_state);
	// Incomplete claims abort here, clearing the pending-claiming marker set above.
	if sources.is_empty() || expected_amt_msat.is_none() {
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
		log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
		return;
	}
	if claimable_amt_msat != expected_amt_msat.unwrap() {
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
		log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
			expected_amt_msat.unwrap(), claimable_amt_msat);
		return;
	}
	if valid_mpp {
		// Record each part's source; only parts with a known counterparty node id can be
		// described.
		let mpp_parts: Vec<_> = sources.iter().filter_map(|htlc| {
			if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
				Some(MPPClaimHTLCSource {
					counterparty_node_id: cp_id,
					funding_txo: htlc.prev_hop.outpoint,
					channel_id: htlc.prev_hop.channel_id,
					htlc_id: htlc.prev_hop.htlc_id,
				})
			} else {
				None
			}
		}).collect();
		// For multi-part claims, build a shared tracker of which channels still need the
		// preimage; each per-hop claim updates it via the completion action / RAA blocker.
		let pending_mpp_claim_ptr_opt = if sources.len() > 1 {
			let mut channels_without_preimage = Vec::with_capacity(mpp_parts.len());
			for part in mpp_parts.iter() {
				let chan = (part.counterparty_node_id, part.funding_txo, part.channel_id);
				if !channels_without_preimage.contains(&chan) {
					channels_without_preimage.push(chan);
				}
			}
			Some(Arc::new(Mutex::new(PendingMPPClaim {
				channels_without_preimage,
				channels_with_preimage: Vec::new(),
			})))
		} else {
			None
		};
		let payment_info = Some(PaymentClaimDetails { mpp_parts, claiming_payment });
		for htlc in sources {
			let this_mpp_claim = pending_mpp_claim_ptr_opt.as_ref().and_then(|pending_mpp_claim|
				if let Some(cp_id) = htlc.prev_hop.counterparty_node_id {
					let claim_ptr = PendingMPPClaimPointer(Arc::clone(pending_mpp_claim));
					Some((cp_id, htlc.prev_hop.channel_id, claim_ptr))
				} else {
					None
				}
			);
			let raa_blocker = pending_mpp_claim_ptr_opt.as_ref().map(|pending_claim| {
				RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
					pending_claim: PendingMPPClaimPointer(Arc::clone(pending_claim)),
				}
			});
			self.claim_funds_from_hop(
				htlc.prev_hop, payment_preimage, payment_info.clone(),
				|_, definitely_duplicate| {
					debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
					(Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim: this_mpp_claim }), raa_blocker)
				}
			);
		}
	} else {
		// The MPP sanity checks failed: fail every part backwards with
		// incorrect_or_unknown_payment_details (0x4000|15) and drop the pending claim.
		for htlc in sources {
			let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
			htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height.to_be_bytes());
			let source = HTLCSource::PreviousHopData(htlc.prev_hop);
			let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
			let receiver = HTLCDestination::FailedPayment { payment_hash };
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
		}
		self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
	}

	// Surface any accumulated channel errors (nothing pushes into `errs` in this function
	// today, but the drain keeps the shape consistent with related claim paths).
	for (counterparty_node_id, err) in errs.drain(..) {
		let res: Result<(), _> = Err(err);
		let _ = handle_error!(self, res, counterparty_node_id);
	}
}
7077
7078 fn claim_funds_from_hop<
7079 ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
7080 >(
7081 &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
7082 payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
7083 ) {
7084 let counterparty_node_id = prev_hop.counterparty_node_id.or_else(|| {
7085 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
7086 short_to_chan_info.get(&prev_hop.short_channel_id).map(|(cp_id, _)| *cp_id)
7087 });
7088
7089 let htlc_source = HTLCClaimSource {
7090 counterparty_node_id,
7091 funding_txo: prev_hop.outpoint,
7092 channel_id: prev_hop.channel_id,
7093 htlc_id: prev_hop.htlc_id,
7094 };
7095 self.claim_mpp_part(htlc_source, payment_preimage, payment_info, completion_action)
7096 }
7097
	/// Claims one part of a (possibly multi-part) payment against the channel identified by
	/// `prev_hop`, then runs `completion_action` to decide what bookkeeping to record.
	///
	/// If the channel is still open and funded, the claim goes through the live `Channel`;
	/// otherwise (closed channel) a `ChannelMonitorUpdate` carrying the preimage is applied
	/// directly. Panics if the HTLC predates counterparty-node-id tracking (pre-LDK-0.1 state).
	fn claim_mpp_part<
		ComplFunc: FnOnce(Option<u64>, bool) -> (Option<MonitorUpdateCompletionAction>, Option<RAAMonitorUpdateBlockingAction>)
	>(
		&self, prev_hop: HTLCClaimSource, payment_preimage: PaymentPreimage,
		payment_info: Option<PaymentClaimDetails>, completion_action: ComplFunc,
	) {
		// While startup background events are still being replayed some duplicate-claim
		// branches below are expected; remember that state up front.
		let during_init = !self.background_events_processed_since_startup.load(Ordering::Acquire);

		// Lock-order sanity checks: these locks must not already be held on this thread.
		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);

		let per_peer_state = self.per_peer_state.read().unwrap();
		let chan_id = prev_hop.channel_id;

		const MISSING_MON_ERROR: &'static str =
			"If we're going to claim an HTLC against a channel, we should always have *some* state for the channel, even if just the latest ChannelMonitor update_id. This failure indicates we need to claim an HTLC from a channel for which we did not have a ChannelMonitor at startup and didn't create one while running.";

		// Take the peer's state lock, if we know which peer the HTLC came from. Missing peer
		// state here is a fatal invariant violation (see MISSING_MON_ERROR).
		let mut peer_state_opt = prev_hop.counterparty_node_id.as_ref().map(
			|counterparty_node_id| per_peer_state.get(counterparty_node_id)
				.map(|peer_mutex| peer_mutex.lock().unwrap())
				.expect(MISSING_MON_ERROR)
		);

		// Fast path: the channel is still tracked and funded, so claim via the channel itself.
		if let Some(peer_state_lock) = peer_state_opt.as_mut() {
			let peer_state = &mut **peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(chan_id) {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					let fulfill_res = chan.get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, payment_info, &&logger);

					match fulfill_res {
						UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } => {
							// A fresh claim: record the completion action and RAA blocker, then
							// hand the monitor update off for (possibly async) persistence.
							let (action_opt, raa_blocker_opt) = completion_action(Some(htlc_value_msat), false);
							if let Some(action) = action_opt {
								log_trace!(logger, "Tracking monitor update completion action for channel {}: {:?}",
									chan_id, action);
								peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
							}
							if let Some(raa_blocker) = raa_blocker_opt {
								peer_state.actions_blocking_raa_monitor_updates.entry(chan_id).or_insert_with(Vec::new).push(raa_blocker);
							}
							handle_new_monitor_update!(self, prev_hop.funding_txo, monitor_update, peer_state_opt,
								peer_state, per_peer_state, chan);
						}
						UpdateFulfillCommitFetch::DuplicateClaim {} => {
							// The preimage was already in the channel; only completion
							// bookkeeping may remain to be done.
							let (action_opt, raa_blocker_opt) = completion_action(None, true);
							if let Some(raa_blocker) = raa_blocker_opt {
								// Outside of init-replay, a duplicate claim implies the blocker
								// was already recorded for this channel.
								debug_assert!(during_init ||
									peer_state.actions_blocking_raa_monitor_updates.get(&chan_id).unwrap().contains(&raa_blocker));
							}
							let action = if let Some(action) = action_opt {
								action
							} else {
								return;
							};

							// Drop the per-peer lock before touching another peer's state below.
							mem::drop(peer_state_opt);

							log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}",
								chan_id, action);
							if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
								downstream_counterparty_node_id: node_id,
								downstream_funding_outpoint: _,
								blocking_action: blocker, downstream_channel_id: channel_id,
							} = action {
								// Remove exactly one matching RAA blocker from the downstream
								// channel's blocker list (retain drops only the first match).
								if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
									let mut peer_state = peer_state_mtx.lock().unwrap();
									if let Some(blockers) = peer_state
										.actions_blocking_raa_monitor_updates
										.get_mut(&channel_id)
									{
										let mut found_blocker = false;
										blockers.retain(|iter| {
											// Keep every entry except the first one equal to
											// `blocker`.
											let first_blocker = !found_blocker;
											if *iter == blocker { found_blocker = true; }
											*iter != blocker || !first_blocker
										});
										debug_assert!(found_blocker);
									}
								} else {
									debug_assert!(false);
								}
							} else if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
								debug_assert!(during_init,
									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
								mem::drop(per_peer_state);
								self.handle_monitor_update_completion_actions([action]);
							} else {
								debug_assert!(false,
									"Duplicate claims should always either be for forwarded payments(freeing another channel immediately) or during init (for claim replay)");
								return;
							};
						}
					}
				}
				return;
			}
		}

		// Slow path: the channel is no longer in `channel_by_id` (i.e. it was closed). We can
		// only proceed if we know the counterparty, which all HTLCs created by LDK >= 0.1 do.
		if prev_hop.counterparty_node_id.is_none() {
			let payment_hash: PaymentHash = payment_preimage.into();
			panic!(
				"Prior to upgrading to LDK 0.1, all pending HTLCs forwarded by LDK 0.0.123 or before must be resolved. It appears at least the HTLC with payment_hash {} (preimage {}) was not resolved. Please downgrade to LDK 0.0.125 and resolve the HTLC prior to upgrading.",
				payment_hash,
				payment_preimage,
			);
		}
		let counterparty_node_id = prev_hop.counterparty_node_id.expect("Checked immediately above");
		let mut peer_state = peer_state_opt.expect("peer_state_opt is always Some when the counterparty_node_id is Some");

		// Allocate the next monitor update id for this closed channel; its absence indicates
		// an internal bug and is fatal.
		let update_id = if let Some(latest_update_id) = peer_state.closed_channel_monitor_update_ids.get_mut(&chan_id) {
			*latest_update_id = latest_update_id.saturating_add(1);
			*latest_update_id
		} else {
			let err = "We need the latest ChannelMonitorUpdate ID to build a new update.
This should have been checked for availability on startup but somehow it is no longer available.
This indicates a bug inside LDK. Please report this error at https://github.com/lightningdevkit/rust-lightning/issues/new";
			log_error!(self.logger, "{}", err);
			panic!("{}", err);
		};

		// Build a monitor update that hands the preimage directly to the ChannelMonitor.
		let preimage_update = ChannelMonitorUpdate {
			update_id,
			counterparty_node_id: Some(counterparty_node_id),
			updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
				payment_preimage,
				payment_info,
			}],
			channel_id: Some(prev_hop.channel_id),
		};

		// Note: for the closed-channel path the claim is neither a fresh in-channel claim nor
		// definitely a duplicate, hence (None, false).
		let (action_opt, raa_blocker_opt) = completion_action(None, false);

		if let Some(raa_blocker) = raa_blocker_opt {
			peer_state.actions_blocking_raa_monitor_updates
				.entry(prev_hop.channel_id)
				.or_default()
				.push(raa_blocker);
		}

		let payment_hash = payment_preimage.into();
		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(chan_id), Some(payment_hash));

		if let Some(action) = action_opt {
			log_trace!(logger, "Tracking monitor update completion action for closed channel {}: {:?}",
				chan_id, action);
			peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
		}

		handle_new_monitor_update!(
			self, prev_hop.funding_txo, preimage_update, peer_state, peer_state, per_peer_state,
			counterparty_node_id, chan_id, POST_CHANNEL_CLOSE
		);
	}
7279
7280 fn finalize_claims(&self, sources: Vec<HTLCSource>) {
7281 self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
7282 }
7283
	/// Handles an upstream claim for an HTLC we either originated (outbound route) or
	/// forwarded (previous hop data).
	///
	/// For our own payments this records the claim with the outbound-payments tracker; for
	/// forwarded HTLCs it claims backwards on the inbound channel and, on success, queues a
	/// `PaymentForwarded` event.
	fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
		forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
		startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
		next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
	) {
		match source {
			HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
				debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire),
					"We don't support claim_htlc claims during startup - monitors may not be available yet");
				if let Some(pubkey) = next_channel_counterparty_node_id {
					debug_assert_eq!(pubkey, path.hops[0].pubkey);
				}
				// Only release the blocked RAA monitor update once the PaymentSent (or
				// similar) event has been handled by the user.
				let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
					channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id,
					counterparty_node_id: path.hops[0].pubkey,
				};
				self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
					session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
					&self.logger);
			},
			HTLCSource::PreviousHopData(hop_data) => {
				let prev_channel_id = hop_data.channel_id;
				let prev_user_channel_id = hop_data.user_channel_id;
				let prev_node_id = hop_data.counterparty_node_id;
				let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
				self.claim_funds_from_hop(hop_data, payment_preimage, None,
					|htlc_claim_value_msat, definitely_duplicate| {
						// Info needed to unblock the downstream channel once our claim
						// completes, if we know the downstream counterparty.
						let chan_to_release =
							if let Some(node_id) = next_channel_counterparty_node_id {
								Some(EventUnblockedChannel {
									counterparty_node_id: node_id,
									funding_txo: next_channel_outpoint,
									channel_id: next_channel_id,
									blocking_action: completed_blocker
								})
							} else {
								None
							};

						if definitely_duplicate && startup_replay {
							// During startup replay a duplicate claim needs no further action.
							(None, None)
						} else if definitely_duplicate {
							// Duplicate claim while running: skip the event but still free the
							// downstream channel if one is blocked on us.
							if let Some(other_chan) = chan_to_release {
								(Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
									downstream_counterparty_node_id: other_chan.counterparty_node_id,
									downstream_funding_outpoint: other_chan.funding_txo,
									downstream_channel_id: other_chan.channel_id,
									blocking_action: other_chan.blocking_action,
								}), None)
							} else { (None, None) }
						} else {
							// Fresh claim: compute the fee we earned (inbound minus outbound
							// value) and queue a PaymentForwarded event.
							let total_fee_earned_msat = if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
								if let Some(claimed_htlc_value) = htlc_claim_value_msat {
									Some(claimed_htlc_value - forwarded_htlc_value)
								} else { None }
							} else { None };
							debug_assert!(skimmed_fee_msat <= total_fee_earned_msat,
								"skimmed_fee_msat must always be included in total_fee_earned_msat");
							(Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
								event: events::Event::PaymentForwarded {
									prev_channel_id: Some(prev_channel_id),
									next_channel_id: Some(next_channel_id),
									prev_user_channel_id,
									next_user_channel_id,
									prev_node_id,
									next_node_id: next_channel_counterparty_node_id,
									total_fee_earned_msat,
									skimmed_fee_msat,
									claim_from_onchain_tx: from_onchain,
									outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
								},
								downstream_counterparty_and_funding_outpoint: chan_to_release,
							}), None)
						}
					});
			},
		}
	}
7371
7372 pub fn get_our_node_id(&self) -> PublicKey {
7374 self.our_network_pubkey
7375 }
7376
	/// Processes actions queued to run after a `ChannelMonitorUpdate` completes: finishing
	/// MPP-claim bookkeeping, queueing `PaymentClaimed`/forwarding events, and releasing
	/// channels blocked on the update.
	///
	/// Must be called without the listed locks held (asserted below), as it takes them itself.
	fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
		debug_assert_ne!(self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.claimable_payments.held_by_thread(), LockHeldState::HeldByThread);
		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);

		// Channels whose RAA blockers we can release once all MPP parts have the preimage;
		// processed at the end to avoid holding per-peer locks while releasing.
		let mut freed_channels = Vec::new();

		for action in actions.into_iter() {
			match action {
				MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim } => {
					if let Some((counterparty_node_id, chan_id, claim_ptr)) = pending_mpp_claim {
						let per_peer_state = self.per_peer_state.read().unwrap();
						per_peer_state.get(&counterparty_node_id).map(|peer_state_mutex| {
							let mut peer_state = peer_state_mutex.lock().unwrap();
							let blockers_entry = peer_state.actions_blocking_raa_monitor_updates.entry(chan_id);
							if let btree_map::Entry::Occupied(mut blockers) = blockers_entry {
								blockers.get_mut().retain(|blocker|
									if let &RAAMonitorUpdateBlockingAction::ClaimedMPPPayment { pending_claim } = &blocker {
										if *pending_claim == claim_ptr {
											let mut pending_claim_state_lock = pending_claim.0.lock().unwrap();
											let pending_claim_state = &mut *pending_claim_state_lock;
											// Move this channel from "without preimage" to
											// "with preimage" for the shared MPP claim state.
											pending_claim_state.channels_without_preimage.retain(|(cp, op, cid)| {
												let this_claim =
													*cp == counterparty_node_id && *cid == chan_id;
												if this_claim {
													pending_claim_state.channels_with_preimage.push((*cp, *op, *cid));
													false
												} else { true }
											});
											// Once every part has the preimage, all blocked
											// channels can be freed.
											if pending_claim_state.channels_without_preimage.is_empty() {
												for (cp, op, cid) in pending_claim_state.channels_with_preimage.iter() {
													let freed_chan = (*cp, *op, *cid, blocker.clone());
													freed_channels.push(freed_chan);
												}
											}
											// Keep the blocker only while some part still
											// lacks the preimage.
											!pending_claim_state.channels_without_preimage.is_empty()
										} else { true }
									} else { true }
								);
								if blockers.get().is_empty() {
									blockers.remove();
								}
							}
						});
					}

					// Queue the PaymentClaimed event (deduplicated) for the now-fully-claimed
					// payment, if it was still pending.
					let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
					if let Some(ClaimingPayment {
						amount_msat,
						payment_purpose: purpose,
						receiver_node_id,
						htlcs,
						sender_intended_value: sender_intended_total_msat,
						onion_fields,
						payment_id,
					}) = payment {
						let event = events::Event::PaymentClaimed {
							payment_hash,
							purpose,
							amount_msat,
							receiver_node_id: Some(receiver_node_id),
							htlcs,
							sender_intended_total_msat,
							onion_fields,
							payment_id,
						};
						let event_action = (event, None);
						let mut pending_events = self.pending_events.lock().unwrap();
						if !pending_events.contains(&event_action) {
							pending_events.push_back(event_action);
						}
					}
				},
				MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
					event, downstream_counterparty_and_funding_outpoint
				} => {
					self.pending_events.lock().unwrap().push_back((event, None));
					if let Some(unblocked) = downstream_counterparty_and_funding_outpoint {
						self.handle_monitor_update_release(
							unblocked.counterparty_node_id, unblocked.funding_txo,
							unblocked.channel_id, Some(unblocked.blocking_action),
						);
					}
				},
				MonitorUpdateCompletionAction::FreeOtherChannelImmediately {
					downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action,
				} => {
					self.handle_monitor_update_release(
						downstream_counterparty_node_id,
						downstream_funding_outpoint,
						downstream_channel_id,
						Some(blocking_action),
					);
				},
			}
		}

		// Release all channels collected above, now that no per-peer locks are held.
		for (node_id, funding_outpoint, channel_id, blocker) in freed_channels {
			self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker));
		}
	}
7482
	/// Emits the messages and events needed to resume a channel after a monitor update (or
	/// reconnection) completes: RAA/commitment messages in the required `order`,
	/// channel_ready/announcement/tx_signatures messages, and funding broadcast.
	///
	/// Returns the HTLC forwards and update_add HTLCs to decode which the caller must process
	/// once all relevant locks are released.
	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
		channel: &mut Channel<SP>, raa: Option<msgs::RevokeAndACK>,
		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
		pending_forwards: Vec<(PendingHTLCInfo, u64)>, pending_update_adds: Vec<msgs::UpdateAddHTLC>,
		funding_broadcastable: Option<Transaction>,
		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>,
		tx_signatures: Option<msgs::TxSignatures>
	) -> (Option<(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)>, Option<(u64, Vec<msgs::UpdateAddHTLC>)>) {
		let logger = WithChannelContext::from(&self.logger, &channel.context, None);
		log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {} pending update_add_htlcs, {}broadcasting funding, {} channel ready, {} announcement, {} tx_signatures",
			&channel.context.channel_id(),
			if raa.is_some() { "an" } else { "no" },
			if commitment_update.is_some() { "a" } else { "no" },
			pending_forwards.len(), pending_update_adds.len(),
			if funding_broadcastable.is_some() { "" } else { "not " },
			if channel_ready.is_some() { "sending" } else { "without" },
			if announcement_sigs.is_some() { "sending" } else { "without" },
			if tx_signatures.is_some() { "sending" } else { "without" },
		);

		let counterparty_node_id = channel.context.get_counterparty_node_id();
		// Use the outbound SCID alias when the channel has no real short channel id yet.
		let short_channel_id = channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias());

		let mut htlc_forwards = None;
		if !pending_forwards.is_empty() {
			htlc_forwards = Some((
				short_channel_id, Some(channel.context.get_counterparty_node_id()),
				channel.context.get_funding_txo().unwrap(), channel.context.channel_id(),
				channel.context.get_user_id(), pending_forwards
			));
		}
		let mut decode_update_add_htlcs = None;
		if !pending_update_adds.is_empty() {
			decode_update_add_htlcs = Some((short_channel_id, pending_update_adds));
		}

		if let Some(msg) = channel_ready {
			send_channel_ready!(self, pending_msg_events, channel, msg);
		}
		if let Some(msg) = announcement_sigs {
			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
				node_id: counterparty_node_id,
				msg,
			});
		}
		if let Some(msg) = tx_signatures {
			pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
				node_id: counterparty_node_id,
				msg,
			});
		}

		// The commitment update and RAA must be queued in the exact `order` given by the
		// channel state machine; macros let us pick the ordering below.
		macro_rules! handle_cs { () => {
			if let Some(update) = commitment_update {
				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
					node_id: counterparty_node_id,
					updates: update,
				});
			}
		} }
		macro_rules! handle_raa { () => {
			if let Some(revoke_and_ack) = raa {
				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
					node_id: counterparty_node_id,
					msg: revoke_and_ack,
				});
			}
		} }
		match order {
			RAACommitmentOrder::CommitmentFirst => {
				handle_cs!();
				handle_raa!();
			},
			RAACommitmentOrder::RevokeAndACKFirst => {
				handle_raa!();
				handle_cs!();
			},
		}

		if let Some(tx) = funding_broadcastable {
			if channel.context.is_manual_broadcast() {
				// The user opted to broadcast the funding tx themselves; notify them it is
				// now safe to do so instead of broadcasting it here.
				log_info!(logger, "Not broadcasting funding transaction with txid {} as it is manually managed", tx.compute_txid());
				let mut pending_events = self.pending_events.lock().unwrap();
				match channel.context.get_funding_txo() {
					Some(funding_txo) => {
						emit_funding_tx_broadcast_safe_event!(pending_events, channel, funding_txo.into_bitcoin_outpoint())
					},
					None => {
						debug_assert!(false, "Channel resumed without a funding txo, this should never happen!");
						return (htlc_forwards, decode_update_add_htlcs);
					}
				};
			} else {
				log_info!(logger, "Broadcasting funding transaction with txid {}", tx.compute_txid());
				self.tx_broadcaster.broadcast_transactions(&[&tx]);
			}
		}

		{
			let mut pending_events = self.pending_events.lock().unwrap();
			emit_channel_pending_event!(pending_events, channel);
			emit_channel_ready_event!(pending_events, channel);
		}

		(htlc_forwards, decode_update_add_htlcs)
	}
7591
	/// Called when a `ChannelMonitor` update completes, to drop the corresponding in-flight
	/// update entries and, if none remain, resume the channel (or apply post-update actions
	/// if the channel is closed).
	fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
		// Caller must hold the total consistency lock (as a read lock).
		debug_assert!(self.total_consistency_lock.try_write().is_err()); let counterparty_node_id = match counterparty_node_id {
			Some(cp_id) => cp_id.clone(),
			None => {
				// Legacy callers may not know the counterparty; resolve it via the funding
				// outpoint, bailing if the mapping is gone.
				let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
				match outpoint_to_peer.get(funding_txo) {
					Some(cp_id) => cp_id.clone(),
					None => return,
				}
			}
		};
		let per_peer_state = self.per_peer_state.read().unwrap();
		let mut peer_state_lock;
		let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
		if peer_state_mutex_opt.is_none() { return }
		peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
		let peer_state = &mut *peer_state_lock;

		// Drop all in-flight updates at or below the applied id; anything newer is still
		// pending persistence.
		let remaining_in_flight =
			if let Some(pending) = peer_state.in_flight_monitor_updates.get_mut(funding_txo) {
				pending.retain(|upd| upd.update_id > highest_applied_update_id);
				pending.len()
			} else { 0 };

		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(*channel_id), None);
		log_trace!(logger, "ChannelMonitor updated to {}. {} pending in-flight updates.",
			highest_applied_update_id, remaining_in_flight);

		if remaining_in_flight != 0 {
			return;
		}

		if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(channel_id) {
			if chan.is_awaiting_monitor_update() {
				if chan.blocked_monitor_updates_pending() == 0 {
					log_trace!(logger, "Channel is open and awaiting update, resuming it");
					handle_monitor_update_completion!(self, peer_state_lock, peer_state, per_peer_state, chan);
				} else {
					log_trace!(logger, "Channel is open and awaiting update, leaving it blocked due to a blocked monitor update");
				}
			} else {
				log_trace!(logger, "Channel is open but not awaiting update");
			}
		} else {
			// Channel is closed: run any queued post-update actions. Locks must be dropped
			// first since handle_monitor_update_completion_actions re-takes them.
			let update_actions = peer_state.monitor_update_blocked_actions
				.remove(channel_id).unwrap_or(Vec::new());
			log_trace!(logger, "Channel is closed, applying {} post-update actions", update_actions.len());
			mem::drop(peer_state_lock);
			mem::drop(per_peer_state);
			self.handle_monitor_update_completion_actions(update_actions);
		}
	}
7648
7649 pub fn accept_inbound_channel(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7670 self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id, vec![], Weight::from_wu(0))
7671 }
7672
7673 pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
7692 self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id, vec![], Weight::from_wu(0))
7693 }
7694
	/// Shared implementation of [`Self::accept_inbound_channel`] and
	/// [`Self::accept_inbound_channel_from_trusted_peer_0conf`]: constructs the inbound
	/// channel object from the queued open_channel message and queues the accept message.
	///
	/// `_funding_inputs`/`_total_witness_weight` are only used for dual-funded (V2) channels.
	fn do_accept_inbound_channel(
		&self, temporary_channel_id: &ChannelId, counterparty_node_id: &PublicKey, accept_0conf: bool,
		user_channel_id: u128, _funding_inputs: Vec<(TxIn, TransactionU16LenLimited)>,
		_total_witness_weight: Weight,
	) -> Result<(), APIError> {
		let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(*temporary_channel_id), None);
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Count peers with only unfunded channels before taking the per-peer lock; used for
		// the unfunded-channel-peer DoS limit below.
		let peers_without_funded_channels =
			self.peers_without_funded_channels(|peer| { peer.total_channel_count() > 0 });
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				let err_str = format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id);
				log_error!(logger, "{}", err_str);

				APIError::ChannelUnavailable { err: err_str }
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		let is_only_peer_channel = peer_state.total_channel_count() == 1;

		// Remove the queued request and build the channel; the Err path is handled after the
		// match so locks can be dropped first.
		let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
			Some(unaccepted_channel) => {
				let best_block_height = self.best_block.read().unwrap().height;
				match unaccepted_channel.open_channel_msg {
					OpenChannelMessage::V1(open_channel_msg) => {
						InboundV1Channel::new(
							&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
							&self.channel_type_features(), &peer_state.latest_features, &open_channel_msg,
							user_channel_id, &self.default_configuration, best_block_height, &self.logger, accept_0conf
						).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id)
						).map(|mut channel| {
							let logger = WithChannelContext::from(&self.logger, &channel.context, None);
							let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
								events::MessageSendEvent::SendAcceptChannel {
									node_id: *counterparty_node_id,
									msg,
								}
							});
							(*temporary_channel_id, ChannelPhase::UnfundedInboundV1(channel), message_send_event)
						})
					},
					#[cfg(dual_funding)]
					OpenChannelMessage::V2(open_channel_msg) => {
						InboundV2Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
							self.get_our_node_id(), *counterparty_node_id, &self.channel_type_features(), &peer_state.latest_features,
							&open_channel_msg, _funding_inputs, _total_witness_weight, user_channel_id,
							&self.default_configuration, best_block_height, &self.logger
						).map_err(|_| MsgHandleErrInternal::from_chan_no_close(
							ChannelError::Close(
								(
									"V2 channel rejected due to sender error".into(),
									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
								)
							), *temporary_channel_id)
						).map(|channel| {
							let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
								node_id: channel.context.get_counterparty_node_id(),
								msg: channel.accept_inbound_dual_funded_channel()
							};
							(channel.context.channel_id(), ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
						})
					},
				}
			},
			None => {
				let err_str = "No such channel awaiting to be accepted.".to_owned();
				log_error!(logger, "{}", err_str);

				return Err(APIError::APIMisuseError { err: err_str });
			}
		};

		let (channel_id, mut channel_phase, message_send_event) = match res {
			Ok(res) => res,
			Err(err) => {
				// Drop locks before handle_error!, which takes them itself.
				mem::drop(peer_state_lock);
				mem::drop(per_peer_state);
				match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
					Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
					Err(e) => {
						return Err(APIError::ChannelUnavailable { err: e.err });
					},
				}
			}
		};

		if accept_0conf {
			debug_assert!(channel_phase.context().minimum_depth().unwrap() == 0);
		} else if channel_phase.context().get_channel_type().requires_zero_conf() {
			// The peer demanded 0conf but the caller used the confirmation-requiring accept
			// method: send an error to the peer and surface an API misuse error.
			let send_msg_err_event = events::MessageSendEvent::HandleError {
				node_id: channel_phase.context().get_counterparty_node_id(),
				action: msgs::ErrorAction::SendErrorMessage{
					msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "No zero confirmation channels accepted".to_owned(), }
				}
			};
			peer_state.pending_msg_events.push(send_msg_err_event);
			let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
			log_error!(logger, "{}", err_str);

			return Err(APIError::APIMisuseError { err: err_str });
		} else {
			// DoS limit: refuse if this channel would push us over the cap of peers with
			// exclusively unfunded channels.
			if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
				let send_msg_err_event = events::MessageSendEvent::HandleError {
					node_id: channel_phase.context().get_counterparty_node_id(),
					action: msgs::ErrorAction::SendErrorMessage{
						msg: msgs::ErrorMessage { channel_id: *temporary_channel_id, data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
					}
				};
				peer_state.pending_msg_events.push(send_msg_err_event);
				let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
				log_error!(logger, "{}", err_str);

				return Err(APIError::APIMisuseError { err: err_str });
			}
		}

		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);

		if let Some(message_send_event) = message_send_event {
			peer_state.pending_msg_events.push(message_send_event);
		}
		peer_state.channel_by_id.insert(channel_id, channel_phase);

		Ok(())
	}
7835
7836 fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
7842 where Filter: Fn(&PeerState<SP>) -> bool {
7843 let mut peers_without_funded_channels = 0;
7844 let best_block_height = self.best_block.read().unwrap().height;
7845 {
7846 let peer_state_lock = self.per_peer_state.read().unwrap();
7847 for (_, peer_mtx) in peer_state_lock.iter() {
7848 let peer = peer_mtx.lock().unwrap();
7849 if !maybe_count_peer(&*peer) { continue; }
7850 let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
7851 if num_unfunded_channels == peer.total_channel_count() {
7852 peers_without_funded_channels += 1;
7853 }
7854 }
7855 }
7856 return peers_without_funded_channels;
7857 }
7858
7859 fn unfunded_channel_count(
7860 peer: &PeerState<SP>, best_block_height: u32
7861 ) -> usize {
7862 let mut num_unfunded_channels = 0;
7863 for (_, phase) in peer.channel_by_id.iter() {
7864 match phase {
7865 ChannelPhase::Funded(chan) => {
7866 if !chan.context.is_outbound() && chan.context.minimum_depth().unwrap_or(1) != 0 &&
7869 chan.context.get_funding_tx_confirmations(best_block_height) == 0
7870 {
7871 num_unfunded_channels += 1;
7872 }
7873 },
7874 ChannelPhase::UnfundedInboundV1(chan) => {
7875 if chan.context.minimum_depth().unwrap_or(1) != 0 {
7876 num_unfunded_channels += 1;
7877 }
7878 },
7879 ChannelPhase::UnfundedInboundV2(chan) => {
7880 if chan.context.minimum_depth().unwrap_or(1) != 0 &&
7883 chan.dual_funding_context.our_funding_satoshis == 0 {
7884 num_unfunded_channels += 1;
7885 }
7886 },
7887 ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedOutboundV2(_) => {
7888 continue;
7890 },
7891 }
7892 }
7893 num_unfunded_channels + peer.inbound_channel_request_by_id.len()
7894 }
7895
	/// Handles an inbound `open_channel` (V1) or `open_channel2` (V2) message: enforces our
	/// chain, configuration, and unfunded-channel DoS limits, then either queues an
	/// [`events::Event::OpenChannelRequest`] for manual acceptance or auto-accepts the
	/// channel and queues the corresponding accept message.
	fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: OpenChannelMessageRef<'_>) -> Result<(), MsgHandleErrInternal> {
		// V1 and V2 open messages share a common field set; all checks below use it.
		let common_fields = match msg {
			OpenChannelMessageRef::V1(msg) => &msg.common_fields,
			#[cfg(dual_funding)]
			OpenChannelMessageRef::V2(msg) => &msg.common_fields,
		};

		if common_fields.chain_hash != self.chain_hash {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(),
				common_fields.temporary_channel_id));
		}

		if !self.default_configuration.accept_inbound_channels {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Computed before taking this peer's lock below, as it locks every peer's state in
		// turn.
		let channeled_peers_without_funding =
			self.peers_without_funded_channels(|node| node.total_channel_count() > 0);

		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					common_fields.temporary_channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;

		// Global cap: if this peer has no channels yet and we already track the maximum
		// number of peers whose channels are all unfunded, reject — unless the user manually
		// screens inbound channels, in which case they apply their own policy.
		if peer_state.total_channel_count() == 0 &&
			channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
			!self.default_configuration.manually_accept_inbound_channels
		{
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				"Have too many peers with unfunded channels, not accepting new ones".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Per-peer cap on unfunded channels, independent of the global cap above.
		let best_block_height = self.best_block.read().unwrap().height;
		if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
				common_fields.temporary_channel_id));
		}

		let channel_id = common_fields.temporary_channel_id;
		let channel_exists = peer_state.has_channel(&channel_id);
		if channel_exists {
			return Err(MsgHandleErrInternal::send_err_msg_no_close(
				"temporary_channel_id collision for the same peer!".to_owned(),
				common_fields.temporary_channel_id));
		}

		// Negotiate the channel type from the peer's request and our supported features.
		let channel_type = channel::channel_type_from_open_channel(
			common_fields, &peer_state.latest_features, &self.channel_type_features()
		).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, common_fields.temporary_channel_id))?;

		// Manual acceptance: surface an event to the user and park the open message until it
		// is accepted or ages out via `ticks_remaining`.
		if self.default_configuration.manually_accept_inbound_channels {
			let mut pending_events = self.pending_events.lock().unwrap();
			// Bit 0 of channel_flags is announce_channel per the open_channel message.
			let is_announced = (common_fields.channel_flags & 1) == 1;
			pending_events.push_back((events::Event::OpenChannelRequest {
				temporary_channel_id: common_fields.temporary_channel_id,
				counterparty_node_id: *counterparty_node_id,
				funding_satoshis: common_fields.funding_satoshis,
				channel_negotiation_type: match msg {
					OpenChannelMessageRef::V1(msg) => InboundChannelFunds::PushMsat(msg.push_msat),
					#[cfg(dual_funding)]
					OpenChannelMessageRef::V2(_) => InboundChannelFunds::DualFunded,
				},
				channel_type,
				is_announced,
				params: common_fields.channel_parameters(),
			}, None));
			peer_state.inbound_channel_request_by_id.insert(channel_id, InboundChannelRequest {
				open_channel_msg: match msg {
					OpenChannelMessageRef::V1(msg) => OpenChannelMessage::V1(msg.clone()),
					#[cfg(dual_funding)]
					OpenChannelMessageRef::V2(msg) => OpenChannelMessage::V2(msg.clone()),
				},
				ticks_remaining: UNACCEPTED_INBOUND_CHANNEL_AGE_LIMIT_TICKS,
			});
			return Ok(());
		}

		// Auto-acceptance from here on. Pick a random user_channel_id for the new channel.
		let mut random_bytes = [0u8; 16];
		random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
		let user_channel_id = u128::from_be_bytes(random_bytes);

		// Zero-conf and anchor-output channels are never auto-accepted.
		if channel_type.requires_zero_conf() {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), common_fields.temporary_channel_id));
		}
		if channel_type.requires_anchors_zero_fee_htlc_tx() {
			return Err(MsgHandleErrInternal::send_err_msg_no_close("No channels with anchor outputs accepted".to_owned(), common_fields.temporary_channel_id));
		}

		// Build the inbound channel object and the accept message to send back.
		let (mut channel_phase, message_send_event) = match msg {
			OpenChannelMessageRef::V1(msg) => {
				let mut channel = InboundV1Channel::new(
					&self.fee_estimator, &self.entropy_source, &self.signer_provider, *counterparty_node_id,
					&self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
					&self.default_configuration, best_block_height, &self.logger, false
				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
				let logger = WithChannelContext::from(&self.logger, &channel.context, None);
				let message_send_event = channel.accept_inbound_channel(&&logger).map(|msg| {
					events::MessageSendEvent::SendAcceptChannel {
						node_id: *counterparty_node_id,
						msg,
					}
				});
				(ChannelPhase::UnfundedInboundV1(channel), message_send_event)
			},
			#[cfg(dual_funding)]
			OpenChannelMessageRef::V2(msg) => {
				let channel = InboundV2Channel::new(&self.fee_estimator, &self.entropy_source,
					&self.signer_provider, self.get_our_node_id(), *counterparty_node_id,
					&self.channel_type_features(), &peer_state.latest_features, msg, vec![], Weight::from_wu(0),
					user_channel_id, &self.default_configuration, best_block_height, &self.logger
				).map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.common_fields.temporary_channel_id))?;
				let message_send_event = events::MessageSendEvent::SendAcceptChannelV2 {
					node_id: *counterparty_node_id,
					msg: channel.accept_inbound_dual_funded_channel(),
				};
				(ChannelPhase::UnfundedInboundV2(channel), Some(message_send_event))
			},
		};

		// Assign an SCID alias before tracking the channel so it can be referenced in
		// forwarding before funding confirms.
		let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
		channel_phase.context_mut().set_outbound_scid_alias(outbound_scid_alias);

		if let Some(message_send_event) = message_send_event {
			peer_state.pending_msg_events.push(message_send_event);
		}
		peer_state.channel_by_id.insert(channel_phase.context().channel_id(), channel_phase);

		Ok(())
	}
8048
	/// Handles an `accept_channel` message for an outbound V1 channel we previously opened,
	/// queueing a [`events::Event::FundingGenerationReady`] so the user can build the
	/// funding transaction.
	fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
		// Collect the event fields inside this scope so the per-peer lock is released before
		// the pending-events lock is taken below.
		let (value, output_script, user_id) = {
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id)
				})?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.common_fields.temporary_channel_id) {
				hash_map::Entry::Occupied(mut phase) => {
					match phase.get_mut() {
						// Only a not-yet-funded outbound V1 channel can receive accept_channel;
						// the macro early-returns (closing the channel) on failure.
						ChannelPhase::UnfundedOutboundV1(chan) => {
							try_chan_phase_entry!(self, peer_state, chan.accept_channel(msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), phase);
							(chan.context.get_value_satoshis(), chan.context.get_funding_redeemscript().to_p2wsh(), chan.context.get_user_id())
						},
						_ => {
							return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected accept_channel message from peer with counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id));
						}
					}
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.common_fields.temporary_channel_id))
			}
		};
		// Let the user know they can now provide the funding transaction.
		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push_back((events::Event::FundingGenerationReady {
			temporary_channel_id: msg.common_fields.temporary_channel_id,
			counterparty_node_id: *counterparty_node_id,
			channel_value_satoshis: value,
			output_script,
			user_channel_id: user_id,
		}, None));
		Ok(())
	}
8086
	/// Handles a `funding_created` message for an inbound V1 channel: transitions it from
	/// `UnfundedInboundV1` to `Funded`, registers its initial [`ChannelMonitor`] with the
	/// chain monitor, and queues `funding_signed` in response.
	fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
		let best_block = *self.best_block.read().unwrap();

		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
			})?;

		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		// The channel is removed from the map under its temporary id; on success it is
		// re-inserted below under its funded (real) channel id.
		let (mut chan, funding_msg_opt, monitor) =
			match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
				Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
					let logger = WithChannelContext::from(&self.logger, &inbound_chan.context, None);
					match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
						Ok(res) => res,
						Err((inbound_chan, err)) => {
							debug_assert!(matches!(err, ChannelError::Close(_)));
							return Err(convert_chan_phase_err!(self, peer_state, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
						},
					}
				},
				// Any other phase means the peer sent funding_created out of order; close the
				// channel.
				Some(mut phase) => {
					let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
					let err = ChannelError::close(err_msg);
					return Err(convert_chan_phase_err!(self, peer_state, err, &mut phase, &msg.temporary_channel_id).1);
				},
				None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
			};

		let funded_channel_id = chan.context.channel_id();

		// Failure path shared below: clear the just-set funding info (so the channel is
		// treated as never-funded) before converting the error.
		macro_rules! fail_chan { ($err: expr) => { {
			let err = ChannelError::close($err.to_owned());
			chan.unset_funding_info(msg.temporary_channel_id);
			return Err(convert_chan_phase_err!(self, peer_state, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
		} } }

		match peer_state.channel_by_id.entry(funded_channel_id) {
			hash_map::Entry::Occupied(_) => {
				fail_chan!("Already had channel with the new channel_id");
			},
			hash_map::Entry::Vacant(e) => {
				let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
				match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
					hash_map::Entry::Occupied(_) => {
						fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
					},
					hash_map::Entry::Vacant(i_e) => {
						// Register the initial monitor before sending funding_signed, so we
						// can watch the chain for the funding transaction.
						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
						if let Ok(persist_state) = monitor_res {
							i_e.insert(chan.context.get_counterparty_node_id());
							mem::drop(outpoint_to_peer_lock);

							if let Some(msg) = funding_msg_opt {
								peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
									node_id: counterparty_node_id.clone(),
									msg,
								});
							}

							if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
								handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
									per_peer_state, chan, INITIAL_MONITOR);
							} else {
								unreachable!("This must be a funded channel as we just inserted it.");
							}
							Ok(())
						} else {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
							fail_chan!("Duplicate funding outpoint");
						}
					}
				}
			}
		}
	}
8182
	/// Handles a `funding_signed` message for an outbound V1 channel: transitions it from
	/// `UnfundedOutboundV1` to `Funded` and registers its initial [`ChannelMonitor`] with
	/// the chain monitor.
	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
		let best_block = *self.best_block.read().unwrap();
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;

		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(chan_phase_entry) => {
				if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) {
					// Remove the channel from the map while it transitions; it is re-inserted
					// as Funded on success below.
					let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() };
					let logger = WithContext::from(
						&self.logger,
						Some(chan.context.get_counterparty_node_id()),
						Some(chan.context.channel_id()),
						None
					);
					let res =
						chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger);
					match res {
						Ok((mut chan, monitor)) => {
							if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
								// Re-insert the (now funded) channel before handling the
								// monitor-update status.
								let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan));
								if let ChannelPhase::Funded(ref mut chan) = &mut chan {
									handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR);
								} else { unreachable!(); }
								Ok(())
							} else {
								// Monitor registration failed: undo the funding info so the
								// channel is closed as never-funded.
								let e = ChannelError::close("Channel funding outpoint was a duplicate".to_owned());
								chan.unset_funding_info(msg.channel_id);
								return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1);
							}
						},
						Err((chan, e)) => {
							debug_assert!(matches!(e, ChannelError::Close(_)),
								"We don't have a channel anymore, so the error better have expected close");
							return Err(convert_chan_phase_err!(self, peer_state, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1);
						}
					}
				} else {
					return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id));
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
		}
	}
8243
8244 fn internal_tx_msg<HandleTxMsgFn: Fn(&mut ChannelPhase<SP>) -> Result<MessageSendEvent, &'static str>>(
8245 &self, counterparty_node_id: &PublicKey, channel_id: ChannelId, tx_msg_handler: HandleTxMsgFn
8246 ) -> Result<(), MsgHandleErrInternal> {
8247 let per_peer_state = self.per_peer_state.read().unwrap();
8248 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
8249 .ok_or_else(|| {
8250 debug_assert!(false);
8251 MsgHandleErrInternal::send_err_msg_no_close(
8252 format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
8253 channel_id)
8254 })?;
8255 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
8256 let peer_state = &mut *peer_state_lock;
8257 match peer_state.channel_by_id.entry(channel_id) {
8258 hash_map::Entry::Occupied(mut chan_phase_entry) => {
8259 let channel_phase = chan_phase_entry.get_mut();
8260 let msg_send_event = match tx_msg_handler(channel_phase) {
8261 Ok(msg_send_event) => msg_send_event,
8262 Err(tx_msg_str) => return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(
8263 format!("Got a {tx_msg_str} message with no interactive transaction construction expected or in-progress")
8264 ), channel_id)),
8265 };
8266 peer_state.pending_msg_events.push(msg_send_event);
8267 Ok(())
8268 },
8269 hash_map::Entry::Vacant(_) => {
8270 Err(MsgHandleErrInternal::send_err_msg_no_close(format!(
8271 "Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
8272 counterparty_node_id), channel_id)
8273 )
8274 }
8275 }
8276 }
8277
8278 fn internal_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) -> Result<(), MsgHandleErrInternal> {
8279 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8280 match channel_phase {
8281 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8282 Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8283 },
8284 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8285 Ok(channel.tx_add_input(msg).into_msg_send_event(counterparty_node_id))
8286 },
8287 _ => Err("tx_add_input"),
8288 }
8289 })
8290 }
8291
8292 fn internal_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) -> Result<(), MsgHandleErrInternal> {
8293 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8294 match channel_phase {
8295 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8296 Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8297 },
8298 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8299 Ok(channel.tx_add_output(msg).into_msg_send_event(counterparty_node_id))
8300 },
8301 _ => Err("tx_add_output"),
8302 }
8303 })
8304 }
8305
8306 fn internal_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) -> Result<(), MsgHandleErrInternal> {
8307 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8308 match channel_phase {
8309 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8310 Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8311 },
8312 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8313 Ok(channel.tx_remove_input(msg).into_msg_send_event(counterparty_node_id))
8314 },
8315 _ => Err("tx_remove_input"),
8316 }
8317 })
8318 }
8319
8320 fn internal_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) -> Result<(), MsgHandleErrInternal> {
8321 self.internal_tx_msg(&counterparty_node_id, msg.channel_id, |channel_phase: &mut ChannelPhase<SP>| {
8322 match channel_phase {
8323 ChannelPhase::UnfundedInboundV2(ref mut channel) => {
8324 Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8325 },
8326 ChannelPhase::UnfundedOutboundV2(ref mut channel) => {
8327 Ok(channel.tx_remove_output(msg).into_msg_send_event(counterparty_node_id))
8328 },
8329 _ => Err("tx_remove_output"),
8330 }
8331 })
8332 }
8333
	/// Handles a `tx_complete` message: forwards it to the V2 channel's interactive
	/// transaction constructor and, if construction finished, transitions the channel to
	/// `Funded` and queues our initial `commitment_signed`.
	fn internal_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				// tx_complete either yields another message to send (negotiation continues)
				// or a signing session (negotiation is done).
				let (msg_send_event_opt, signing_session_opt) = match channel_phase {
					ChannelPhase::UnfundedInboundV2(channel) => channel.tx_complete(msg)
						.into_msg_send_event_or_signing_session(counterparty_node_id),
					ChannelPhase::UnfundedOutboundV2(channel) => channel.tx_complete(msg)
						.into_msg_send_event_or_signing_session(counterparty_node_id),
					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
						(
							"Got a tx_complete message with no interactive transaction construction expected or in-progress".into(),
							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
						))), chan_phase_entry)
				};
				if let Some(msg_send_event) = msg_send_event_opt {
					peer_state.pending_msg_events.push(msg_send_event);
				};
				if let Some(mut signing_session) = signing_session_opt {
					// Funding tx fully constructed: produce our commitment_signed and
					// possibly a FundingTransactionReadyForSigning event.
					let (commitment_signed, funding_ready_for_sig_event_opt) = match chan_phase_entry.get_mut() {
						ChannelPhase::UnfundedOutboundV2(chan) => {
							chan.funding_tx_constructed(&mut signing_session, &self.logger)
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							chan.funding_tx_constructed(&mut signing_session, &self.logger)
						},
						_ => Err(ChannelError::Warn(
							"Got a tx_complete message with no interactive transaction construction expected or in-progress"
								.into())),
					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
					// Swap the unfunded phase out of the map for a funded Channel.
					let (channel_id, channel_phase) = chan_phase_entry.remove_entry();
					let channel = match channel_phase {
						ChannelPhase::UnfundedOutboundV2(chan) => chan.into_channel(signing_session),
						ChannelPhase::UnfundedInboundV2(chan) => chan.into_channel(signing_session),
						_ => {
							debug_assert!(false); Err(ChannelError::Warn(
								"Got a tx_complete message with no interactive transaction construction expected or in-progress"
									.into()))
						},
					}.map_err(|err| MsgHandleErrInternal::send_err_msg_no_close(format!("{}", err), msg.channel_id))?;
					peer_state.channel_by_id.insert(channel_id, ChannelPhase::Funded(channel));
					if let Some(funding_ready_for_sig_event) = funding_ready_for_sig_event_opt {
						let mut pending_events = self.pending_events.lock().unwrap();
						pending_events.push_back((funding_ready_for_sig_event, None));
					}
					// Send our initial commitment_signed for the newly-constructed funding tx.
					peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
						node_id: counterparty_node_id,
						updates: CommitmentUpdate {
							commitment_signed,
							update_add_htlcs: vec![],
							update_fulfill_htlcs: vec![],
							update_fail_htlcs: vec![],
							update_fail_malformed_htlcs: vec![],
							update_fee: None,
						},
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8409
	/// Handles a `tx_signatures` message for a funded (V2-established) channel, possibly
	/// sending our own `tx_signatures` in response and broadcasting the completed funding
	/// transaction.
	fn internal_tx_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxSignatures)
		-> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				match channel_phase {
					ChannelPhase::Funded(chan) => {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						// May yield our tx_signatures to send back and/or the now-complete
						// funding transaction to broadcast.
						let (tx_signatures_opt, funding_tx_opt) = try_chan_phase_entry!(self, peer_state, chan.tx_signatures(msg, &&logger), chan_phase_entry);
						if let Some(tx_signatures) = tx_signatures_opt {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxSignatures {
								node_id: *counterparty_node_id,
								msg: tx_signatures,
							});
						}
						if let Some(ref funding_tx) = funding_tx_opt {
							self.tx_broadcaster.broadcast_transactions(&[funding_tx]);
							{
								let mut pending_events = self.pending_events.lock().unwrap();
								emit_channel_pending_event!(pending_events, chan);
							}
						}
					},
					// tx_signatures on a not-yet-funded channel is a protocol violation:
					// the macro early-returns, closing the channel.
					_ => try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
						(
							"Got an unexpected tx_signatures message".into(),
							ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
						))), chan_phase_entry)
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8456
	/// Handles a `tx_abort` message: drops any in-progress interactive transaction
	/// constructor on a V2 channel and acknowledges the abort; warns (without closing) on
	/// phases with no interactive construction.
	fn internal_tx_abort(&self, counterparty_node_id: &PublicKey, msg: &msgs::TxAbort)
		-> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(
					format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
					msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				let channel_phase = chan_phase_entry.get_mut();
				// Only V2 unfunded phases can have an interactive tx constructor; the other
				// arms early-return a warning via the macro.
				let tx_constructor = match channel_phase {
					ChannelPhase::UnfundedInboundV2(chan) => chan.interactive_tx_constructor_mut(),
					ChannelPhase::UnfundedOutboundV2(chan) => chan.interactive_tx_constructor_mut(),
					ChannelPhase::Funded(_) => {
						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
							"Got an unexpected tx_abort message: After initial funding transaction is signed, \
							splicing and RBF attempts of interactive funding transactions are not supported yet so \
							we don't have any negotiation in progress".into(),
						)), chan_phase_entry)
					}
					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) => {
						try_chan_phase_entry!(self, peer_state, Err(ChannelError::Warn(
							"Got an unexpected tx_abort message: This is an unfunded channel created with V1 channel \
							establishment".into(),
						)), chan_phase_entry)
					},
				};
				// Drop the constructor; if one was actually in progress, echo a tx_abort to
				// acknowledge.
				if tx_constructor.take().is_some() {
					let msg = msgs::TxAbort {
						channel_id: msg.channel_id,
						data: "Acknowledged tx_abort".to_string().into_bytes(),
					};
					peer_state.pending_msg_events.push(events::MessageSendEvent::SendTxAbort {
						node_id: *counterparty_node_id,
						msg,
					});
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8521
	/// Handles a `channel_ready` message for a funded channel, queueing announcement
	/// signatures or a private `channel_update` as appropriate and emitting a
	/// `ChannelReady` event.
	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					// The channel may return announcement signatures if it is to be publicly
					// announced; the macro early-returns on error.
					let announcement_sigs_opt = try_chan_phase_entry!(self, peer_state, chan.channel_ready(&msg, &self.node_signer,
						self.chain_hash, &self.default_configuration, &self.best_block.read().unwrap(), &&logger), chan_phase_entry);
					if let Some(announcement_sigs) = announcement_sigs_opt {
						log_trace!(logger, "Sending announcement_signatures for channel {}", chan.context.channel_id());
						peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
							node_id: counterparty_node_id.clone(),
							msg: announcement_sigs,
						});
					} else if chan.context.is_usable() {
						// No announcement, but the channel is usable: give our counterparty a
						// channel_update so they can route through us privately.
						log_trace!(logger, "Sending private initial channel_update for our counterparty on channel {}", chan.context.channel_id());
						if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
								node_id: counterparty_node_id.clone(),
								msg,
							});
						}
					}

					{
						let mut pending_events = self.pending_events.lock().unwrap();
						emit_channel_ready_event!(pending_events, chan);
					}

					Ok(())
				} else {
					try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got a channel_ready message for an unfunded channel!".into())), chan_phase_entry)
				}
			},
			hash_map::Entry::Vacant(_) => {
				Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
	}
8576
	/// Handles a `shutdown` message: begins (or continues) cooperative close on a funded
	/// channel, failing back its outbound HTLCs; immediately force-closes an unfunded
	/// channel.
	fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
		// Work deferred until after the per-peer lock is released below.
		let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new();
		let mut finish_shutdown = None;
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
				})?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(msg.channel_id.clone()) {
				let phase = chan_phase_entry.get_mut();
				match phase {
					ChannelPhase::Funded(chan) => {
						if !chan.received_shutdown() {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_info!(logger, "Received a shutdown message from our counterparty for channel {}{}.",
								msg.channel_id,
								if chan.sent_shutdown() { " after we initiated shutdown" } else { "" });
						}

						let funding_txo_opt = chan.context.get_funding_txo();
						// May produce our own shutdown reply, a monitor update, and a set of
						// HTLCs to fail back; the macro early-returns on error.
						let (shutdown, monitor_update_opt, htlcs) = try_chan_phase_entry!(self, peer_state,
							chan.shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_phase_entry);
						dropped_htlcs = htlcs;

						if let Some(msg) = shutdown {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
								node_id: *counterparty_node_id,
								msg,
							});
						}
						if let Some(monitor_update) = monitor_update_opt {
							handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
						}
					},
					// A shutdown for an unfunded channel is unnecessary; just force-close it
					// locally (nothing is on-chain yet).
					ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedOutboundV1(_) |
					ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => {
						let context = phase.context_mut();
						let logger = WithChannelContext::from(&self.logger, context, None);
						log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id);
						let mut close_res = phase.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel);
						remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
						finish_shutdown = Some(close_res);
					},
				}
			} else {
				return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		}
		// Fail back the dropped HTLCs now that the per-peer lock is released.
		for htlc_source in dropped_htlcs.drain(..) {
			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
		}
		if let Some(shutdown_res) = finish_shutdown {
			self.finish_close_channel(shutdown_res);
		}

		Ok(())
	}
8645
	/// Handles an inbound `closing_signed` message, driving the cooperative-close fee
	/// negotiation for the referenced channel. On completion the closing transaction is
	/// broadcast, a `BroadcastChannelUpdate` is queued, and the channel is finally closed.
	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				// The caller should only dispatch messages for connected peers, so a missing
				// per-peer entry indicates an internal bug.
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let (tx, chan_option, shutdown_result) = {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, peer_state, chan.closing_signed(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
						debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown());
						if let Some(msg) = closing_signed {
							// We have a (counter-)offer or final signature to send back.
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
								node_id: counterparty_node_id.clone(),
								msg,
							});
						}
						if let Some(mut close_res) = shutdown_result {
							// Fee negotiation completed - the channel is done and the closing
							// transaction is ready to broadcast below.
							debug_assert!(tx.is_some());
							let channel_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, close_res);
							(tx, Some(channel_phase), Some(close_res))
						} else {
							debug_assert!(tx.is_none());
							(tx, None, None)
						}
					} else {
						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
					}
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		};
		if let Some(broadcast_tx) = tx {
			let channel_id = chan_option.as_ref().map(|channel| channel.context().channel_id());
			log_info!(WithContext::from(&self.logger, Some(*counterparty_node_id), channel_id, None), "Broadcasting {}", log_tx!(broadcast_tx));
			self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);
		}
		if let Some(ChannelPhase::Funded(chan)) = chan_option {
			// Queue a channel_update for broadcast so the network learns the channel is gone.
			if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
				pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
					msg: update
				});
			}
		}
		// Explicitly drop the per-peer read lock before finish_close_channel
		// (NOTE(review): presumably to respect lock ordering - confirm against lock docs).
		mem::drop(per_peer_state);
		if let Some(shutdown_result) = shutdown_result {
			self.finish_close_channel(shutdown_result);
		}
		Ok(())
	}
8708
	/// Handles an inbound `update_add_htlc` message: decodes the onion, builds the pending
	/// forward/receive status for the HTLC, and hands it to the channel. If the channel cannot
	/// accept the HTLC, the status is replaced with an appropriate failure to be relayed back
	/// (rather than closing the channel).
	fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
		// Decode the onion before taking any locks.
		let decoded_hop_res = self.decode_update_add_htlc_onion(msg, counterparty_node_id);
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let mut pending_forward_info = match decoded_hop_res {
						Ok((next_hop, shared_secret, next_packet_pk_opt)) =>
							self.construct_pending_htlc_status(
								msg, counterparty_node_id, shared_secret, next_hop,
								chan.context.config().accept_underpaying_htlcs, next_packet_pk_opt,
							),
						Err(e) => PendingHTLCStatus::Fail(e)
					};
					let logger = WithChannelContext::from(&self.logger, &chan.context, Some(msg.payment_hash));
					if let Err((_, error_code)) = chan.can_accept_incoming_htlc(&msg, &self.fee_estimator, &logger) {
						// The channel can't take this HTLC - convert the status into a failure
						// message to be sent back once the HTLC is irrevocably committed.
						if msg.blinding_point.is_some() {
							// For blinded HTLCs, always report invalid_onion_blinding to avoid
							// leaking information about the blinded path.
							pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(
								msgs::UpdateFailMalformedHTLC {
									channel_id: msg.channel_id,
									htlc_id: msg.htlc_id,
									sha256_of_onion: [0; 32],
									failure_code: INVALID_ONION_BLINDING,
								}
							))
						} else {
							match pending_forward_info {
								PendingHTLCStatus::Forward(PendingHTLCInfo {
									ref incoming_shared_secret, ref routing, ..
								}) => {
									let reason = if routing.blinded_failure().is_some() {
										HTLCFailReason::reason(INVALID_ONION_BLINDING, vec![0; 32])
									} else if (error_code & 0x1000) != 0 {
										// BADONION-class temporary failures carry extra data
										// (e.g. the latest channel_update).
										let error_data = self.get_htlc_inbound_temp_fail_data(error_code);
										HTLCFailReason::reason(error_code, error_data)
									} else {
										HTLCFailReason::from_failure_code(error_code)
									}.get_encrypted_failure_packet(incoming_shared_secret, &None);
									let msg = msgs::UpdateFailHTLC {
										channel_id: msg.channel_id,
										htlc_id: msg.htlc_id,
										reason
									};
									pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg));
								},
								_ => {},
							}
						}
					}
					try_chan_phase_entry!(self, peer_state, chan.update_add_htlc(&msg, pending_forward_info, &self.fee_estimator), chan_phase_entry);
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got an update_add_htlc message for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
		Ok(())
	}
8790
	/// Handles an inbound `update_fulfill_htlc`: records the preimage against the channel and
	/// claims the corresponding inbound HTLC (passing the preimage backwards). The next
	/// `revoke_and_ack` is blocked until the preimage is persisted in the inbound edge's
	/// `ChannelMonitor`.
	fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
		let funding_txo;
		let next_user_channel_id;
		let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
			let per_peer_state = self.per_peer_state.read().unwrap();
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
				})?;
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						let res = try_chan_phase_entry!(self, peer_state, chan.update_fulfill_htlc(&msg), chan_phase_entry);
						if let HTLCSource::PreviousHopData(prev_hop) = &res.0 {
							// Forwarded HTLC: delay our RAA until the preimage is durably stored
							// on the inbound channel's monitor so we can't lose it on restart.
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_trace!(logger,
								"Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
								msg.channel_id);
							peer_state.actions_blocking_raa_monitor_updates.entry(msg.channel_id)
								.or_insert_with(Vec::new)
								.push(RAAMonitorUpdateBlockingAction::from_prev_hop_data(&prev_hop));
						}
						funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
						next_user_channel_id = chan.context.get_user_id();
						res
					} else {
						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
							"Got an update_fulfill_htlc message for an unfunded channel!".into())), chan_phase_entry);
					}
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		};
		// Per-peer locks released above; claim (and potentially forward the preimage backwards).
		self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
			Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
			funding_txo, msg.channel_id, Some(next_user_channel_id),
		);

		Ok(())
	}
8840
	/// Handles an inbound `update_fail_htlc`, recording the (still-encrypted) failure against
	/// the pending outbound HTLC. The failure is only acted upon once the removal is
	/// irrevocably committed via the commitment dance.
	fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					try_chan_phase_entry!(self, peer_state, chan.update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan_phase_entry);
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got an update_fail_htlc message for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
		Ok(())
	}
8865
	/// Handles an inbound `update_fail_malformed_htlc`. The BADONION bit (0x8000) must be set
	/// in the failure code; otherwise the message itself is invalid and we close the channel.
	fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				// BOLT 2 requires the BADONION bit in update_fail_malformed_htlc failure codes.
				if (msg.failure_code & 0x8000) == 0 {
					let chan_err = ChannelError::close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
					try_chan_phase_entry!(self, peer_state, Err(chan_err), chan_phase_entry);
				}
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					try_chan_phase_entry!(self, peer_state, chan.update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan_phase_entry);
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got an update_fail_malformed_htlc message for an unfunded channel!".into())), chan_phase_entry);
				}
				Ok(())
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
	}
8894
	/// Handles an inbound `commitment_signed`. For channels with an in-progress interactive
	/// (V2) funding signing session this is the initial commitment signature, so the initial
	/// `ChannelMonitor` is created and registered with the chain monitor; otherwise the
	/// commitment update flows through the usual monitor-update path.
	fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
		let best_block = *self.best_block.read().unwrap();
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					let funding_txo = chan.context.get_funding_txo();

					if chan.interactive_tx_signing_session.is_some() {
						// Initial commitment_signed for an interactively-funded (V2) channel:
						// build the initial monitor and hand it to the chain monitor.
						let monitor = try_chan_phase_entry!(
							self, peer_state, chan.commitment_signed_initial_v2(msg, best_block, &self.signer_provider, &&logger),
							chan_phase_entry);
						let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
						if let Ok(persist_state) = monitor_res {
							handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
								per_peer_state, chan, INITIAL_MONITOR);
						} else {
							// watch_channel failing implies a monitor already exists for this
							// funding outpoint - close rather than risk monitor confusion.
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
							try_chan_phase_entry!(self, peer_state, Err(ChannelError::Close(
								(
									"Channel funding outpoint was a duplicate".to_owned(),
									ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(false) },
								)
							)), chan_phase_entry)
						}
					} else {
						let monitor_update_opt = try_chan_phase_entry!(
							self, peer_state, chan.commitment_signed(msg, &&logger), chan_phase_entry);
						if let Some(monitor_update) = monitor_update_opt {
							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
								peer_state, per_peer_state, chan);
						}
					}
					Ok(())
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
	}
8946
8947 fn push_decode_update_add_htlcs(&self, mut update_add_htlcs: (u64, Vec<msgs::UpdateAddHTLC>)) {
8948 let mut push_forward_event = self.forward_htlcs.lock().unwrap().is_empty();
8949 let mut decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
8950 push_forward_event &= decode_update_add_htlcs.is_empty();
8951 let scid = update_add_htlcs.0;
8952 match decode_update_add_htlcs.entry(scid) {
8953 hash_map::Entry::Occupied(mut e) => { e.get_mut().append(&mut update_add_htlcs.1); },
8954 hash_map::Entry::Vacant(e) => { e.insert(update_add_htlcs.1); },
8955 }
8956 if push_forward_event { self.push_pending_forwards_ev(); }
8957 }
8958
8959 #[inline]
8960 fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) {
8961 let push_forward_event = self.forward_htlcs_without_forward_event(per_source_pending_forwards);
8962 if push_forward_event { self.push_pending_forwards_ev() }
8963 }
8964
	/// Queues the given per-source pending HTLCs into `forward_htlcs` (or, for valid intercept
	/// SCIDs, into `pending_intercepted_htlcs` with an `HTLCIntercepted` event), returning
	/// whether the caller should generate a `PendingHTLCsForwardable` event.
	#[inline]
	fn forward_htlcs_without_forward_event(&self, per_source_pending_forwards: &mut [(u64, Option<PublicKey>, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) -> bool {
		let mut push_forward_event = false;
		for &mut (prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
			let mut new_intercept_events = VecDeque::new();
			let mut failed_intercept_forwards = Vec::new();
			if !pending_forwards.is_empty() {
				for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
					// HTLCs we are to receive ourselves are keyed under SCID 0.
					let scid = match forward_info.routing {
						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
						PendingHTLCRouting::Receive { .. } => 0,
						PendingHTLCRouting::ReceiveKeysend { .. } => 0,
					};
					let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);

					let decode_update_add_htlcs_empty = self.decode_update_add_htlcs.lock().unwrap().is_empty();
					let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
					let forward_htlcs_empty = forward_htlcs.is_empty();
					match forward_htlcs.entry(scid) {
						hash_map::Entry::Occupied(mut entry) => {
							entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
								prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
								prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
							}));
						},
						hash_map::Entry::Vacant(entry) => {
							if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
							   fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.chain_hash)
							{
								// A valid intercept SCID that isn't one of our channels: hand the
								// HTLC to the user via an HTLCIntercepted event instead.
								let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).to_byte_array());
								let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
								match pending_intercepts.entry(intercept_id) {
									hash_map::Entry::Vacant(entry) => {
										new_intercept_events.push_back((events::Event::HTLCIntercepted {
											requested_next_hop_scid: scid,
											payment_hash: forward_info.payment_hash,
											inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
											expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
											intercept_id
										}, None));
										entry.insert(PendingAddHTLCInfo {
											prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
											prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
										});
									},
									hash_map::Entry::Occupied(_) => {
										// Duplicate intercept id - fail this HTLC backwards rather
										// than clobbering the already-pending intercept.
										let logger = WithContext::from(&self.logger, None, Some(prev_channel_id), Some(forward_info.payment_hash));
										log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
										let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
											short_channel_id: prev_short_channel_id,
											user_channel_id: Some(prev_user_channel_id),
											counterparty_node_id: prev_counterparty_node_id,
											outpoint: prev_funding_outpoint,
											channel_id: prev_channel_id,
											htlc_id: prev_htlc_id,
											incoming_packet_shared_secret: forward_info.incoming_shared_secret,
											phantom_shared_secret: None,
											blinded_failure: forward_info.routing.blinded_failure(),
											cltv_expiry: forward_info.routing.incoming_cltv_expiry(),
										});

										// 0x4000 | 10 is unknown_next_peer (BOLT 4).
										failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
											HTLCFailReason::from_failure_code(0x4000 | 10),
											HTLCDestination::InvalidForward { requested_forward_scid: scid },
										));
									}
								}
							} else {
								// Only request a forwardable event if no forwarding work was
								// already pending when we took the locks above.
								push_forward_event |= forward_htlcs_empty && decode_update_add_htlcs_empty;
								entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
									prev_short_channel_id, prev_counterparty_node_id, prev_funding_outpoint,
									prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info
								})));
							}
						}
					}
				}
			}

			for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
				push_forward_event |= self.fail_htlc_backwards_internal_without_forward_event(&htlc_source, &payment_hash, &failure_reason, destination);
			}

			if !new_intercept_events.is_empty() {
				let mut events = self.pending_events.lock().unwrap();
				events.append(&mut new_intercept_events);
			}
		}
		push_forward_event
	}
9058
9059 fn push_pending_forwards_ev(&self) {
9060 let mut pending_events = self.pending_events.lock().unwrap();
9061 let is_processing_events = self.pending_events_processor.load(Ordering::Acquire);
9062 let num_forward_events = pending_events.iter().filter(|(ev, _)|
9063 if let events::Event::PendingHTLCsForwardable { .. } = ev { true } else { false }
9064 ).count();
9065 if (is_processing_events && num_forward_events <= 1) || num_forward_events < 1 {
9072 pending_events.push_back((Event::PendingHTLCsForwardable {
9073 time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
9074 }, None));
9075 }
9076 }
9077
9078 fn raa_monitor_updates_held(&self,
9083 actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>,
9084 channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey
9085 ) -> bool {
9086 actions_blocking_raa_monitor_updates
9087 .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
9088 || self.pending_events.lock().unwrap().iter().any(|(_, action)| {
9089 action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
9090 channel_funding_outpoint,
9091 channel_id,
9092 counterparty_node_id,
9093 })
9094 })
9095 }
9096
9097 #[cfg(any(test, feature = "_test_utils"))]
9098 pub(crate) fn test_raa_monitor_updates_held(&self,
9099 counterparty_node_id: PublicKey, channel_id: ChannelId
9100 ) -> bool {
9101 let per_peer_state = self.per_peer_state.read().unwrap();
9102 if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
9103 let mut peer_state_lck = peer_state_mtx.lock().unwrap();
9104 let peer_state = &mut *peer_state_lck;
9105
9106 if let Some(chan) = peer_state.channel_by_id.get(&channel_id) {
9107 return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
9108 chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id);
9109 }
9110 }
9111 false
9112 }
9113
	/// Handles an inbound `revoke_and_ack`, advancing the channel's commitment state. Any
	/// holding-cell HTLCs that must now be failed are failed backwards after the per-peer
	/// locks are released.
	fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
		let htlcs_to_fail = {
			let per_peer_state = self.per_peer_state.read().unwrap();
			let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
				}).map(|mtx| mtx.lock().unwrap())?;
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						let funding_txo_opt = chan.context.get_funding_txo();
						// Check whether the resulting monitor update must be deferred (e.g.
						// because a preimage is still being persisted on the inbound edge).
						let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt {
							self.raa_monitor_updates_held(
								&peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id,
								*counterparty_node_id)
						} else { false };
						let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, peer_state,
							chan.revoke_and_ack(&msg, &self.fee_estimator, &&logger, mon_update_blocked), chan_phase_entry);
						if let Some(monitor_update) = monitor_update_opt {
							let funding_txo = funding_txo_opt
								.expect("Funding outpoint must have been set for RAA handling to succeed");
							handle_new_monitor_update!(self, funding_txo, monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
						}
						htlcs_to_fail
					} else {
						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
							"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
					}
				},
				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
			}
		};
		self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
		Ok(())
	}
9153
	/// Handles an inbound `update_fee`, applying the counterparty's proposed feerate change to
	/// the channel (subject to the channel's own validation against our fee estimator).
	fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					try_chan_phase_entry!(self, peer_state, chan.update_fee(&self.fee_estimator, &msg, &&logger), chan_phase_entry);
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got an update_fee message for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
		Ok(())
	}
9177
	/// Handles an inbound `announcement_signatures`, producing and queueing a
	/// `BroadcastChannelAnnouncement` for the now-announceable channel. Ignored (without
	/// closing) if the channel is not yet usable.
	fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
			.ok_or_else(|| {
				debug_assert!(false);
				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
			})?;
		let mut peer_state_lock = peer_state_mutex.lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(msg.channel_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					if !chan.context.is_usable() {
						return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
					}

					peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
						msg: try_chan_phase_entry!(self, peer_state, chan.announcement_signatures(
							&self.node_signer, self.chain_hash, self.best_block.read().unwrap().height,
							msg, &self.default_configuration
						), chan_phase_entry),
						// NOTE(review): unwrap assumes a broadcast channel_update is always
						// constructible once the channel is usable - confirm.
						update_msg: Some(self.get_channel_update_for_broadcast(chan).unwrap()),
					});
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got an announcement_signatures message for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
		}
		Ok(())
	}
9212
	/// Handles an inbound `channel_update` for one of our own channels, storing it so it can be
	/// returned in onion failure packets. Updates for unknown SCIDs, updates for our own side
	/// of the channel, and no-op updates skip persistence entirely.
	fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
		let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
			Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
			None => {
				// Not an SCID for any of our channels - nothing to do.
				return Ok(NotifyOption::SkipPersistNoEvents)
			}
		};
		let per_peer_state = self.per_peer_state.read().unwrap();
		let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
		if peer_state_mutex_opt.is_none() {
			return Ok(NotifyOption::SkipPersistNoEvents)
		}
		let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
		let peer_state = &mut *peer_state_lock;
		match peer_state.channel_by_id.entry(chan_id) {
			hash_map::Entry::Occupied(mut chan_phase_entry) => {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					if chan.context.get_counterparty_node_id() != *counterparty_node_id {
						if chan.context.should_announce() {
							// The update came from a third party, which is fine for a public
							// channel - but we don't track third-party updates here.
							return Ok(NotifyOption::SkipPersistNoEvents);
						}
						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
					}
					// channel_flags bit 0 indicates which node signed the update; ignore updates
					// describing our own side of the channel.
					let were_node_one = self.get_our_node_id().serialize()[..] < chan.context.get_counterparty_node_id().serialize()[..];
					let msg_from_node_one = msg.contents.channel_flags & 1 == 0;
					if were_node_one == msg_from_node_one {
						return Ok(NotifyOption::SkipPersistNoEvents);
					} else {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						log_debug!(logger, "Received channel_update {:?} for channel {}.", msg, chan_id);
						let did_change = try_chan_phase_entry!(self, peer_state, chan.channel_update(&msg), chan_phase_entry);
						if !did_change {
							// Duplicate/no-op update - don't persist for it.
							return Ok(NotifyOption::SkipPersistNoEvents);
						}
					}
				} else {
					return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
						"Got a channel_update for an unfunded channel!".into())), chan_phase_entry);
				}
			},
			hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersistNoEvents)
		}
		Ok(NotifyOption::DoPersist)
	}
9264
	/// Handles an inbound `channel_reestablish` message from `counterparty_node_id`.
	///
	/// For a known, funded channel this resumes the channel (re-sending RAA/commitment
	/// updates, shutdown, `channel_ready`, and announcement signatures as needed) via
	/// [`Channel::channel_reestablish`] and `handle_channel_resumption`. For an unknown
	/// channel we reply with a deliberately bogus `channel_reestablish` to prompt the
	/// peer to force-close, then return an error.
	///
	/// Returns `SkipPersistHandleEvents` on success; errors are mapped into wire error
	/// messages by the caller via `MsgHandleErrInternal`.
	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<NotifyOption, MsgHandleErrInternal> {
		let need_lnd_workaround = {
			let per_peer_state = self.per_peer_state.read().unwrap();

			// A message for a peer we don't know should have been filtered earlier, hence
			// the debug_assert before returning a no-close error.
			let peer_state_mutex = per_peer_state.get(counterparty_node_id)
				.ok_or_else(|| {
					debug_assert!(false);
					MsgHandleErrInternal::send_err_msg_no_close(
						format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id),
						msg.channel_id
					)
				})?;
			let logger = WithContext::from(&self.logger, Some(*counterparty_node_id), Some(msg.channel_id), None);
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			match peer_state.channel_by_id.entry(msg.channel_id) {
				hash_map::Entry::Occupied(mut chan_phase_entry) => {
					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
						let responses = try_chan_phase_entry!(self, peer_state, chan.channel_reestablish(
							msg, &&logger, &self.node_signer, self.chain_hash,
							&self.default_configuration, &*self.best_block.read().unwrap()), chan_phase_entry);
						let mut channel_update = None;
						if let Some(msg) = responses.shutdown_msg {
							// Channel is shutting down: re-send our shutdown rather than a
							// channel_update.
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
								node_id: counterparty_node_id.clone(),
								msg,
							});
						} else if chan.context.is_usable() {
							// Re-send our latest channel_update directly to the peer so it has
							// current forwarding parameters after reconnection.
							if let Ok(msg) = self.get_channel_update_for_unicast(chan) {
								channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
									node_id: chan.context.get_counterparty_node_id(),
									msg,
								});
							}
						}
						// If set, we must re-send channel_ready after reestablish (workaround for
						// LND bug 4006, per the field name — handled below once locks are dropped).
						let need_lnd_workaround = chan.context.workaround_lnd_bug_4006.take();
						let (htlc_forwards, decode_update_add_htlcs) = self.handle_channel_resumption(
							&mut peer_state.pending_msg_events, chan, responses.raa, responses.commitment_update, responses.order,
							Vec::new(), Vec::new(), None, responses.channel_ready, responses.announcement_sigs, None);
						// No HTLC forwarding work is passed in, so none should come back out.
						debug_assert!(htlc_forwards.is_none());
						debug_assert!(decode_update_add_htlcs.is_none());
						// Queue the channel_update only after resumption messages so ordering on
						// the wire is correct.
						if let Some(upd) = channel_update {
							peer_state.pending_msg_events.push(upd);
						}
						need_lnd_workaround
					} else {
						return try_chan_phase_entry!(self, peer_state, Err(ChannelError::close(
							"Got a channel_reestablish message for an unfunded channel!".into())), chan_phase_entry);
					}
				},
				hash_map::Entry::Vacant(_) => {
					log_debug!(logger, "Sending bogus ChannelReestablish for unknown channel {} to force channel closure",
						msg.channel_id);
					// Intentionally-invalid reestablish (zero commitment numbers, dummy secret
					// and point) so the peer will broadcast its latest state and close.
					peer_state.pending_msg_events.push(MessageSendEvent::SendChannelReestablish {
						node_id: *counterparty_node_id,
						msg: msgs::ChannelReestablish {
							channel_id: msg.channel_id,
							next_local_commitment_number: 0,
							next_remote_commitment_number: 0,
							your_last_per_commitment_secret: [1u8; 32],
							my_current_per_commitment_point: PublicKey::from_slice(&[2u8; 33]).unwrap(),
							next_funding_txid: None,
						},
					});
					return Err(MsgHandleErrInternal::send_err_msg_no_close(
						format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}",
							counterparty_node_id), msg.channel_id)
					)
				}
			}
		};

		// Re-send channel_ready outside the per-peer lock to avoid re-entrant locking in
		// internal_channel_ready.
		if let Some(channel_ready_msg) = need_lnd_workaround {
			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
		}
		Ok(NotifyOption::SkipPersistHandleEvents)
	}
9363
	/// Drains and handles all pending events from the chain monitor: HTLC resolutions
	/// seen on-chain, holder force-closes, and completed monitor updates.
	///
	/// Returns `true` if any monitor events were processed (callers may use this to
	/// decide whether to persist).
	fn process_pending_monitor_events(&self) -> bool {
		// Caller must already hold the total_consistency_lock (read or write).
		debug_assert!(self.total_consistency_lock.try_write().is_err());
		let mut failed_channels = Vec::new();
		let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
		let has_pending_monitor_events = !pending_monitor_events.is_empty();
		for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
			for monitor_event in monitor_events.drain(..) {
				match monitor_event {
					MonitorEvent::HTLCEvent(htlc_update) => {
						let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id), Some(htlc_update.payment_hash));
						if let Some(preimage) = htlc_update.payment_preimage {
							log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
							// The monitor learned a preimage on-chain; claim the HTLC backwards.
							// Value is converted from sats to msats for the claim path.
							self.claim_funds_internal(htlc_update.source, preimage,
								htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
								false, counterparty_node_id, funding_outpoint, channel_id, None);
						} else {
							log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
							let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
							let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
							self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
						}
					},
					MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
						// Older monitor events may not carry the counterparty; fall back to the
						// outpoint_to_peer map to locate it.
						let counterparty_node_id_opt = match counterparty_node_id {
							Some(cp_id) => Some(cp_id),
							None => {
								let outpoint_to_peer = self.outpoint_to_peer.lock().unwrap();
								outpoint_to_peer.get(&funding_outpoint).cloned()
							}
						};
						if let Some(counterparty_node_id) = counterparty_node_id_opt {
							let per_peer_state = self.per_peer_state.read().unwrap();
							if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
								let mut peer_state_lock = peer_state_mutex.lock().unwrap();
								let peer_state = &mut *peer_state_lock;
								let pending_msg_events = &mut peer_state.pending_msg_events;
								if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
									// Prefer the monitor-provided closure reason when present.
									let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
										reason
									} else {
										ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }
									};
									let mut shutdown_res = chan_phase_entry.get_mut().context_mut().force_shutdown(false, reason.clone());
									let chan_phase = remove_channel_phase!(self, peer_state, chan_phase_entry, shutdown_res);
									// Finish closing outside the per-peer lock, below.
									failed_channels.push(shutdown_res);
									if let ChannelPhase::Funded(chan) = chan_phase {
										// Broadcast a (disabled) channel_update and tell the peer to
										// disconnect with the closure reason.
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}
										pending_msg_events.push(events::MessageSendEvent::HandleError {
											node_id: chan.context.get_counterparty_node_id(),
											action: msgs::ErrorAction::DisconnectPeer {
												msg: Some(msgs::ErrorMessage {
													channel_id: chan.context.channel_id(),
													data: reason.to_string()
												})
											},
										});
									}
								}
							}
						}
					},
					MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => {
						self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref());
					},
				}
			}
		}

		// Channel teardown is deferred until all per-peer locks are released.
		for failure in failed_channels.drain(..) {
			self.finish_close_channel(failure);
		}

		has_pending_monitor_events
	}
9447
	#[cfg(fuzzing)]
	/// Fuzzing-only public wrapper around [`Self::process_pending_monitor_events`],
	/// taking the persistence guard that the internal method asserts is held.
	pub fn process_monitor_events(&self) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		self.process_pending_monitor_events();
	}
9456
	/// Attempts to free holding-cell HTLCs on every funded channel, generating monitor
	/// updates for freed HTLCs and failing back any that can no longer be forwarded.
	///
	/// Returns `true` if any monitor update was generated or any HTLC was failed.
	fn check_free_holding_cells(&self) -> bool {
		let mut has_monitor_update = false;
		let mut failed_htlcs = Vec::new();

		// handle_new_monitor_update! may need to drop and re-take the per-peer lock, so
		// after each monitor update we restart iteration from scratch ('peer_loop).
		'peer_loop: loop {
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				'chan_loop: loop {
					let mut peer_state_lock = peer_state_mutex.lock().unwrap();
					let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
					// Only funded channels have holding cells to free.
					for (channel_id, chan) in peer_state.channel_by_id.iter_mut().filter_map(
						|(chan_id, phase)| if let ChannelPhase::Funded(chan) = phase { Some((chan_id, chan)) } else { None }
					) {
						let counterparty_node_id = chan.context.get_counterparty_node_id();
						let funding_txo = chan.context.get_funding_txo();
						let (monitor_opt, holding_cell_failed_htlcs) =
							chan.maybe_free_holding_cell_htlcs(&self.fee_estimator, &&WithChannelContext::from(&self.logger, &chan.context, None));
						if !holding_cell_failed_htlcs.is_empty() {
							// Failed back below, after all locks are released.
							failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
						}
						if let Some(monitor_update) = monitor_opt {
							has_monitor_update = true;

							handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
								peer_state_lock, peer_state, per_peer_state, chan);
							continue 'peer_loop;
						}
					}
					break 'chan_loop;
				}
			}
			break 'peer_loop;
		}

		let has_update = has_monitor_update || !failed_htlcs.is_empty();
		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
		}

		has_update
	}
9505
	/// Indicates that a previously-blocked (e.g. asynchronous) signer may now be able to
	/// provide signatures, retrying any channel operations that were waiting on one.
	///
	/// If `channel_opt` is `Some`, only the given `(counterparty, channel_id)` pair is
	/// retried; otherwise every channel with every peer is retried. Channels whose retry
	/// produces a `ShutdownResult` are removed and closed.
	pub fn signer_unblocked(&self, channel_opt: Option<(PublicKey, ChannelId)>) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Per-channel retry: re-generates and queues whatever messages the signer had
		// blocked, returning a ShutdownResult if the channel finished closing.
		let unblock_chan = |phase: &mut ChannelPhase<SP>, pending_msg_events: &mut Vec<MessageSendEvent>| -> Option<ShutdownResult> {
			let node_id = phase.context().get_counterparty_node_id();
			match phase {
				ChannelPhase::Funded(chan) => {
					let msgs = chan.signer_maybe_unblocked(&self.logger);
					let cu_msg = msgs.commitment_update.map(|updates| events::MessageSendEvent::UpdateHTLCs {
						node_id,
						updates,
					});
					let raa_msg = msgs.revoke_and_ack.map(|msg| events::MessageSendEvent::SendRevokeAndACK {
						node_id,
						msg,
					});
					// When both a commitment_update and an RAA are pending, the channel
					// dictates which must be sent first.
					match (cu_msg, raa_msg) {
						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::CommitmentFirst => {
							pending_msg_events.push(cu);
							pending_msg_events.push(raa);
						},
						(Some(cu), Some(raa)) if msgs.order == RAACommitmentOrder::RevokeAndACKFirst => {
							pending_msg_events.push(raa);
							pending_msg_events.push(cu);
						},
						(Some(cu), _) => pending_msg_events.push(cu),
						(_, Some(raa)) => pending_msg_events.push(raa),
						(_, _) => {},
					}
					if let Some(msg) = msgs.funding_signed {
						pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
							node_id,
							msg,
						});
					}
					if let Some(msg) = msgs.channel_ready {
						send_channel_ready!(self, pending_msg_events, chan, msg);
					}
					if let Some(msg) = msgs.closing_signed {
						pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
							node_id,
							msg,
						});
					}
					if let Some(broadcast_tx) = msgs.signed_closing_tx {
						// The signer finally signed the co-op closing tx: broadcast it and
						// announce the channel as closed.
						let channel_id = chan.context.channel_id();
						let counterparty_node_id = chan.context.get_counterparty_node_id();
						let logger = WithContext::from(&self.logger, Some(counterparty_node_id), Some(channel_id), None);
						log_info!(logger, "Broadcasting closing tx {}", log_tx!(broadcast_tx));
						self.tx_broadcaster.broadcast_transactions(&[&broadcast_tx]);

						if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
							pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
								msg: update
							});
						}
					}
					msgs.shutdown_result
				}
				ChannelPhase::UnfundedOutboundV1(chan) => {
					let (open_channel, funding_created) = chan.signer_maybe_unblocked(self.chain_hash.clone(), &self.logger);
					if let Some(msg) = open_channel {
						pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
							node_id,
							msg,
						});
					}
					if let Some(msg) = funding_created {
						pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
							node_id,
							msg,
						});
					}
					None
				}
				ChannelPhase::UnfundedInboundV1(chan) => {
					let logger = WithChannelContext::from(&self.logger, &chan.context, None);
					if let Some(msg) = chan.signer_maybe_unblocked(&&logger) {
						pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
							node_id,
							msg,
						});
					}
					None
				},
				// V2 channel establishment has no signer-unblock handling here.
				ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::UnfundedOutboundV2(_) => None,
			}
		};

		let mut shutdown_results = Vec::new();
		let per_peer_state = self.per_peer_state.read().unwrap();
		// Restrict to the requested peer when channel_opt is given.
		let per_peer_state_iter = per_peer_state.iter().filter(|(cp_id, _)| {
			if let Some((counterparty_node_id, _)) = channel_opt {
				**cp_id == counterparty_node_id
			} else { true }
		});
		for (_cp_id, peer_state_mutex) in per_peer_state_iter {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			peer_state.channel_by_id.retain(|_, chan| {
				let shutdown_result = match channel_opt {
					// Skip channels other than the requested one.
					Some((_, channel_id)) if chan.context().channel_id() != channel_id => None,
					_ => unblock_chan(chan, &mut peer_state.pending_msg_events),
				};
				if let Some(mut shutdown_result) = shutdown_result {
					let context = &chan.context();
					let logger = WithChannelContext::from(&self.logger, context, None);
					log_trace!(logger, "Removing channel {} now that the signer is unblocked", context.channel_id());
					locked_close_channel!(self, peer_state, context, shutdown_result);
					shutdown_results.push(shutdown_result);
					false
				} else {
					true
				}
			});
		}
		// finish_close_channel must run without the per-peer state lock held.
		drop(per_peer_state);
		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}
	}
9635
	/// Walks all funded channels and, where co-op close negotiation can proceed, proposes
	/// a `closing_signed`, broadcasting the final closing transaction when negotiation
	/// completes.
	///
	/// Returns `true` if any message was generated or any channel errored.
	fn maybe_generate_initial_closing_signed(&self) -> bool {
		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
		let mut has_update = false;
		let mut shutdown_results = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();

			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				// retain: returning false removes the channel from the map.
				peer_state.channel_by_id.retain(|channel_id, phase| {
					match phase {
						ChannelPhase::Funded(chan) => {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							match chan.maybe_propose_closing_signed(&self.fee_estimator, &&logger) {
								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
									if let Some(msg) = msg_opt {
										has_update = true;
										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
											node_id: chan.context.get_counterparty_node_id(), msg,
										});
									}
									debug_assert_eq!(shutdown_result_opt.is_some(), chan.is_shutdown());
									if let Some(mut shutdown_result) = shutdown_result_opt {
										locked_close_channel!(self, peer_state, &chan.context, shutdown_result);
										shutdown_results.push(shutdown_result);
									}
									if let Some(tx) = tx_opt {
										// Negotiation finished: announce the channel as closed and
										// broadcast the agreed closing transaction.
										if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
											let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
											pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
												msg: update
											});
										}

										log_info!(logger, "Broadcasting {}", log_tx!(tx));
										self.tx_broadcaster.broadcast_transactions(&[&tx]);
										false
									} else { true }
								},
								Err(e) => {
									has_update = true;
									let (close_channel, res) = convert_chan_phase_err!(self, peer_state, e, chan, channel_id, FUNDED_CHANNEL);
									handle_errors.push((chan.context.get_counterparty_node_id(), Err(res)));
									!close_channel
								}
							}
						},
						// Unfunded channels have nothing to negotiate.
						_ => true, }
				});
			}
		}

		// Error handling and channel teardown run after the per-peer locks are released.
		for (counterparty_node_id, err) in handle_errors.drain(..) {
			let _ = handle_error!(self, err, counterparty_node_id);
		}

		for shutdown_result in shutdown_results.drain(..) {
			self.finish_close_channel(shutdown_result);
		}

		has_update
	}
9706
	/// Creates a signed [`Bolt11Invoice`] from the given [`Bolt11InvoiceParameters`],
	/// registering a matching inbound payment (or a payment secret for a caller-provided
	/// payment hash) and embedding route hints from the current channel set.
	///
	/// # Errors
	///
	/// Returns a [`SignOrCreationError`] if the CLTV delta is too short, the inbound
	/// payment cannot be created, the invoice fails to build, or signing fails.
	pub fn create_bolt11_invoice(
		&self, params: Bolt11InvoiceParameters,
	) -> Result<Bolt11Invoice, SignOrCreationError<()>> {
		let Bolt11InvoiceParameters {
			amount_msats, description, invoice_expiry_delta_secs, min_final_cltv_expiry_delta,
			payment_hash,
		} = params;

		let currency =
			Network::from_chain_hash(self.chain_hash).map(Into::into).unwrap_or(Currency::Bitcoin);

		#[cfg(feature = "std")]
		let duration_since_epoch = {
			use std::time::SystemTime;
			SystemTime::now().duration_since(SystemTime::UNIX_EPOCH)
				.expect("SystemTime::now() should be after SystemTime::UNIX_EPOCH")
		};

		// Without std we fall back to the highest block timestamp we have seen.
		#[cfg(not(feature = "std"))]
		let duration_since_epoch =
			Duration::from_secs(self.highest_seen_timestamp.load(Ordering::Acquire) as u64);

		// The effective delta gets a +3 buffer below, so reject values that would still
		// fall short of MIN_FINAL_CLTV_EXPIRY_DELTA after buffering.
		if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
			if min_final_cltv_expiry_delta.saturating_add(3) < MIN_FINAL_CLTV_EXPIRY_DELTA {
				return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
			}
		}

		// Either register a payment secret for the caller's hash, or generate a fresh
		// (hash, secret) pair.
		let (payment_hash, payment_secret) = match payment_hash {
			Some(payment_hash) => {
				let payment_secret = self
					.create_inbound_payment_for_hash(
						payment_hash, amount_msats,
						invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
						min_final_cltv_expiry_delta,
					)
					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?;
				(payment_hash, payment_secret)
			},
			None => {
				self
					.create_inbound_payment(
						amount_msats, invoice_expiry_delta_secs.unwrap_or(DEFAULT_EXPIRY_TIME as u32),
						min_final_cltv_expiry_delta,
					)
					.map_err(|()| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
			},
		};

		log_trace!(self.logger, "Creating invoice with payment hash {}", &payment_hash);

		let invoice = Bolt11InvoiceBuilder::new(currency);
		let invoice = match description {
			Bolt11InvoiceDescription::Direct(description) => invoice.description(description.into_inner().0),
			Bolt11InvoiceDescription::Hash(hash) => invoice.description_hash(hash.0),
		};

		let mut invoice = invoice
			.duration_since_epoch(duration_since_epoch)
			.payee_pub_key(self.get_our_node_id())
			.payment_hash(Hash::from_slice(&payment_hash.0).unwrap())
			.payment_secret(payment_secret)
			.basic_mpp()
			.min_final_cltv_expiry_delta(
				// +3 buffer, mirroring the check above; defaults to MIN_FINAL_CLTV_EXPIRY_DELTA.
				min_final_cltv_expiry_delta.map(|x| x.saturating_add(3)).unwrap_or(MIN_FINAL_CLTV_EXPIRY_DELTA).into()
			);

		if let Some(invoice_expiry_delta_secs) = invoice_expiry_delta_secs{
			invoice = invoice.expiry_time(Duration::from_secs(invoice_expiry_delta_secs.into()));
		}

		if let Some(amount_msats) = amount_msats {
			invoice = invoice.amount_milli_satoshis(amount_msats);
		}

		// Embed private route hints derived from our current channels.
		let channels = self.list_channels();
		let route_hints = super::invoice_utils::sort_and_filter_channels(channels, amount_msats, &self.logger);
		for hint in route_hints {
			invoice = invoice.private_route(hint);
		}

		let raw_invoice = invoice.build_raw().map_err(|e| SignOrCreationError::CreationError(e))?;
		let signature = self.node_signer.sign_invoice(&raw_invoice, Recipient::Node);

		raw_invoice
			.sign(|_| signature)
			.map(|invoice| Bolt11Invoice::from_signed(invoice).unwrap())
			.map_err(|e| SignOrCreationError::SignError(e))
	}
9804}
9805
/// Parameters for [`ChannelManager::create_bolt11_invoice`].
pub struct Bolt11InvoiceParameters {
	/// The invoice amount in millisatoshis, or `None` for an amount-less invoice.
	pub amount_msats: Option<u64>,

	/// The invoice description, either inline text or a hash of it.
	pub description: Bolt11InvoiceDescription,

	/// Seconds until the invoice expires; `DEFAULT_EXPIRY_TIME` is used when `None`.
	pub invoice_expiry_delta_secs: Option<u32>,

	/// The minimum CLTV delta for the final hop. A +3 block buffer is applied, and values
	/// that would still fall below `MIN_FINAL_CLTV_EXPIRY_DELTA` after buffering are
	/// rejected.
	pub min_final_cltv_expiry_delta: Option<u16>,

	/// A payment hash to use directly (a payment secret is registered for it); when
	/// `None`, a fresh hash/secret pair is generated.
	pub payment_hash: Option<PaymentHash>,
}
9839
9840impl Default for Bolt11InvoiceParameters {
9841 fn default() -> Self {
9842 Self {
9843 amount_msats: None,
9844 description: Bolt11InvoiceDescription::Direct(Description::empty()),
9845 invoice_expiry_delta_secs: None,
9846 min_final_cltv_expiry_delta: None,
9847 payment_hash: None,
9848 }
9849 }
9850}
9851
// Generates `create_offer_builder` on ChannelManager. A macro (rather than a plain
// method) so the concrete builder type can differ between Rust and C-bindings builds.
macro_rules! create_offer_builder { ($self: ident, $builder: ty) => {
	/// Creates an offer builder whose signing pubkey is derived from our node id and a
	/// fresh nonce, reachable via a single blinded path.
	///
	/// Short-lived offers (relative to `absolute_expiry`) may use compact blinded paths;
	/// see `create_blinded_paths_using_absolute_expiry`.
	///
	/// # Errors
	///
	/// Returns [`Bolt12SemanticError::MissingPaths`] if no blinded path can be created.
	pub fn create_offer_builder(
		&$self, absolute_expiry: Option<Duration>
	) -> Result<$builder, Bolt12SemanticError> {
		let node_id = $self.get_our_node_id();
		let expanded_key = &$self.inbound_payment_key;
		let entropy = &*$self.entropy_source;
		let secp_ctx = &$self.secp_ctx;

		let nonce = Nonce::from_entropy_source(entropy);
		let context = OffersContext::InvoiceRequest { nonce };
		// Only the first created path is used for the offer.
		let path = $self.create_blinded_paths_using_absolute_expiry(context, absolute_expiry)
			.and_then(|paths| paths.into_iter().next().ok_or(()))
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;
		let builder = OfferBuilder::deriving_signing_pubkey(node_id, expanded_key, nonce, secp_ctx)
			.chain_hash($self.chain_hash)
			.path(path);

		let builder = match absolute_expiry {
			None => builder,
			Some(absolute_expiry) => builder.absolute_expiry(absolute_expiry),
		};

		Ok(builder.into())
	}
} }
9902
// Generates `create_refund_builder` on ChannelManager. A macro (rather than a plain
// method) so the concrete builder type can differ between Rust and C-bindings builds.
macro_rules! create_refund_builder { ($self: ident, $builder: ty) => {
	/// Creates a refund builder for `amount_msats`, registering an outbound payment
	/// awaiting the corresponding invoice under `payment_id`.
	///
	/// # Errors
	///
	/// Returns [`Bolt12SemanticError::MissingPaths`] if no blinded path can be created,
	/// or [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in use.
	pub fn create_refund_builder(
		&$self, amount_msats: u64, absolute_expiry: Duration, payment_id: PaymentId,
		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>
	) -> Result<$builder, Bolt12SemanticError> {
		let node_id = $self.get_our_node_id();
		let expanded_key = &$self.inbound_payment_key;
		let entropy = &*$self.entropy_source;
		let secp_ctx = &$self.secp_ctx;

		let nonce = Nonce::from_entropy_source(entropy);
		let context = OffersContext::OutboundPayment { payment_id, nonce, hmac: None };
		// Only the first created path is used for the refund.
		let path = $self.create_blinded_paths_using_absolute_expiry(context, Some(absolute_expiry))
			.and_then(|paths| paths.into_iter().next().ok_or(()))
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;

		let builder = RefundBuilder::deriving_signing_pubkey(
			node_id, expanded_key, nonce, secp_ctx, amount_msats, payment_id
		)?
		.chain_hash($self.chain_hash)
		.absolute_expiry(absolute_expiry)
		.path(path);

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);

		// The pending payment is abandoned if no invoice arrives before the refund
		// expires.
		let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
		$self.pending_outbound_payments
			.add_new_awaiting_invoice(
				payment_id, expiration, retry_strategy, max_total_routing_fee_msat, None,
			)
			.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)?;

		Ok(builder.into())
	}
} }
9983
/// Caps the number of onion messages enqueued per offers-related request when pairing
/// destination paths with reply paths (applied via `take(...)` at the enqueue sites).
const OFFERS_MESSAGE_REQUEST_LIMIT: usize = 10;
9990
9991impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
9992where
9993 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
9994 T::Target: BroadcasterInterface,
9995 ES::Target: EntropySource,
9996 NS::Target: NodeSigner,
9997 SP::Target: SignerProvider,
9998 F::Target: FeeEstimator,
9999 R::Target: Router,
10000 MR::Target: MessageRouter,
10001 L::Target: Logger,
10002{
	// Instantiate the offer/refund builder methods defined by the macros above; the
	// concrete builder type differs between Rust and C-bindings builds.
	#[cfg(not(c_bindings))]
	create_offer_builder!(self, OfferBuilder<DerivedMetadata, secp256k1::All>);
	#[cfg(not(c_bindings))]
	create_refund_builder!(self, RefundBuilder<secp256k1::All>);

	#[cfg(c_bindings)]
	create_offer_builder!(self, OfferWithDerivedMetadataBuilder);
	#[cfg(c_bindings)]
	create_refund_builder!(self, RefundMaybeWithDerivedMetadataBuilder);
10012
	/// Pays for an [`Offer`] by sending an invoice request and registering an outbound
	/// payment awaiting the invoice under `payment_id`.
	///
	/// The pending payment is abandoned after a timer tick if no invoice is received.
	///
	/// # Errors
	///
	/// Returns [`Bolt12SemanticError::DuplicatePaymentId`] if `payment_id` is already in
	/// use, or any error propagated from building/sending the invoice request.
	pub fn pay_for_offer(
		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
		payer_note: Option<String>, payment_id: PaymentId, retry_strategy: Retry,
		max_total_routing_fee_msat: Option<u64>
	) -> Result<(), Bolt12SemanticError> {
		self.pay_for_offer_intern(offer, quantity, amount_msats, payer_note, payment_id, None, |invoice_request, nonce| {
			let expiration = StaleExpiration::TimerTicks(1);
			// Keep the request around so it can be retried on reconnect.
			let retryable_invoice_request = RetryableInvoiceRequest {
				invoice_request: invoice_request.clone(),
				nonce,
			};
			self.pending_outbound_payments
				.add_new_awaiting_invoice(
					payment_id, expiration, retry_strategy, max_total_routing_fee_msat,
					Some(retryable_invoice_request)
				)
				.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
		})
	}
10085
	/// Shared implementation for paying an offer: builds and signs the invoice request,
	/// creates authenticated reply paths, registers the pending payment via
	/// `create_pending_payment`, and enqueues the request for sending.
	///
	/// `human_readable_name` is set when the offer was resolved from a BIP-353-style
	/// name (see `pay_for_offer_from_human_readable_name`).
	fn pay_for_offer_intern<CPP: FnOnce(&InvoiceRequest, Nonce) -> Result<(), Bolt12SemanticError>>(
		&self, offer: &Offer, quantity: Option<u64>, amount_msats: Option<u64>,
		payer_note: Option<String>, payment_id: PaymentId,
		human_readable_name: Option<HumanReadableName>, create_pending_payment: CPP,
	) -> Result<(), Bolt12SemanticError> {
		let expanded_key = &self.inbound_payment_key;
		let entropy = &*self.entropy_source;
		let secp_ctx = &self.secp_ctx;

		let nonce = Nonce::from_entropy_source(entropy);
		let builder: InvoiceRequestBuilder<secp256k1::All> = offer
			.request_invoice(expanded_key, nonce, secp_ctx, payment_id)?
			.into();
		let builder = builder.chain_hash(self.chain_hash)?;

		// Apply each optional field only when provided.
		let builder = match quantity {
			None => builder,
			Some(quantity) => builder.quantity(quantity)?,
		};
		let builder = match amount_msats {
			None => builder,
			Some(amount_msats) => builder.amount_msats(amount_msats)?,
		};
		let builder = match payer_note {
			None => builder,
			Some(payer_note) => builder.payer_note(payer_note),
		};
		let builder = match human_readable_name {
			None => builder,
			Some(hrn) => builder.sourced_from_human_readable_name(hrn),
		};
		let invoice_request = builder.build_and_sign()?;

		// The HMAC lets us authenticate the eventual invoice as a response to this
		// specific payment.
		let hmac = payment_id.hmac_for_offer_payment(nonce, expanded_key);
		let context = MessageContext::Offers(
			OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }
		);
		let reply_paths = self.create_blinded_paths(context)
			.map_err(|_| Bolt12SemanticError::MissingPaths)?;

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		// Register the payment before enqueueing so we never send a request we can't
		// match to a pending payment.
		create_pending_payment(&invoice_request, nonce)?;

		self.enqueue_invoice_request(invoice_request, reply_paths)
	}
10132
10133 fn enqueue_invoice_request(
10134 &self,
10135 invoice_request: InvoiceRequest,
10136 reply_paths: Vec<BlindedMessagePath>,
10137 ) -> Result<(), Bolt12SemanticError> {
10138 let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
10139 if !invoice_request.paths().is_empty() {
10140 reply_paths
10141 .iter()
10142 .flat_map(|reply_path| invoice_request.paths().iter().map(move |path| (path, reply_path)))
10143 .take(OFFERS_MESSAGE_REQUEST_LIMIT)
10144 .for_each(|(path, reply_path)| {
10145 let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10146 destination: Destination::BlindedPath(path.clone()),
10147 reply_path: reply_path.clone(),
10148 };
10149 let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10150 pending_offers_messages.push((message, instructions));
10151 });
10152 } else if let Some(node_id) = invoice_request.issuer_signing_pubkey() {
10153 for reply_path in reply_paths {
10154 let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
10155 destination: Destination::Node(node_id),
10156 reply_path,
10157 };
10158 let message = OffersMessage::InvoiceRequest(invoice_request.clone());
10159 pending_offers_messages.push((message, instructions));
10160 }
10161 } else {
10162 debug_assert!(false);
10163 return Err(Bolt12SemanticError::MissingIssuerSigningPubkey);
10164 }
10165
10166 Ok(())
10167 }
10168
	/// Responds to a [`Refund`] by creating an inbound payment and sending back a signed
	/// [`Bolt12Invoice`] over the refund's blinded paths (or directly to the payer's
	/// node id when the refund has none).
	///
	/// # Errors
	///
	/// Returns [`Bolt12SemanticError::UnsupportedChain`] on a chain mismatch,
	/// `MissingPaths` if blinded paths cannot be created, or `InvalidAmount` if the
	/// inbound payment cannot be registered.
	pub fn request_refund_payment(
		&self, refund: &Refund
	) -> Result<Bolt12Invoice, Bolt12SemanticError> {
		let expanded_key = &self.inbound_payment_key;
		let entropy = &*self.entropy_source;
		let secp_ctx = &self.secp_ctx;

		let amount_msats = refund.amount_msats();
		let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;

		if refund.chain() != self.chain_hash {
			return Err(Bolt12SemanticError::UnsupportedChain);
		}

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
			Ok((payment_hash, payment_secret)) => {
				let payment_context = PaymentContext::Bolt12Refund(Bolt12RefundContext {});
				let payment_paths = self.create_blinded_payment_paths(
					amount_msats, payment_secret, payment_context
				)
				.map_err(|_| Bolt12SemanticError::MissingPaths)?;

				// Without std the invoice creation time comes from the highest block
				// timestamp we have seen rather than the system clock.
				#[cfg(feature = "std")]
				let builder = refund.respond_using_derived_keys(
					payment_paths, payment_hash, expanded_key, entropy
				)?;
				#[cfg(not(feature = "std"))]
				let created_at = Duration::from_secs(
					self.highest_seen_timestamp.load(Ordering::Acquire) as u64
				);
				#[cfg(not(feature = "std"))]
				let builder = refund.respond_using_derived_keys_no_std(
					payment_paths, payment_hash, created_at, expanded_key, entropy
				)?;
				let builder: InvoiceBuilder<DerivedSigningPubkey> = builder.into();
				let invoice = builder.allow_mpp().build_and_sign(secp_ctx)?;

				// HMAC authenticates any later messages referencing this inbound payment.
				let nonce = Nonce::from_entropy_source(entropy);
				let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
				let context = MessageContext::Offers(OffersContext::InboundPayment {
					payment_hash: invoice.payment_hash(), nonce, hmac
				});
				let reply_paths = self.create_blinded_paths(context)
					.map_err(|_| Bolt12SemanticError::MissingPaths)?;

				let mut pending_offers_messages = self.pending_offers_messages.lock().unwrap();
				if refund.paths().is_empty() {
					// No blinded paths in the refund: send to the payer's node id directly.
					for reply_path in reply_paths {
						let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
							destination: Destination::Node(refund.payer_signing_pubkey()),
							reply_path,
						};
						let message = OffersMessage::Invoice(invoice.clone());
						pending_offers_messages.push((message, instructions));
					}
				} else {
					// Cross product of (reply_path, refund path), capped at the message limit.
					reply_paths
						.iter()
						.flat_map(|reply_path| refund.paths().iter().map(move |path| (path, reply_path)))
						.take(OFFERS_MESSAGE_REQUEST_LIMIT)
						.for_each(|(path, reply_path)| {
							let instructions = MessageSendInstructions::WithSpecifiedReplyPath {
								destination: Destination::BlindedPath(path.clone()),
								reply_path: reply_path.clone(),
							};
							let message = OffersMessage::Invoice(invoice.clone());
							pending_offers_messages.push((message, instructions));
						});
				}

				Ok(invoice)
			},
			Err(()) => Err(Bolt12SemanticError::InvalidAmount),
		}
	}
10268
	#[cfg(feature = "dnssec")]
	/// Begins paying an offer advertised under a human-readable (DNS) name: sends
	/// DNSSEC queries to the given `dns_resolvers` and registers an outbound payment
	/// that awaits the resolved offer under `payment_id`.
	///
	/// The pending payment is abandoned after a timer tick if resolution fails.
	pub fn pay_for_offer_from_human_readable_name(
		&self, name: HumanReadableName, amount_msats: u64, payment_id: PaymentId,
		retry_strategy: Retry, max_total_routing_fee_msat: Option<u64>,
		dns_resolvers: Vec<Destination>,
	) -> Result<(), ()> {
		let (onion_message, context) =
			self.hrn_resolver.resolve_name(payment_id, name, &*self.entropy_source)?;
		let reply_paths = self.create_blinded_paths(MessageContext::DNSResolver(context))?;
		let expiration = StaleExpiration::TimerTicks(1);
		self.pending_outbound_payments.add_new_awaiting_offer(payment_id, expiration, retry_strategy, max_total_routing_fee_msat, amount_msats)?;
		// Cross product of (resolver, reply_path), capped at the message limit.
		let message_params = dns_resolvers
			.iter()
			.flat_map(|destination| reply_paths.iter().map(move |path| (path, destination)))
			.take(OFFERS_MESSAGE_REQUEST_LIMIT);
		for (reply_path, destination) in message_params {
			self.pending_dns_onion_messages.lock().unwrap().push((
				DNSResolverMessage::DNSSECQuery(onion_message.clone()),
				MessageSendInstructions::WithSpecifiedReplyPath {
					destination: destination.clone(),
					reply_path: reply_path.clone(),
				},
			));
		}
		Ok(())
	}
10335
10336 pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
10367 min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
10368 inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
10369 &self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10370 min_final_cltv_expiry_delta)
10371 }
10372
10373 pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
10420 invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
10421 inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
10422 invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
10423 min_final_cltv_expiry)
10424 }
10425
10426 pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
10431 inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
10432 }
10433
10434 fn create_blinded_paths_using_absolute_expiry(
10441 &self, context: OffersContext, absolute_expiry: Option<Duration>,
10442 ) -> Result<Vec<BlindedMessagePath>, ()> {
10443 let now = self.duration_since_epoch();
10444 let max_short_lived_absolute_expiry = now.saturating_add(MAX_SHORT_LIVED_RELATIVE_EXPIRY);
10445
10446 if absolute_expiry.unwrap_or(Duration::MAX) <= max_short_lived_absolute_expiry {
10447 self.create_compact_blinded_paths(context)
10448 } else {
10449 self.create_blinded_paths(MessageContext::Offers(context))
10450 }
10451 }
10452
10453 pub(super) fn duration_since_epoch(&self) -> Duration {
10454 #[cfg(not(feature = "std"))]
10455 let now = Duration::from_secs(
10456 self.highest_seen_timestamp.load(Ordering::Acquire) as u64
10457 );
10458 #[cfg(feature = "std")]
10459 let now = std::time::SystemTime::now()
10460 .duration_since(std::time::SystemTime::UNIX_EPOCH)
10461 .expect("SystemTime::now() should come after SystemTime::UNIX_EPOCH");
10462
10463 now
10464 }
10465
10466 fn create_blinded_paths(&self, context: MessageContext) -> Result<Vec<BlindedMessagePath>, ()> {
10471 let recipient = self.get_our_node_id();
10472 let secp_ctx = &self.secp_ctx;
10473
10474 let peers = self.per_peer_state.read().unwrap()
10475 .iter()
10476 .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10477 .filter(|(_, peer)| peer.is_connected)
10478 .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10479 .map(|(node_id, _)| *node_id)
10480 .collect::<Vec<_>>();
10481
10482 self.message_router
10483 .create_blinded_paths(recipient, context, peers, secp_ctx)
10484 .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10485 }
10486
10487 fn create_compact_blinded_paths(&self, context: OffersContext) -> Result<Vec<BlindedMessagePath>, ()> {
10492 let recipient = self.get_our_node_id();
10493 let secp_ctx = &self.secp_ctx;
10494
10495 let peers = self.per_peer_state.read().unwrap()
10496 .iter()
10497 .map(|(node_id, peer_state)| (node_id, peer_state.lock().unwrap()))
10498 .filter(|(_, peer)| peer.is_connected)
10499 .filter(|(_, peer)| peer.latest_features.supports_onion_messages())
10500 .map(|(node_id, peer)| MessageForwardNode {
10501 node_id: *node_id,
10502 short_channel_id: peer.channel_by_id
10503 .iter()
10504 .filter(|(_, channel)| channel.context().is_usable())
10505 .min_by_key(|(_, channel)| channel.context().channel_creation_height)
10506 .and_then(|(_, channel)| channel.context().get_short_channel_id()),
10507 })
10508 .collect::<Vec<_>>();
10509
10510 self.message_router
10511 .create_compact_blinded_paths(recipient, MessageContext::Offers(context), peers, secp_ctx)
10512 .and_then(|paths| (!paths.is_empty()).then(|| paths).ok_or(()))
10513 }
10514
10515 fn create_blinded_payment_paths(
10518 &self, amount_msats: u64, payment_secret: PaymentSecret, payment_context: PaymentContext
10519 ) -> Result<Vec<BlindedPaymentPath>, ()> {
10520 let expanded_key = &self.inbound_payment_key;
10521 let entropy = &*self.entropy_source;
10522 let secp_ctx = &self.secp_ctx;
10523
10524 let first_hops = self.list_usable_channels();
10525 let payee_node_id = self.get_our_node_id();
10526 let max_cltv_expiry = self.best_block.read().unwrap().height + CLTV_FAR_FAR_AWAY
10527 + LATENCY_GRACE_PERIOD_BLOCKS;
10528
10529 let payee_tlvs = UnauthenticatedReceiveTlvs {
10530 payment_secret,
10531 payment_constraints: PaymentConstraints {
10532 max_cltv_expiry,
10533 htlc_minimum_msat: 1,
10534 },
10535 payment_context,
10536 };
10537 let nonce = Nonce::from_entropy_source(entropy);
10538 let payee_tlvs = payee_tlvs.authenticate(nonce, expanded_key);
10539
10540 self.router.create_blinded_payment_paths(
10541 payee_node_id, first_hops, payee_tlvs, amount_msats, secp_ctx
10542 )
10543 }
10544
10545 pub fn get_phantom_scid(&self) -> u64 {
10550 let best_block_height = self.best_block.read().unwrap().height;
10551 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10552 loop {
10553 let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10554 match short_to_chan_info.get(&scid_candidate) {
10556 Some(_) => continue,
10557 None => return scid_candidate
10558 }
10559 }
10560 }
10561
10562 pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
10566 PhantomRouteHints {
10567 channels: self.list_usable_channels(),
10568 phantom_scid: self.get_phantom_scid(),
10569 real_node_pubkey: self.get_our_node_id(),
10570 }
10571 }
10572
10573 pub fn get_intercept_scid(&self) -> u64 {
10580 let best_block_height = self.best_block.read().unwrap().height;
10581 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
10582 loop {
10583 let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.chain_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
10584 if short_to_chan_info.contains_key(&scid_candidate) { continue }
10586 return scid_candidate
10587 }
10588 }
10589
10590 pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
10593 let mut inflight_htlcs = InFlightHtlcs::new();
10594
10595 let per_peer_state = self.per_peer_state.read().unwrap();
10596 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
10597 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
10598 let peer_state = &mut *peer_state_lock;
10599 for chan in peer_state.channel_by_id.values().filter_map(
10600 |phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }
10601 ) {
10602 for (htlc_source, _) in chan.inflight_htlc_sources() {
10603 if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
10604 inflight_htlcs.process_path(path, self.get_our_node_id());
10605 }
10606 }
10607 }
10608 }
10609
10610 inflight_htlcs
10611 }
10612
/// Test-only helper: drains all pending events through the normal event-processing path and
/// returns them as a `Vec`.
#[cfg(any(test, feature = "_test_utils"))]
pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
	let collected = core::cell::RefCell::new(Vec::new());
	let handler = |event: events::Event| {
		collected.borrow_mut().push(event);
		Ok(())
	};
	self.process_pending_events(&handler);
	collected.into_inner()
}
10620
/// Test-only helper: enqueues an event directly onto the pending-events queue.
#[cfg(feature = "_test_utils")]
pub fn push_pending_event(&self, event: events::Event) {
	// No post-event completion action is attached to manually-pushed events.
	self.pending_events.lock().unwrap().push_back((event, None));
}
10626
/// Test-only helper: pops the oldest pending event (if any), discarding its associated
/// completion action.
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
	let mut queue = self.pending_events.lock().unwrap();
	match queue.pop_front() {
		Some((event, _action)) => Some(event),
		None => None,
	}
}
10632
/// Test-only helper: reports whether any outbound payments are still pending.
#[cfg(test)]
pub fn has_pending_payments(&self) -> bool {
	let outbounds = &self.pending_outbound_payments;
	outbounds.has_pending_payments()
}
10637
/// Test-only helper: drops all pending outbound payment state.
#[cfg(test)]
pub fn clear_pending_payments(&self) {
	let outbounds = &self.pending_outbound_payments;
	outbounds.clear_pending_payments()
}
10642
/// Releases the next blocked `ChannelMonitorUpdate` for the given channel once a blocking
/// action has completed, looping in case a released update immediately unblocks another.
///
/// `completed_blocker`, when set, is first removed from the channel's RAA-monitor-update
/// blocker list; if updates are still held (e.g. by another channel's pending update) we bail
/// out and retry later.
fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey,
	channel_funding_outpoint: OutPoint, channel_id: ChannelId,
	mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) {

	let logger = WithContext::from(
		&self.logger, Some(counterparty_node_id), Some(channel_id), None
	);
	// Loop so that, after applying one unblocked monitor update, we can immediately process
	// any further queued update for the same channel (see `continue` below).
	loop {
		let per_peer_state = self.per_peer_state.read().unwrap();
		if let Some(peer_state_mtx) = per_peer_state.get(&counterparty_node_id) {
			let mut peer_state_lck = peer_state_mtx.lock().unwrap();
			let peer_state = &mut *peer_state_lck;
			// `take()` ensures the blocker is only removed on the first loop iteration.
			if let Some(blocker) = completed_blocker.take() {
				if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates
					.get_mut(&channel_id)
				{
					blockers.retain(|iter| iter != &blocker);
				}
			}

			// If other actions still block monitor updates for this channel, do nothing now.
			if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates,
				channel_funding_outpoint, channel_id, counterparty_node_id) {
				log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first",
					&channel_id);
				break;
			}

			if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(
				channel_id) {
				if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
					debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint);
					if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
						log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor",
							channel_id);
						handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
							peer_state_lck, peer_state, per_peer_state, chan);
						// More updates queued behind this one: loop around (locks are
						// re-acquired fresh at the top) and release the next one too.
						if further_update_exists {
							continue;
						}
					} else {
						log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update",
							channel_id);
					}
				}
			}
		} else {
			log_debug!(logger,
				"Got a release post-RAA monitor update for peer {} but the channel is gone",
				log_pubkey!(counterparty_node_id));
		}
		break;
	}
}
10706
10707 fn handle_post_event_actions(&self, actions: Vec<EventCompletionAction>) {
10708 for action in actions {
10709 match action {
10710 EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
10711 channel_funding_outpoint, channel_id, counterparty_node_id
10712 } => {
10713 self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None);
10714 }
10715 }
10716 }
10717 }
10718
/// Async variant of event processing: drives the shared `process_events_body!` machinery,
/// awaiting the user-provided `handler` future for each pending [`Event`]. A handler
/// returning `Err(ReplayEvent)` indicates the event should be replayed (per the macro's
/// semantics — see its definition).
pub async fn process_pending_events_async<Future: core::future::Future<Output = Result<(), ReplayEvent>>, H: Fn(Event) -> Future>(
	&self, handler: H
) {
	let mut ev;
	process_events_body!(self, ev, { handler(ev).await });
}
10729}
10730
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Drains all queued peer messages (and, if any peer is connected, queued broadcast
	/// messages), first running monitor-event / holding-cell / closing-signed processing which
	/// may generate additional messages and may require persistence.
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		// Collected through a RefCell so the closure passed to the persistence guard can fill
		// it while the guard decides whether a persist is needed.
		let events = RefCell::new(Vec::new());
		PersistenceNotifierGuard::optionally_notify(self, || {
			let mut result = NotifyOption::SkipPersistNoEvents;

			// Each of these steps may mutate channel state; if any did work, persist.
			if self.process_pending_monitor_events() {
				result = NotifyOption::DoPersist;
			}

			if self.check_free_holding_cells() {
				result = NotifyOption::DoPersist;
			}
			if self.maybe_generate_initial_closing_signed() {
				result = NotifyOption::DoPersist;
			}

			let mut is_any_peer_connected = false;
			let mut pending_events = Vec::new();
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				if peer_state.pending_msg_events.len() > 0 {
					pending_events.append(&mut peer_state.pending_msg_events);
				}
				if peer_state.is_connected {
					is_any_peer_connected = true
				}
			}

			// Only drain broadcast messages when at least one peer is connected to carry them.
			if is_any_peer_connected {
				let mut broadcast_msgs = self.pending_broadcast_messages.lock().unwrap();
				pending_events.append(&mut broadcast_msgs);
			}

			if !pending_events.is_empty() {
				events.replace(pending_events);
			}

			result
		});
		events.into_inner()
	}
}
10803
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Synchronous event processing: drives the shared `process_events_body!` machinery,
	/// invoking `handler` once per pending event (see the macro for replay/requeue semantics).
	fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
		let mut ev;
		process_events_body!(self, ev, handler.handle_event(ev));
	}
}
10825
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Applies a connected block, asserting it extends our current best block in chain order,
	/// then delegates to the `Confirm` methods for the actual state updates.
	fn filtered_block_connected(&self, header: &Header, txdata: &TransactionData, height: u32) {
		{
			// Scope the read lock so it is released before the Confirm calls below
			// (best_block_updated takes the write lock on the same RwLock).
			let best_block = self.best_block.read().unwrap();
			assert_eq!(best_block.block_hash, header.prev_blockhash,
				"Blocks must be connected in chain-order - the connected header must build on the last connected header");
			assert_eq!(best_block.height, height - 1,
				"Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
		}

		self.transactions_confirmed(header, txdata, height);
		self.best_block_updated(header, height);
	}

	/// Handles a reorg-driven block disconnect: asserts the disconnected header is our current
	/// tip, rewinds the best block by one, and notifies every channel of the new tip.
	fn block_disconnected(&self, header: &Header, height: u32) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		let new_height = height - 1;
		{
			let mut best_block = self.best_block.write().unwrap();
			assert_eq!(best_block.block_hash, header.block_hash(),
				"Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
			assert_eq!(best_block.height, height,
				"Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
			*best_block = BestBlock::new(header.prev_blockhash, new_height)
		}

		self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
	}
}
10868
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Notifies every channel that the given transactions confirmed at `height`. If the
	/// confirmations arrived below our current best height (out-of-order delivery), channels
	/// are additionally re-notified of the current best block.
	fn transactions_confirmed(&self, header: &Header, txdata: &TransactionData, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		// transactions_confirmed returns no timed-out HTLCs, hence the empty Vec in the tuple
		// expected by do_chain_event.
		self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
			.map(|(a, b)| (a, Vec::new(), b)));

		let last_best_block_height = self.best_block.read().unwrap().height;
		if height < last_best_block_height {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
			self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None)));
		}
	}

	/// Advances our best-block pointer, maintains a rolling window of recent feerate estimates
	/// used to detect stale channel feerates, notifies all channels of the new tip, and
	/// ratchets the highest-seen timestamp forward.
	fn best_block_updated(&self, header: &Header, height: u32) {
		let block_hash = header.block_hash();
		log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);

		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);

		let mut min_anchor_feerate = None;
		let mut min_non_anchor_feerate = None;
		// Only track feerates once startup background events have been processed; the window
		// holds one (anchor, non-anchor) sample per connected block, capped at
		// FEERATE_TRACKING_BLOCKS entries.
		if self.background_events_processed_since_startup.load(Ordering::Relaxed) {
			let mut last_days_feerates = self.last_days_feerates.lock().unwrap();
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				last_days_feerates.pop_front();
			}
			let anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedAnchorChannelRemoteFee);
			let non_anchor_feerate = self.fee_estimator
				.bounded_sat_per_1000_weight(ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee);
			last_days_feerates.push_back((anchor_feerate, non_anchor_feerate));
			// Only act on the minimum once the window is full, so early samples after startup
			// don't trigger stale-feerate checks prematurely.
			if last_days_feerates.len() >= FEERATE_TRACKING_BLOCKS {
				min_anchor_feerate = last_days_feerates.iter().map(|(f, _)| f).min().copied();
				min_non_anchor_feerate = last_days_feerates.iter().map(|(_, f)| f).min().copied();
			}
		}

		self.do_chain_event(Some(height), |channel| {
			let logger = WithChannelContext::from(&self.logger, &channel.context, None);
			// Compare against the minimum feerate matching the channel's commitment type;
			// a stale feerate surfaces as an Err, which do_chain_event treats as closure.
			if channel.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
				if let Some(feerate) = min_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			} else {
				if let Some(feerate) = min_non_anchor_feerate {
					channel.check_for_stale_feerate(&logger, feerate)?;
				}
			}
			channel.best_block_updated(height, header.time, self.chain_hash, &self.node_signer, &self.default_configuration, &&WithChannelContext::from(&self.logger, &channel.context, None))
		});

		// CAS loop: monotonically ratchet the stored timestamp up to the header's time,
		// tolerating concurrent updates from other threads.
		macro_rules! max_time {
			($timestamp: expr) => {
				loop {
					let old_serial = $timestamp.load(Ordering::Acquire);
					if old_serial >= header.time as usize { break; }
					if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
						break;
					}
				}
			}
		}
		max_time!(self.highest_seen_timestamp);
		#[cfg(feature = "dnssec")] {
			let timestamp = self.highest_seen_timestamp.load(Ordering::Relaxed) as u32;
			self.hrn_resolver.new_best_block(height, timestamp);
		}
	}

	/// Returns (funding txid, confirmation height, block hash) for every funded channel whose
	/// funding transaction we know to be confirmed, so callers can re-check for reorgs.
	fn get_relevant_txids(&self) -> Vec<(Txid, u32, Option<BlockHash>)> {
		let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
		for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
			let peer_state = &mut *peer_state_lock;
			for chan in peer_state.channel_by_id.values().filter_map(|phase| if let ChannelPhase::Funded(chan) = phase { Some(chan) } else { None }) {
				let txid_opt = chan.context.get_funding_txo();
				let height_opt = chan.context.get_funding_tx_confirmation_height();
				let hash_opt = chan.context.get_funding_tx_confirmed_in();
				if let (Some(funding_txo), Some(conf_height), Some(block_hash)) = (txid_opt, height_opt, hash_opt) {
					res.push((funding_txo.txid, conf_height, Some(block_hash)));
				}
			}
		}
		res
	}

	/// Handles a reorg removing `txid` from the chain: any channel whose funding transaction
	/// matches is told its funding tx is unconfirmed; all other channels are untouched.
	fn transaction_unconfirmed(&self, txid: &Txid) {
		let _persistence_guard =
			PersistenceNotifierGuard::optionally_notify_skipping_background_events(
				self, || -> NotifyOption { NotifyOption::DoPersist });
		self.do_chain_event(None, |channel| {
			if let Some(funding_txo) = channel.context.get_funding_txo() {
				if funding_txo.txid == *txid {
					channel.funding_transaction_unconfirmed(&&WithChannelContext::from(&self.logger, &channel.context, None)).map(|()| (None, Vec::new(), None))
				} else { Ok((None, Vec::new(), None)) }
			} else { Ok((None, Vec::new(), None)) }
		});
	}
}
11001
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Core chain-event driver: applies `f` to every funded channel and acts on the result —
	/// sending `channel_ready`/announcements, failing timed-out HTLCs, and force-closing
	/// channels whose `f` returned an `Err(ClosureReason)`. When `height_opt` is `Some`, also
	/// times out claimable and intercepted HTLCs that are too close to expiry.
	fn do_chain_event<FN: Fn(&mut Channel<SP>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
	(&self, height_opt: Option<u32>, f: FN) {
		// Channel closures and HTLC failures are collected while holding the peer locks and
		// processed after all locks are released (see the bottom of this function).
		let mut failed_channels = Vec::new();
		let mut timed_out_htlcs = Vec::new();
		{
			let per_peer_state = self.per_peer_state.read().unwrap();
			for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;

				// retain(): returning false below drops the channel from the map (closure).
				peer_state.channel_by_id.retain(|_, phase| {
					match phase {
						// Unfunded channels have no on-chain state to react to; keep them.
						ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) |
						ChannelPhase::UnfundedOutboundV2(_) | ChannelPhase::UnfundedInboundV2(_) => true,
						ChannelPhase::Funded(channel) => {
							let res = f(channel);
							if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
								// Fail HTLCs the channel reported as timed out with
								// temporary_channel_failure (0x1000|14) plus channel-update data.
								for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
									let failure_code = 0x1000|14; let data = self.get_htlc_inbound_temp_fail_data(failure_code);
									timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
										HTLCDestination::NextHopChannel { node_id: Some(channel.context.get_counterparty_node_id()), channel_id: channel.context.channel_id() }));
								}
								let logger = WithChannelContext::from(&self.logger, &channel.context, None);
								if let Some(channel_ready) = channel_ready_opt {
									send_channel_ready!(self, pending_msg_events, channel, channel_ready);
									if channel.context.is_usable() {
										log_trace!(logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", channel.context.channel_id());
										if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
											pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
												node_id: channel.context.get_counterparty_node_id(),
												msg,
											});
										}
									} else {
										log_trace!(logger, "Sending channel_ready WITHOUT channel_update for {}", channel.context.channel_id());
									}
								}

								{
									let mut pending_events = self.pending_events.lock().unwrap();
									emit_channel_ready_event!(pending_events, channel);
								}

								if let Some(height) = height_opt {
									let funding_conf_height =
										channel.context.get_funding_tx_confirmation_height().unwrap_or(height);
									// Periodically rebroadcast the announcement: within 1008
									// blocks of funding confirmation, on heights matching the
									// confirmation height mod 6.
									let rebroadcast_announcement = funding_conf_height < height + 1008
										&& funding_conf_height % 6 == height % 6;
									#[allow(unused_mut, unused_assignments)]
									let mut should_announce = announcement_sigs.is_some() || rebroadcast_announcement;
									// In tests, suppress rebroadcasts so announcement counts
									// stay deterministic.
									#[cfg(test)]
									{
										should_announce = announcement_sigs.is_some();
									}
									if should_announce {
										if let Some(announcement) = channel.get_signed_channel_announcement(
											&self.node_signer, self.chain_hash, height, &self.default_configuration,
										) {
											pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
												msg: announcement,
												update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
											});
										}
									}
								}
								if let Some(announcement_sigs) = announcement_sigs {
									log_trace!(logger, "Sending announcement_signatures for channel {}", channel.context.channel_id());
									pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
										node_id: channel.context.get_counterparty_node_id(),
										msg: announcement_sigs,
									});
								}
								if channel.is_our_channel_ready() {
									if let Some(real_scid) = channel.context.get_short_channel_id() {
										// Register the real SCID; an existing identical entry
										// is fine (idempotent), anything else is a fatal bug.
										let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
										let scid_insert = short_to_chan_info.insert(real_scid, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
										assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.context.get_counterparty_node_id(), channel.context.channel_id()),
											"SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
											fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
									}
								}
							} else if let Err(reason) = res {
								// `f` demanded closure: force-shutdown, queue a broadcastable
								// channel_update, and tell the peer why we're disconnecting.
								let reason_message = format!("{}", reason);
								let mut close_res = channel.context.force_shutdown(true, reason);
								locked_close_channel!(self, peer_state, &channel.context, close_res);
								failed_channels.push(close_res);
								if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
									let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
									pending_broadcast_messages.push(events::MessageSendEvent::BroadcastChannelUpdate {
										msg: update
									});
								}
								pending_msg_events.push(events::MessageSendEvent::HandleError {
									node_id: channel.context.get_counterparty_node_id(),
									action: msgs::ErrorAction::DisconnectPeer {
										msg: Some(msgs::ErrorMessage {
											channel_id: channel.context.channel_id(),
											data: reason_message,
										})
									},
								});
								// Drop the channel from the map.
								return false;
							}
							true
						}
					}
				});
			}
		}

		if let Some(height) = height_opt {
			// Fail claimable HTLCs which are within HTLC_FAIL_BACK_BUFFER blocks of their CLTV
			// expiry, with incorrect_or_unknown_payment_details (0x4000|15) carrying the
			// htlc amount and current height as required failure data.
			self.claimable_payments.lock().unwrap().claimable_payments.retain(|payment_hash, payment| {
				payment.htlcs.retain(|htlc| {
					if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
						let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
						htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());

						timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
							HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
							HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
						false
					} else { true }
				});
				!payment.htlcs.is_empty() });

			// Likewise time out intercepted HTLCs nearing expiry, failing them back with
			// temporary_node_failure (0x2000|2).
			let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
			intercepted_htlcs.retain(|_, htlc| {
				if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
					let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
						short_channel_id: htlc.prev_short_channel_id,
						user_channel_id: Some(htlc.prev_user_channel_id),
						htlc_id: htlc.prev_htlc_id,
						incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
						phantom_shared_secret: None,
						counterparty_node_id: htlc.prev_counterparty_node_id,
						outpoint: htlc.prev_funding_outpoint,
						channel_id: htlc.prev_channel_id,
						blinded_failure: htlc.forward_info.routing.blinded_failure(),
						cltv_expiry: htlc.forward_info.routing.incoming_cltv_expiry(),
					});

					// Intercepted HTLCs are always forwards; anything else is a logic error.
					let requested_forward_scid = match htlc.forward_info.routing {
						PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
						_ => unreachable!(),
					};
					timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
						HTLCFailReason::from_failure_code(0x2000 | 2),
						HTLCDestination::InvalidForward { requested_forward_scid }));
					let logger = WithContext::from(
						&self.logger, None, Some(htlc.prev_channel_id), Some(htlc.forward_info.payment_hash)
					);
					log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
					false
				} else { true }
			});
		}

		// All locks have been released; now run the deferred closures and HTLC failures.
		for failure in failed_channels {
			self.finish_close_channel(failure);
		}

		for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
		}
	}

	/// Returns a [`Future`] which completes when an event is available or persistence is
	/// needed.
	pub fn get_event_or_persistence_needed_future(&self) -> Future {
		self.event_persist_notifier.get_future()
	}

	/// Atomically reads-and-clears the needs-persistence flag.
	pub fn get_and_clear_needs_persistence(&self) -> bool {
		self.needs_persist_flag.swap(false, Ordering::AcqRel)
	}

	/// Test-only: exposes whether the event/persist notifier currently has a pending notify.
	#[cfg(any(test, feature = "_test_utils"))]
	pub fn get_event_or_persist_condvar_value(&self) -> bool {
		self.event_persist_notifier.notify_pending()
	}

	/// Returns a copy of our current best-block view.
	pub fn current_best_block(&self) -> BestBlock {
		self.best_block.read().unwrap().clone()
	}

	/// The [`NodeFeatures`] we advertise, derived from our configuration.
	pub fn node_features(&self) -> NodeFeatures {
		provided_node_features(&self.default_configuration)
	}

	/// Test-only: the BOLT 11 invoice features we advertise.
	#[cfg(any(feature = "_test_utils", test))]
	pub fn bolt11_invoice_features(&self) -> Bolt11InvoiceFeatures {
		provided_bolt11_invoice_features(&self.default_configuration)
	}

	/// The BOLT 12 invoice features we advertise.
	fn bolt12_invoice_features(&self) -> Bolt12InvoiceFeatures {
		provided_bolt12_invoice_features(&self.default_configuration)
	}

	/// The [`ChannelFeatures`] we advertise, derived from our configuration.
	pub fn channel_features(&self) -> ChannelFeatures {
		provided_channel_features(&self.default_configuration)
	}

	/// The [`ChannelTypeFeatures`] we support, derived from our configuration.
	pub fn channel_type_features(&self) -> ChannelTypeFeatures {
		provided_channel_type_features(&self.default_configuration)
	}

	/// The [`InitFeatures`] we advertise in `init` messages, derived from our configuration.
	pub fn init_features(&self) -> InitFeatures {
		provided_init_features(&self.default_configuration)
	}
}
11287
// Implements the full set of BOLT 1/2 peer wire-message handlers for `ChannelManager`.
//
// Each handler delegates to a corresponding `internal_*` method and wraps the call in a
// `PersistenceNotifierGuard`, whose closure decides whether handling this message requires
// re-persisting the `ChannelManager` and/or processing events. Handlers which cannot change
// durable state return a `SkipPersist*` option; handlers which may update channel state use
// `notify_on_drop` (always persist).
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
	ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	fn handle_open_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannel) {
		// A brand-new inbound channel cannot touch durable state, so we normally skip
		// persistence. An error which "closes" a not-yet-existing channel indicates a bug
		// (debug_assert), but in release builds we persist defensively anyway.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V1(msg));
			let persist = match &res {
				Err(e) if e.closes_channel() => {
					debug_assert!(false, "We shouldn't close a new channel");
					NotifyOption::DoPersist
				},
				_ => NotifyOption::SkipPersistHandleEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_open_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::OpenChannelV2) {
		// V2 (dual-funded) opens are only handled when compiled with `dual_funding`;
		// otherwise we reply with an error and do not close anything (nothing exists yet).
		#[cfg(dual_funding)]
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_open_channel(&counterparty_node_id, OpenChannelMessageRef::V2(msg));
			let persist = match &res {
				Err(e) if e.closes_channel() => {
					debug_assert!(false, "We shouldn't close a new channel");
					NotifyOption::DoPersist
				},
				_ => NotifyOption::SkipPersistHandleEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
		#[cfg(not(dual_funding))]
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Dual-funded channels not supported".to_owned(),
			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
	}

	fn handle_accept_channel(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannel) {
		// Accepting our outbound open does not change durable state until funding, so
		// skip persistence but still run event processing for any resulting messages.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_accept_channel(&counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_accept_channel_v2(&self, counterparty_node_id: PublicKey, msg: &msgs::AcceptChannelV2) {
		// Dual-funded accepts are unconditionally rejected here.
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Dual-funded channels not supported".to_owned(),
			msg.common_fields.temporary_channel_id.clone())), counterparty_node_id);
	}

	fn handle_funding_created(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingCreated) {
		// Funding creation updates channel state; always persist on drop.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_funding_created(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_funding_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::FundingSigned) {
		// Receiving the counterparty's funding signature updates channel state; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_funding_signed(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_channel_ready(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReady) {
		// Only persist if the handler errored in a way that closes the channel.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_channel_ready(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				_ => NotifyOption::SkipPersistHandleEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_stfu(&self, counterparty_node_id: PublicKey, msg: &msgs::Stfu) {
		// Quiescence is not implemented; reply with an error without closing the channel.
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Quiescence not supported".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	// Splicing messages are only routed to us when compiled with `splicing`; even then the
	// feature is unimplemented, so each handler replies with an error without closing.
	#[cfg(splicing)]
	fn handle_splice_init(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceInit) {
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Splicing not supported".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	#[cfg(splicing)]
	fn handle_splice_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceAck) {
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Splicing not supported (splice_ack)".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	#[cfg(splicing)]
	fn handle_splice_locked(&self, counterparty_node_id: PublicKey, msg: &msgs::SpliceLocked) {
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Splicing not supported (splice_locked)".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	fn handle_shutdown(&self, counterparty_node_id: PublicKey, msg: &msgs::Shutdown) {
		// Shutdown progresses the co-op close state machine; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_shutdown(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_closing_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::ClosingSigned) {
		// Closing-fee negotiation may complete a close; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_closing_signed(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_update_add_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateAddHTLC) {
		// An uncommitted HTLC add does not need a persist by itself; only a channel-closing
		// error does. Success generates no events either, so we also skip event handling.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_update_add_htlc(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				Err(_) => NotifyOption::SkipPersistHandleEvents,
				Ok(()) => NotifyOption::SkipPersistNoEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_update_fulfill_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFulfillHTLC) {
		// A fulfill releases a preimage we must not lose; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_update_fulfill_htlc(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_update_fail_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailHTLC) {
		// Same persistence policy as update_add_htlc: only channel-closing errors persist.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_update_fail_htlc(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				Err(_) => NotifyOption::SkipPersistHandleEvents,
				Ok(()) => NotifyOption::SkipPersistNoEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
		// Same persistence policy as update_add_htlc: only channel-closing errors persist.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_update_fail_malformed_htlc(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				Err(_) => NotifyOption::SkipPersistHandleEvents,
				Ok(()) => NotifyOption::SkipPersistNoEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_commitment_signed(&self, counterparty_node_id: PublicKey, msg: &msgs::CommitmentSigned) {
		// A new commitment transaction is durable state; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_commitment_signed(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_revoke_and_ack(&self, counterparty_node_id: PublicKey, msg: &msgs::RevokeAndACK) {
		// Revocation of the prior commitment is durable state; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_revoke_and_ack(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_update_fee(&self, counterparty_node_id: PublicKey, msg: &msgs::UpdateFee) {
		// Same persistence policy as update_add_htlc: only channel-closing errors persist.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_update_fee(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				Err(_) => NotifyOption::SkipPersistHandleEvents,
				Ok(()) => NotifyOption::SkipPersistNoEvents,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn handle_announcement_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::AnnouncementSignatures) {
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_announcement_signatures(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_channel_update(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelUpdate) {
		// The internal handler returns the persistence decision on success; on error we
		// persist to be safe.
		PersistenceNotifierGuard::optionally_notify(self, || {
			if let Ok(persist) = handle_error!(self, self.internal_channel_update(&counterparty_node_id, msg), counterparty_node_id) {
				persist
			} else {
				NotifyOption::DoPersist
			}
		});
	}

	fn handle_channel_reestablish(&self, counterparty_node_id: PublicKey, msg: &msgs::ChannelReestablish) {
		// On success the internal handler chooses the persistence option itself; errors
		// persist only if they close the channel.
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let res = self.internal_channel_reestablish(&counterparty_node_id, msg);
			let persist = match &res {
				Err(e) if e.closes_channel() => NotifyOption::DoPersist,
				Err(_) => NotifyOption::SkipPersistHandleEvents,
				Ok(persist) => *persist,
			};
			let _ = handle_error!(self, res, counterparty_node_id);
			persist
		});
	}

	fn peer_disconnected(&self, counterparty_node_id: PublicKey) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(
			self, || NotifyOption::SkipPersistHandleEvents);
		// Channels force-closed below are collected here and finished after the
		// per_peer_state lock is released, to respect lock ordering.
		let mut failed_channels = Vec::new();
		let mut per_peer_state = self.per_peer_state.write().unwrap();
		let remove_peer = {
			log_debug!(
				WithContext::from(&self.logger, Some(counterparty_node_id), None, None),
				"Marking channels with {} disconnected and generating channel_updates.",
				log_pubkey!(counterparty_node_id)
			);
			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;
				// Keep channels which can survive a disconnect (funded channels that can be
				// paused, resumable unfunded outbound V1 channels); force-shutdown the rest.
				peer_state.channel_by_id.retain(|_, phase| {
					let context = match phase {
						ChannelPhase::Funded(chan) => {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							if chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok() {
								// Channel can be resumed on reconnect; keep it.
								return true;
							}
							&mut chan.context
						},
						ChannelPhase::UnfundedOutboundV1(chan) if chan.is_resumable() => return true,
						ChannelPhase::UnfundedOutboundV1(chan) => &mut chan.context,
						ChannelPhase::UnfundedInboundV1(chan) => {
							&mut chan.context
						},
						ChannelPhase::UnfundedOutboundV2(chan) => {
							&mut chan.context
						},
						ChannelPhase::UnfundedInboundV2(chan) => {
							&mut chan.context
						},
					};
					let mut close_res = context.force_shutdown(false, ClosureReason::DisconnectedPeer);
					locked_close_channel!(self, peer_state, &context, close_res);
					failed_channels.push(close_res);
					false
				});
				peer_state.inbound_channel_request_by_id.clear();
				// Drop any queued peer-directed messages: they are meaningless once the peer
				// is gone (reestablishment will regenerate what is needed). Broadcasts to
				// the rest of the network are kept.
				pending_msg_events.retain(|msg| {
					match msg {
						&events::MessageSendEvent::SendAcceptChannel { .. } => false,
						&events::MessageSendEvent::SendOpenChannel { .. } => false,
						&events::MessageSendEvent::SendFundingCreated { .. } => false,
						&events::MessageSendEvent::SendFundingSigned { .. } => false,
						&events::MessageSendEvent::SendAcceptChannelV2 { .. } => false,
						&events::MessageSendEvent::SendOpenChannelV2 { .. } => false,
						&events::MessageSendEvent::SendChannelReady { .. } => false,
						&events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
						&events::MessageSendEvent::SendStfu { .. } => false,
						&events::MessageSendEvent::SendSpliceInit { .. } => false,
						&events::MessageSendEvent::SendSpliceAck { .. } => false,
						&events::MessageSendEvent::SendSpliceLocked { .. } => false,
						&events::MessageSendEvent::SendTxAddInput { .. } => false,
						&events::MessageSendEvent::SendTxAddOutput { .. } => false,
						&events::MessageSendEvent::SendTxRemoveInput { .. } => false,
						&events::MessageSendEvent::SendTxRemoveOutput { .. } => false,
						&events::MessageSendEvent::SendTxComplete { .. } => false,
						&events::MessageSendEvent::SendTxSignatures { .. } => false,
						&events::MessageSendEvent::SendTxInitRbf { .. } => false,
						&events::MessageSendEvent::SendTxAckRbf { .. } => false,
						&events::MessageSendEvent::SendTxAbort { .. } => false,
						&events::MessageSendEvent::UpdateHTLCs { .. } => false,
						&events::MessageSendEvent::SendRevokeAndACK { .. } => false,
						&events::MessageSendEvent::SendClosingSigned { .. } => false,
						&events::MessageSendEvent::SendShutdown { .. } => false,
						&events::MessageSendEvent::SendChannelReestablish { .. } => false,
						&events::MessageSendEvent::HandleError { .. } => false,
						&events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
						&events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
						&events::MessageSendEvent::BroadcastChannelUpdate { .. } => {
							// NOTE(review): per the debug_assert, this event is not expected
							// to be queued here at all.
							debug_assert!(false, "This event shouldn't have been here");
							false
						},
						&events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
						&events::MessageSendEvent::SendChannelUpdate { .. } => false,
						&events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
						&events::MessageSendEvent::SendShortIdsQuery { .. } => false,
						&events::MessageSendEvent::SendReplyChannelRange { .. } => false,
						&events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
					}
				});
				debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
				peer_state.is_connected = false;
				peer_state.ok_to_remove(true)
			} else { debug_assert!(false, "Unconnected peer disconnected"); true }
		};
		if remove_peer {
			per_peer_state.remove(&counterparty_node_id);
		}
		// Release the per_peer_state lock before finishing channel closes, which may take
		// other locks.
		mem::drop(per_peer_state);

		for failure in failed_channels.drain(..) {
			self.finish_close_channel(failure);
		}
	}

	fn peer_connected(&self, counterparty_node_id: PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
		let logger = WithContext::from(&self.logger, Some(counterparty_node_id), None, None);
		// We require option_static_remotekey from all peers; refuse the connection otherwise.
		if !init_msg.features.supports_static_remote_key() {
			log_debug!(logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
			return Err(());
		}

		let mut res = Ok(());

		PersistenceNotifierGuard::optionally_notify(self, || {
			// Limit how many connected, channel-less peers we track to bound memory usage
			// from inbound connections.
			let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
			let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;

			{
				let mut peer_state_lock = self.per_peer_state.write().unwrap();
				match peer_state_lock.entry(counterparty_node_id.clone()) {
					hash_map::Entry::Vacant(e) => {
						if inbound_peer_limited {
							res = Err(());
							return NotifyOption::SkipPersistNoEvents;
						}
						e.insert(Mutex::new(PeerState {
							channel_by_id: new_hash_map(),
							inbound_channel_request_by_id: new_hash_map(),
							latest_features: init_msg.features.clone(),
							pending_msg_events: Vec::new(),
							in_flight_monitor_updates: BTreeMap::new(),
							monitor_update_blocked_actions: BTreeMap::new(),
							actions_blocking_raa_monitor_updates: BTreeMap::new(),
							closed_channel_monitor_update_ids: BTreeMap::new(),
							is_connected: true,
						}));
					},
					hash_map::Entry::Occupied(e) => {
						let mut peer_state = e.get().lock().unwrap();
						peer_state.latest_features = init_msg.features.clone();

						// An existing peer entry whose channels are all unfunded still
						// counts against the no-funded-channel peer limit.
						let best_block_height = self.best_block.read().unwrap().height;
						if inbound_peer_limited &&
							Self::unfunded_channel_count(&*peer_state, best_block_height) ==
								peer_state.channel_by_id.len()
						{
							res = Err(());
							return NotifyOption::SkipPersistNoEvents;
						}

						debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
						peer_state.is_connected = true;
					},
				}
			}

			log_debug!(logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));

			// Re-enqueue handshake messages for every channel with this peer so the
			// protocol can resume: reestablish for funded channels, open_channel for
			// outbound unfunded ones.
			let per_peer_state = self.per_peer_state.read().unwrap();
			if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				let pending_msg_events = &mut peer_state.pending_msg_events;

				for (_, phase) in peer_state.channel_by_id.iter_mut() {
					match phase {
						ChannelPhase::Funded(chan) => {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
								node_id: chan.context.get_counterparty_node_id(),
								msg: chan.get_channel_reestablish(&&logger),
							});
						}

						ChannelPhase::UnfundedOutboundV1(chan) => {
							let logger = WithChannelContext::from(&self.logger, &chan.context, None);
							if let Some(msg) = chan.get_open_channel(self.chain_hash, &&logger) {
								pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
									node_id: chan.context.get_counterparty_node_id(),
									msg,
								});
							}
						}

						ChannelPhase::UnfundedOutboundV2(chan) => {
							pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
								node_id: chan.context.get_counterparty_node_id(),
								msg: chan.get_open_channel_v2(self.chain_hash),
							});
						},

						ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) => {
							// Inbound unfunded channels should have been dropped on
							// disconnect, so reaching here is a bug.
							debug_assert!(false);
						}
					}
				}
			}

			return NotifyOption::SkipPersistHandleEvents;
		});
		res
	}

	fn handle_error(&self, counterparty_node_id: PublicKey, msg: &msgs::ErrorMessage) {
		// Workaround for LND bug 6039: LND sends these specific error strings during a
		// co-op close instead of handling our shutdown; re-send shutdown plus a warning
		// rather than force-closing.
		match &msg.data as &str {
			"cannot co-op close channel w/ active htlcs"|
			"link failed to shutdown" =>
			{
				if !msg.channel_id.is_zero() {
					PersistenceNotifierGuard::optionally_notify(
						self,
						|| -> NotifyOption {
							let per_peer_state = self.per_peer_state.read().unwrap();
							let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
							if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
							let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
							if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
								if let Some(msg) = chan.get_outbound_shutdown() {
									peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
										node_id: counterparty_node_id,
										msg,
									});
								}
								peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
									node_id: counterparty_node_id,
									action: msgs::ErrorAction::SendWarningMessage {
										msg: msgs::WarningMessage {
											channel_id: msg.channel_id,
											data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
										},
										log_level: Level::Trace,
									}
								});
								return NotifyOption::SkipPersistHandleEvents;
							}
							NotifyOption::SkipPersistNoEvents
						}
					);
				}
				return;
			}
			_ => {}
		}

		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);

		if msg.channel_id.is_zero() {
			// An all-zero channel_id means the error applies to every channel with this
			// peer; force-close them all.
			let channel_ids: Vec<ChannelId> = {
				let per_peer_state = self.per_peer_state.read().unwrap();
				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
				if peer_state_mutex_opt.is_none() { return; }
				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				peer_state.inbound_channel_request_by_id.clear();
				peer_state.channel_by_id.keys().cloned().collect()
			};
			for channel_id in channel_ids {
				let _ = self.force_close_channel_with_peer(&channel_id, &counterparty_node_id, Some(&msg.data), true);
			}
		} else {
			{
				// For an error on an unfunded outbound channel, first give the channel a
				// chance to retry the open with adjusted parameters instead of closing.
				let per_peer_state = self.per_peer_state.read().unwrap();
				let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
				if peer_state_mutex_opt.is_none() { return; }
				let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				match peer_state.channel_by_id.get_mut(&msg.channel_id) {
					Some(ChannelPhase::UnfundedOutboundV1(ref mut chan)) => {
						let logger = WithChannelContext::from(&self.logger, &chan.context, None);
						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator, &&logger) {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
								node_id: counterparty_node_id,
								msg,
							});
							return;
						}
					},
					Some(ChannelPhase::UnfundedOutboundV2(ref mut chan)) => {
						if let Ok(msg) = chan.maybe_handle_error_without_close(self.chain_hash, &self.fee_estimator) {
							peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannelV2 {
								node_id: counterparty_node_id,
								msg,
							});
							return;
						}
					},
					None | Some(ChannelPhase::UnfundedInboundV1(_) | ChannelPhase::UnfundedInboundV2(_) | ChannelPhase::Funded(_)) => (),
				}
			}

			let _ = self.force_close_channel_with_peer(&msg.channel_id, &counterparty_node_id, Some(&msg.data), true);
		}
	}

	fn provided_node_features(&self) -> NodeFeatures {
		provided_node_features(&self.default_configuration)
	}

	fn provided_init_features(&self, _their_init_features: PublicKey) -> InitFeatures {
		// Init features are config-derived and do not depend on which peer is connecting.
		provided_init_features(&self.default_configuration)
	}

	fn get_chain_hashes(&self) -> Option<Vec<ChainHash>> {
		Some(vec![self.chain_hash])
	}

	// Interactive transaction construction (dual funding) handlers: no durable state is
	// committed during negotiation, so each skips persistence but handles events.
	fn handle_tx_add_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddInput) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_add_input(counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_tx_add_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAddOutput) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_add_output(counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_tx_remove_input(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveInput) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_remove_input(counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_tx_remove_output(&self, counterparty_node_id: PublicKey, msg: &msgs::TxRemoveOutput) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_remove_output(counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_tx_complete(&self, counterparty_node_id: PublicKey, msg: &msgs::TxComplete) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_complete(counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn handle_tx_signatures(&self, counterparty_node_id: PublicKey, msg: &msgs::TxSignatures) {
		// Counterparty signatures on the funding transaction are durable; always persist.
		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
		let _ = handle_error!(self, self.internal_tx_signatures(&counterparty_node_id, msg), counterparty_node_id);
	}

	fn handle_tx_init_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxInitRbf) {
		// RBF of a dual-funded negotiation is unsupported; error without closing.
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Dual-funded channels not supported".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	fn handle_tx_ack_rbf(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAckRbf) {
		let _: Result<(), _> = handle_error!(self, Err(MsgHandleErrInternal::send_err_msg_no_close(
			"Dual-funded channels not supported".to_owned(),
			msg.channel_id.clone())), counterparty_node_id);
	}

	fn handle_tx_abort(&self, counterparty_node_id: PublicKey, msg: &msgs::TxAbort) {
		let _persistence_guard = PersistenceNotifierGuard::optionally_notify(self, || {
			let _ = handle_error!(self, self.internal_tx_abort(&counterparty_node_id, msg), counterparty_node_id);
			NotifyOption::SkipPersistHandleEvents
		});
	}

	fn message_received(&self) {
		// Any onion message from the network is taken as a hint that message delivery is
		// working again, so retry invoice requests that were waiting on a reply path.
		for (payment_id, retryable_invoice_request) in self
			.pending_outbound_payments
			.release_invoice_requests_awaiting_invoice()
		{
			let RetryableInvoiceRequest { invoice_request, nonce } = retryable_invoice_request;
			// Authenticate the eventual reply by binding the payment_id to the context via
			// an HMAC only we can produce.
			let hmac = payment_id.hmac_for_offer_payment(nonce, &self.inbound_payment_key);
			let context = MessageContext::Offers(OffersContext::OutboundPayment {
				payment_id,
				nonce,
				hmac: Some(hmac)
			});
			match self.create_blinded_paths(context) {
				Ok(reply_paths) => match self.enqueue_invoice_request(invoice_request, reply_paths) {
					Ok(_) => {}
					Err(_) => {
						log_warn!(self.logger,
							"Retry failed for an invoice request with payment_id: {}",
							payment_id
						);
					}
				},
				Err(_) => {
					log_warn!(self.logger,
						"Retry failed for an invoice request with payment_id: {}. \
							Reason: router could not find a blinded path to include as the reply path",
						payment_id
					);
				}
			}
		}
	}
}
11983
11984impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
11985OffersMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
11986where
11987 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
11988 T::Target: BroadcasterInterface,
11989 ES::Target: EntropySource,
11990 NS::Target: NodeSigner,
11991 SP::Target: SignerProvider,
11992 F::Target: FeeEstimator,
11993 R::Target: Router,
11994 MR::Target: MessageRouter,
11995 L::Target: Logger,
11996{
11997 fn handle_message(
11998 &self, message: OffersMessage, context: Option<OffersContext>, responder: Option<Responder>,
11999 ) -> Option<(OffersMessage, ResponseInstruction)> {
12000 let secp_ctx = &self.secp_ctx;
12001 let expanded_key = &self.inbound_payment_key;
12002
12003 macro_rules! handle_pay_invoice_res {
12004 ($res: expr, $invoice: expr, $logger: expr) => {{
12005 let error = match $res {
12006 Err(Bolt12PaymentError::UnknownRequiredFeatures) => {
12007 log_trace!(
12008 $logger, "Invoice requires unknown features: {:?}",
12009 $invoice.invoice_features()
12010 );
12011 InvoiceError::from(Bolt12SemanticError::UnknownRequiredFeatures)
12012 },
12013 Err(Bolt12PaymentError::SendingFailed(e)) => {
12014 log_trace!($logger, "Failed paying invoice: {:?}", e);
12015 InvoiceError::from_string(format!("{:?}", e))
12016 },
12017 #[cfg(async_payments)]
12018 Err(Bolt12PaymentError::BlindedPathCreationFailed) => {
12019 let err_msg = "Failed to create a blinded path back to ourselves";
12020 log_trace!($logger, "{}", err_msg);
12021 InvoiceError::from_string(err_msg.to_string())
12022 },
12023 Err(Bolt12PaymentError::UnexpectedInvoice)
12024 | Err(Bolt12PaymentError::DuplicateInvoice)
12025 | Ok(()) => return None,
12026 };
12027
12028 match responder {
12029 Some(responder) => return Some((OffersMessage::InvoiceError(error), responder.respond())),
12030 None => {
12031 log_trace!($logger, "No reply path to send error: {:?}", error);
12032 return None
12033 },
12034 }
12035 }}
12036 }
12037
12038 match message {
12039 OffersMessage::InvoiceRequest(invoice_request) => {
12040 let responder = match responder {
12041 Some(responder) => responder,
12042 None => return None,
12043 };
12044
12045 let nonce = match context {
12046 None if invoice_request.metadata().is_some() => None,
12047 Some(OffersContext::InvoiceRequest { nonce }) => Some(nonce),
12048 _ => return None,
12049 };
12050
12051 let invoice_request = match nonce {
12052 Some(nonce) => match invoice_request.verify_using_recipient_data(
12053 nonce, expanded_key, secp_ctx,
12054 ) {
12055 Ok(invoice_request) => invoice_request,
12056 Err(()) => return None,
12057 },
12058 None => match invoice_request.verify_using_metadata(expanded_key, secp_ctx) {
12059 Ok(invoice_request) => invoice_request,
12060 Err(()) => return None,
12061 },
12062 };
12063
12064 let amount_msats = match InvoiceBuilder::<DerivedSigningPubkey>::amount_msats(
12065 &invoice_request.inner
12066 ) {
12067 Ok(amount_msats) => amount_msats,
12068 Err(error) => return Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12069 };
12070
12071 let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
12072 let (payment_hash, payment_secret) = match self.create_inbound_payment(
12073 Some(amount_msats), relative_expiry, None
12074 ) {
12075 Ok((payment_hash, payment_secret)) => (payment_hash, payment_secret),
12076 Err(()) => {
12077 let error = Bolt12SemanticError::InvalidAmount;
12078 return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12079 },
12080 };
12081
12082 let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
12083 offer_id: invoice_request.offer_id,
12084 invoice_request: invoice_request.fields(),
12085 });
12086 let payment_paths = match self.create_blinded_payment_paths(
12087 amount_msats, payment_secret, payment_context
12088 ) {
12089 Ok(payment_paths) => payment_paths,
12090 Err(()) => {
12091 let error = Bolt12SemanticError::MissingPaths;
12092 return Some((OffersMessage::InvoiceError(error.into()), responder.respond()));
12093 },
12094 };
12095
12096 #[cfg(not(feature = "std"))]
12097 let created_at = Duration::from_secs(
12098 self.highest_seen_timestamp.load(Ordering::Acquire) as u64
12099 );
12100
12101 let response = if invoice_request.keys.is_some() {
12102 #[cfg(feature = "std")]
12103 let builder = invoice_request.respond_using_derived_keys(
12104 payment_paths, payment_hash
12105 );
12106 #[cfg(not(feature = "std"))]
12107 let builder = invoice_request.respond_using_derived_keys_no_std(
12108 payment_paths, payment_hash, created_at
12109 );
12110 builder
12111 .map(InvoiceBuilder::<DerivedSigningPubkey>::from)
12112 .and_then(|builder| builder.allow_mpp().build_and_sign(secp_ctx))
12113 .map_err(InvoiceError::from)
12114 } else {
12115 #[cfg(feature = "std")]
12116 let builder = invoice_request.respond_with(payment_paths, payment_hash);
12117 #[cfg(not(feature = "std"))]
12118 let builder = invoice_request.respond_with_no_std(
12119 payment_paths, payment_hash, created_at
12120 );
12121 builder
12122 .map(InvoiceBuilder::<ExplicitSigningPubkey>::from)
12123 .and_then(|builder| builder.allow_mpp().build())
12124 .map_err(InvoiceError::from)
12125 .and_then(|invoice| {
12126 #[cfg(c_bindings)]
12127 let mut invoice = invoice;
12128 invoice
12129 .sign(|invoice: &UnsignedBolt12Invoice|
12130 self.node_signer.sign_bolt12_invoice(invoice)
12131 )
12132 .map_err(InvoiceError::from)
12133 })
12134 };
12135
12136 match response {
12137 Ok(invoice) => {
12138 let nonce = Nonce::from_entropy_source(&*self.entropy_source);
12139 let hmac = payment_hash.hmac_for_offer_payment(nonce, expanded_key);
12140 let context = MessageContext::Offers(OffersContext::InboundPayment { payment_hash, nonce, hmac });
12141 Some((OffersMessage::Invoice(invoice), responder.respond_with_reply_path(context)))
12142 },
12143 Err(error) => Some((OffersMessage::InvoiceError(error.into()), responder.respond())),
12144 }
12145 },
12146 OffersMessage::Invoice(invoice) => {
12147 let payment_id = match self.verify_bolt12_invoice(&invoice, context.as_ref()) {
12148 Ok(payment_id) => payment_id,
12149 Err(()) => return None,
12150 };
12151
12152 let logger = WithContext::from(
12153 &self.logger, None, None, Some(invoice.payment_hash()),
12154 );
12155
12156 if self.default_configuration.manually_handle_bolt12_invoices {
12157 self.pending_outbound_payments.mark_invoice_received(&invoice, payment_id).ok()?;
12161
12162 let event = Event::InvoiceReceived {
12163 payment_id, invoice, context, responder,
12164 };
12165 self.pending_events.lock().unwrap().push_back((event, None));
12166 return None;
12167 }
12168
12169 let res = self.send_payment_for_verified_bolt12_invoice(&invoice, payment_id);
12170 handle_pay_invoice_res!(res, invoice, logger);
12171 },
12172 #[cfg(async_payments)]
12173 OffersMessage::StaticInvoice(invoice) => {
12174 let payment_id = match context {
12175 Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12176 if payment_id.verify_for_offer_payment(hmac, nonce, expanded_key).is_err() {
12177 return None
12178 }
12179 payment_id
12180 },
12181 _ => return None
12182 };
12183 let res = self.initiate_async_payment(&invoice, payment_id);
12184 handle_pay_invoice_res!(res, invoice, self.logger);
12185 },
12186 OffersMessage::InvoiceError(invoice_error) => {
12187 let payment_hash = match context {
12188 Some(OffersContext::InboundPayment { payment_hash, nonce, hmac }) => {
12189 match payment_hash.verify_for_offer_payment(hmac, nonce, expanded_key) {
12190 Ok(_) => Some(payment_hash),
12191 Err(_) => None,
12192 }
12193 },
12194 _ => None,
12195 };
12196
12197 let logger = WithContext::from(&self.logger, None, None, payment_hash);
12198 log_trace!(logger, "Received invoice_error: {}", invoice_error);
12199
12200 match context {
12201 Some(OffersContext::OutboundPayment { payment_id, nonce, hmac: Some(hmac) }) => {
12202 if let Ok(()) = payment_id.verify_for_offer_payment(hmac, nonce, expanded_key) {
12203 self.abandon_payment_with_reason(
12204 payment_id, PaymentFailureReason::InvoiceRequestRejected,
12205 );
12206 }
12207 },
12208 _ => {},
12209 }
12210
12211 None
12212 },
12213 }
12214 }
12215
12216 fn release_pending_messages(&self) -> Vec<(OffersMessage, MessageSendInstructions)> {
12217 core::mem::take(&mut self.pending_offers_messages.lock().unwrap())
12218 }
12219}
12220
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Always ignored: we don't currently act as an async-payments recipient, so
	// there is never a held HTLC for us to release and no response to send.
	fn handle_held_htlc_available(
		&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
	) -> Option<(ReleaseHeldHtlc, ResponseInstruction)> {
		None
	}

	// On release, authenticate the blinded-path context and then attempt to
	// actually send the payment for the corresponding static invoice. A no-op
	// unless built with cfg(async_payments).
	fn handle_release_held_htlc(&self, _message: ReleaseHeldHtlc, _context: AsyncPaymentsContext) {
		#[cfg(async_payments)] {
			let AsyncPaymentsContext::OutboundPayment { payment_id, hmac, nonce } = _context;
			// The HMAC proves we created this context; drop unauthenticated messages.
			if payment_id.verify_for_async_payment(hmac, nonce, &self.inbound_payment_key).is_err() { return }
			if let Err(e) = self.send_payment_for_static_invoice(payment_id) {
				log_trace!(
					self.logger, "Failed to release held HTLC with payment id {}: {:?}", payment_id, e
				);
			}
		}
	}

	// Drains and returns all queued async-payments onion messages.
	fn release_pending_messages(&self) -> Vec<(AsyncPaymentsMessage, MessageSendInstructions)> {
		core::mem::take(&mut self.pending_async_payments_messages.lock().unwrap())
	}
}
12256
#[cfg(feature = "dnssec")]
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
DNSResolverMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// We act only as a DNSSEC client (resolving names for payments we send),
	// never as a resolver, so incoming queries are ignored.
	fn handle_dnssec_query(
		&self, _message: DNSSECQuery, _responder: Option<Responder>,
	) -> Option<(DNSResolverMessage, ResponseInstruction)> {
		None
	}

	// Handles a DNSSEC proof for a human-readable name we previously queried.
	// If the proof resolves to an offer, kick off payment for every pending
	// payment that was awaiting that name's resolution.
	fn handle_dnssec_proof(&self, message: DNSSECProof, context: DNSResolverContext) {
		let offer_opt = self.hrn_resolver.handle_dnssec_proof_for_offer(message, context);
		#[cfg_attr(not(feature = "_test_utils"), allow(unused_mut))]
		if let Some((completed_requests, mut offer)) = offer_opt {
			for (name, payment_id) in completed_requests {
				// Test-only hook allowing tests to substitute the resolved offer.
				#[cfg(feature = "_test_utils")]
				if let Some(replacement_offer) = self.testing_dnssec_proof_offer_resolution_override.lock().unwrap().remove(&name) {
					offer = replacement_offer;
				}
				if let Ok(amt_msats) = self.pending_outbound_payments.amt_msats_for_payment_awaiting_offer(payment_id) {
					let offer_pay_res =
						self.pay_for_offer_intern(&offer, None, Some(amt_msats), None, payment_id, Some(name),
							|invoice_request, nonce| {
								// Record the invoice request so it can be retried later;
								// a duplicate payment id here is a semantic error.
								let retryable_invoice_request = RetryableInvoiceRequest {
									invoice_request: invoice_request.clone(),
									nonce,
								};
								self.pending_outbound_payments
									.received_offer(payment_id, Some(retryable_invoice_request))
									.map_err(|_| Bolt12SemanticError::DuplicatePaymentId)
							});
					if offer_pay_res.is_err() {
						// The offer we resolved was malformed or unpayable; abandon the
						// payment rather than leaving it pending forever.
						self.pending_outbound_payments.abandon_payment(
							payment_id, PaymentFailureReason::RouteNotFound, &self.pending_events,
						);
					}
				}
			}
		}
	}

	// Drains and returns all queued DNS-resolver onion messages.
	fn release_pending_messages(&self) -> Vec<(DNSResolverMessage, MessageSendInstructions)> {
		core::mem::take(&mut self.pending_dns_onion_messages.lock().unwrap())
	}
}
12319
12320impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
12321NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
12322where
12323 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
12324 T::Target: BroadcasterInterface,
12325 ES::Target: EntropySource,
12326 NS::Target: NodeSigner,
12327 SP::Target: SignerProvider,
12328 F::Target: FeeEstimator,
12329 R::Target: Router,
12330 MR::Target: MessageRouter,
12331 L::Target: Logger,
12332{
12333 fn next_node_id(&self, short_channel_id: u64) -> Option<PublicKey> {
12334 self.short_to_chan_info.read().unwrap().get(&short_channel_id).map(|(pubkey, _)| *pubkey)
12335 }
12336}
12337
12338pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
12341 let mut node_features = provided_init_features(config).to_context();
12342 node_features.set_keysend_optional();
12343 node_features
12344}
12345
// Test-only: the feature flags we advertise in BOLT 11 invoices, projected
// from our init feature set.
#[cfg(any(feature = "_test_utils", test))]
pub(crate) fn provided_bolt11_invoice_features(config: &UserConfig) -> Bolt11InvoiceFeatures {
	let init_features = provided_init_features(config);
	init_features.to_context()
}
12355
12356pub(crate) fn provided_bolt12_invoice_features(config: &UserConfig) -> Bolt12InvoiceFeatures {
12359 provided_init_features(config).to_context()
12360}
12361
12362pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
12365 provided_init_features(config).to_context()
12366}
12367
12368pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
12371 ChannelTypeFeatures::from_init(&provided_init_features(config))
12372}
12373
12374pub fn provided_init_features(config: &UserConfig) -> InitFeatures {
12377 let mut features = InitFeatures::empty();
12381 features.set_data_loss_protect_required();
12382 features.set_upfront_shutdown_script_optional();
12383 features.set_variable_length_onion_required();
12384 features.set_static_remote_key_required();
12385 features.set_payment_secret_required();
12386 features.set_basic_mpp_optional();
12387 features.set_wumbo_optional();
12388 features.set_shutdown_any_segwit_optional();
12389 features.set_channel_type_optional();
12390 features.set_scid_privacy_optional();
12391 features.set_zero_conf_optional();
12392 features.set_route_blinding_optional();
12393 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
12394 features.set_anchors_zero_fee_htlc_tx_optional();
12395 }
12396 #[cfg(dual_funding)]
12397 features.set_dual_fund_optional();
12398 features
12399}
12400
// Version byte written at the front of a serialized `ChannelManager`, and the
// minimum version a reader must understand in order to deserialize one.
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
12403
// TLV (de)serialization for `PhantomRouteHints`. The type numbers below are
// wire-format constants and must never be changed or reused.
impl_writeable_tlv_based!(PhantomRouteHints, {
	(2, channels, required_vec),
	(4, phantom_scid, required),
	(6, real_node_pubkey, required),
});
12409
// TLV (de)serialization for `BlindedForward`. Odd type numbers (1, 3) may be
// absent in older data; `failure` falls back to `FromIntroductionNode`.
impl_writeable_tlv_based!(BlindedForward, {
	(0, inbound_blinding_point, required),
	(1, failure, (default_value, BlindedFailure::FromIntroductionNode)),
	(3, next_blinding_override, option),
});
12415
// TLV enum (de)serialization for `PendingHTLCRouting`. Variant ids and field
// type numbers are wire-format constants; per the usual TLV convention, odd
// field types default/are optional for forward compatibility.
impl_writeable_tlv_based_enum!(PendingHTLCRouting,
	(0, Forward) => {
		(0, onion_packet, required),
		(1, blinded, option),
		(2, short_channel_id, required),
		(3, incoming_cltv_expiry, option),
	},
	(1, Receive) => {
		(0, payment_data, required),
		(1, phantom_shared_secret, option),
		(2, incoming_cltv_expiry, required),
		(3, payment_metadata, option),
		(5, custom_tlvs, optional_vec),
		(7, requires_blinded_error, (default_value, false)),
		(9, payment_context, option),
	},
	(2, ReceiveKeysend) => {
		(0, payment_preimage, required),
		(1, requires_blinded_error, (default_value, false)),
		(2, incoming_cltv_expiry, required),
		(3, payment_metadata, option),
		(4, payment_data, option), (5, custom_tlvs, optional_vec),
		(7, has_recipient_created_payment_secret, (default_value, false)),
	},
);
12442
// TLV (de)serialization for `PendingHTLCInfo`; type numbers are wire-format
// constants.
impl_writeable_tlv_based!(PendingHTLCInfo, {
	(0, routing, required),
	(2, incoming_shared_secret, required),
	(4, payment_hash, required),
	(6, outgoing_amt_msat, required),
	(8, outgoing_cltv_value, required),
	(9, incoming_amt_msat, option),
	(10, skimmed_fee_msat, option),
});
12452
12453
12454impl Writeable for HTLCFailureMsg {
12455 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
12456 match self {
12457 HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
12458 0u8.write(writer)?;
12459 channel_id.write(writer)?;
12460 htlc_id.write(writer)?;
12461 reason.write(writer)?;
12462 },
12463 HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
12464 channel_id, htlc_id, sha256_of_onion, failure_code
12465 }) => {
12466 1u8.write(writer)?;
12467 channel_id.write(writer)?;
12468 htlc_id.write(writer)?;
12469 sha256_of_onion.write(writer)?;
12470 failure_code.write(writer)?;
12471 },
12472 }
12473 Ok(())
12474 }
12475}
12476
impl Readable for HTLCFailureMsg {
	// Deserializes an `HTLCFailureMsg` from a one-byte variant discriminant.
	// Variants 0/1 are raw field-by-field encodings matching `Writeable` above;
	// variants 2/3 are the same messages written with an explicit length prefix
	// (presumably by other serialization paths/versions — extra trailing bytes
	// within the prefix are read and discarded).
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
					channel_id: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
					reason: Readable::read(reader)?,
				}))
			},
			1 => {
				Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
					channel_id: Readable::read(reader)?,
					htlc_id: Readable::read(reader)?,
					sha256_of_onion: Readable::read(reader)?,
					failure_code: Readable::read(reader)?,
				}))
			},
			2 => {
				// Length-prefixed `UpdateFailHTLC`: bound the read to the declared
				// length, then skip any remainder.
				let length: BigSize = Readable::read(reader)?;
				let mut s = FixedLengthReader::new(reader, length.0);
				let res = Readable::read(&mut s)?;
				s.eat_remaining()?; Ok(HTLCFailureMsg::Relay(res))
			},
			3 => {
				// Length-prefixed `UpdateFailMalformedHTLC`, as above.
				let length: BigSize = Readable::read(reader)?;
				let mut s = FixedLengthReader::new(reader, length.0);
				let res = Readable::read(&mut s)?;
				s.eat_remaining()?; Ok(HTLCFailureMsg::Malformed(res))
			},
			_ => Err(DecodeError::UnknownRequiredFeature),
		}
	}
}
12520
// Legacy-format enum (de)serialization for `PendingHTLCStatus`; variant ids
// are wire-format constants.
impl_writeable_tlv_based_enum_legacy!(PendingHTLCStatus, ;
	(0, Forward),
	(1, Fail),
);
12525
// TLV enum (de)serialization for `BlindedFailure`; variant ids (0, 2) are
// wire-format constants.
impl_writeable_tlv_based_enum!(BlindedFailure,
	(0, FromIntroductionNode) => {},
	(2, FromBlindedNode) => {},
);
12530
// TLV (de)serialization for `HTLCPreviousHopData`. When reading data written
// before `channel_id` existed (type 9 absent), it is reconstructed from the
// funding outpoint at type 2.
impl_writeable_tlv_based!(HTLCPreviousHopData, {
	(0, short_channel_id, required),
	(1, phantom_shared_secret, option),
	(2, outpoint, required),
	(3, blinded_failure, option),
	(4, htlc_id, required),
	(5, cltv_expiry, option),
	(6, incoming_packet_shared_secret, required),
	(7, user_channel_id, option),
	(9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))),
	(11, counterparty_node_id, option),
});
12545
impl Writeable for ClaimableHTLC {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Split the onion payload into the two mutually-exclusive wire fields:
		// invoice payments carry (legacy) `payment_data` at type 4, keysend
		// payments carry the preimage at type 8. The `Readable` impl rejects
		// data containing both.
		let (payment_data, keysend_preimage) = match &self.onion_payload {
			OnionPayload::Invoice { _legacy_hop_data } => {
				(_legacy_hop_data.as_ref(), None)
			},
			OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
		};
		write_tlv_fields!(writer, {
			(0, self.prev_hop, required),
			(1, self.total_msat, required),
			(2, self.value, required),
			(3, self.sender_intended_value, required),
			(4, payment_data, option),
			(5, self.total_value_received, option),
			(6, self.cltv_expiry, required),
			(8, keysend_preimage, option),
			(10, self.counterparty_skimmed_fee_msat, option),
		});
		Ok(())
	}
}
12568
impl Readable for ClaimableHTLC {
	// Reconstructs a `ClaimableHTLC`, back-filling fields that older
	// serialization versions did not write: `total_msat` defaults to the HTLC
	// value (keysend) or to the invoice's `payment_data.total_msat`, and
	// `sender_intended_value` defaults to `value`.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_and_read_len_prefixed_tlv_fields!(reader, {
			(0, prev_hop, required),
			(1, total_msat, option),
			(2, value_ser, required),
			(3, sender_intended_value, option),
			(4, payment_data_opt, option),
			(5, total_value_received, option),
			(6, cltv_expiry, required),
			(8, keysend_preimage, option),
			(10, counterparty_skimmed_fee_msat, option),
		});
		let payment_data: Option<msgs::FinalOnionHopData> = payment_data_opt;
		let value = value_ser.0.unwrap();
		let onion_payload = match keysend_preimage {
			Some(p) => {
				// A keysend HTLC must not also carry invoice payment data.
				if payment_data.is_some() {
					return Err(DecodeError::InvalidValue)
				}
				if total_msat.is_none() {
					total_msat = Some(value);
				}
				OnionPayload::Spontaneous(p)
			},
			None => {
				// Pre-`total_msat` data must carry `payment_data` so we can
				// recover the total from it.
				if total_msat.is_none() {
					if payment_data.is_none() {
						return Err(DecodeError::InvalidValue)
					}
					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
				}
				OnionPayload::Invoice { _legacy_hop_data: payment_data }
			},
		};
		Ok(Self {
			prev_hop: prev_hop.0.unwrap(),
			// Not serialized; restarts reset the expiry-tick counter.
			timer_ticks: 0,
			value,
			sender_intended_value: sender_intended_value.unwrap_or(value),
			total_value_received,
			total_msat: total_msat.unwrap(),
			onion_payload,
			cltv_expiry: cltv_expiry.0.unwrap(),
			counterparty_skimmed_fee_msat,
		})
	}
}
12617
impl Readable for HTLCSource {
	// Deserializes an `HTLCSource` from a one-byte variant discriminant:
	// 0 = outbound route (TLV-encoded), 1 = previous-hop data.
	fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
				let mut first_hop_htlc_msat: u64 = 0;
				let mut path_hops = Vec::new();
				let mut payment_id = None;
				let mut payment_params: Option<PaymentParameters> = None;
				let mut blinded_tail: Option<BlindedTail> = None;
				read_tlv_fields!(reader, {
					(0, session_priv, required),
					(1, payment_id, option),
					(2, first_hop_htlc_msat, required),
					(4, path_hops, required_vec),
					(5, payment_params, (option: ReadableArgs, 0)),
					(6, blinded_tail, option),
				});
				// Data written before payment ids existed: synthesize one from the
				// session key bytes, matching the historical behavior.
				if payment_id.is_none() {
					payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
				}
				let path = Path { hops: path_hops, blinded_tail };
				// An outbound route must have at least one hop.
				if path.hops.len() == 0 {
					return Err(DecodeError::InvalidValue);
				}
				// Legacy payment params may carry a zero final CLTV delta;
				// back-fill it from the path itself.
				if let Some(params) = payment_params.as_mut() {
					if let Payee::Clear { ref mut final_cltv_expiry_delta, .. } = params.payee {
						if final_cltv_expiry_delta == &0 {
							*final_cltv_expiry_delta = path.final_cltv_expiry_delta().ok_or(DecodeError::InvalidValue)?;
						}
					}
				}
				Ok(HTLCSource::OutboundRoute {
					session_priv: session_priv.0.unwrap(),
					first_hop_htlc_msat,
					path,
					payment_id: payment_id.unwrap(),
				})
			}
			1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
			_ => Err(DecodeError::UnknownRequiredFeature),
		}
	}
}
12665
impl Writeable for HTLCSource {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
		match self {
			HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
				// Variant byte 0, then TLV fields matching the `Readable` impl.
				0u8.write(writer)?;
				let payment_id_opt = Some(payment_id);
				write_tlv_fields!(writer, {
					(0, session_priv, required),
					(1, payment_id_opt, option),
					(2, first_hop_htlc_msat, required),
					(4, path.hops, required_vec),
					// Type 5 previously carried `PaymentParameters`; we no longer
					// write it (an `option` of `None` emits nothing), but readers
					// still accept it from older data.
					(5, None::<PaymentParameters>, option), (6, path.blinded_tail, option),
				});
			}
			HTLCSource::PreviousHopData(ref field) => {
				// Variant byte 1, then the previous-hop data itself.
				1u8.write(writer)?;
				field.write(writer)?;
			}
		}
		Ok(())
	}
}
12690
// TLV (de)serialization for `PendingAddHTLCInfo`. When type 7 is absent in
// older data, `prev_channel_id` is reconstructed from the funding outpoint.
impl_writeable_tlv_based!(PendingAddHTLCInfo, {
	(0, forward_info, required),
	(1, prev_user_channel_id, (default_value, 0)),
	(2, prev_short_channel_id, required),
	(4, prev_htlc_id, required),
	(6, prev_funding_outpoint, required),
	(7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))),
	(9, prev_counterparty_node_id, option),
});
12702
impl Writeable for HTLCForwardInfo {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		// `FailHTLC` and `FailMalformedHTLC` deliberately share one variant byte;
		// the reader distinguishes them by the presence of the odd TLV type 1
		// (`failure_code`), which older readers silently skip.
		const FAIL_HTLC_VARIANT_ID: u8 = 1;
		match self {
			Self::AddHTLC(info) => {
				0u8.write(w)?;
				info.write(w)?;
			},
			Self::FailHTLC { htlc_id, err_packet } => {
				FAIL_HTLC_VARIANT_ID.write(w)?;
				write_tlv_fields!(w, {
					(0, htlc_id, required),
					(2, err_packet, required),
				});
			},
			Self::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
				FAIL_HTLC_VARIANT_ID.write(w)?;
				// Write an empty packet at the required type 2 so older readers,
				// which don't understand types 1/3, still decode this as a
				// well-formed `FailHTLC`.
				let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
				write_tlv_fields!(w, {
					(0, htlc_id, required),
					(1, failure_code, required),
					(2, dummy_err_packet, required),
					(3, sha256_of_onion, required),
				});
			},
		}
		Ok(())
	}
}
12735
impl Readable for HTLCForwardInfo {
	// Mirrors the `Writeable` impl above: variant byte 1 covers both fail
	// variants, disambiguated by whether the odd TLV type 1 (`failure_code`)
	// was written.
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let id: u8 = Readable::read(r)?;
		Ok(match id {
			0 => Self::AddHTLC(Readable::read(r)?),
			1 => {
				_init_and_read_len_prefixed_tlv_fields!(r, {
					(0, htlc_id, required),
					(1, malformed_htlc_failure_code, option),
					(2, err_packet, required),
					(3, sha256_of_onion, option),
				});
				if let Some(failure_code) = malformed_htlc_failure_code {
					// A failure code implies the malformed variant, which must also
					// carry the onion hash.
					Self::FailMalformedHTLC {
						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
						failure_code,
						sha256_of_onion: sha256_of_onion.ok_or(DecodeError::InvalidValue)?,
					}
				} else {
					Self::FailHTLC {
						htlc_id: _init_tlv_based_struct_field!(htlc_id, required),
						err_packet: _init_tlv_based_struct_field!(err_packet, required),
					}
				}
			},
			_ => return Err(DecodeError::InvalidValue),
		})
	}
}
12765
// TLV (de)serialization for `PendingInboundPayment`; type numbers are
// wire-format constants.
impl_writeable_tlv_based!(PendingInboundPayment, {
	(0, payment_secret, required),
	(2, expiry_time, required),
	(4, user_payment_id, required),
	(6, payment_preimage, required),
	(8, min_value_msat, required),
});
12773
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, MR, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	// Serializes the full `ChannelManager` state. The layout is: version
	// prefix, chain/best-block info, funded channels, forwardable HTLCs,
	// claimable payments, per-peer state, pending events, several legacy
	// fixed-position fields, and finally an extensible TLV stream.
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		// Take the consistency lock exclusively so no channel state can change
		// mid-serialization.
		let _consistency_lock = self.total_consistency_lock.write().unwrap();

		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);

		self.chain_hash.write(writer)?;
		{
			let best_block = self.best_block.read().unwrap();
			best_block.height.write(writer)?;
			best_block.block_hash.write(writer)?;
		}

		// Hold the per-peer map write lock for the remainder of serialization.
		let per_peer_state = self.per_peer_state.write().unwrap();

		// First pass: count peers worth persisting and channels whose funding
		// transaction has been broadcast (only those are written).
		let mut serializable_peer_count: u64 = 0;
		{
			let mut number_of_funded_channels = 0;
			for (_, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				if !peer_state.ok_to_remove(false) {
					serializable_peer_count += 1;
				}

				number_of_funded_channels += peer_state.channel_by_id.iter().filter(
					|(_, phase)| if let ChannelPhase::Funded(chan) = phase { chan.context.is_funding_broadcast() } else { false }
				).count();
			}

			(number_of_funded_channels as u64).write(writer)?;

			// Second pass: write each funded, broadcast channel.
			for (_, peer_state_mutex) in per_peer_state.iter() {
				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
				let peer_state = &mut *peer_state_lock;
				for channel in peer_state.channel_by_id.iter().filter_map(
					|(_, phase)| if let ChannelPhase::Funded(channel) = phase {
						if channel.context.is_funding_broadcast() { Some(channel) } else { None }
					} else { None }
				) {
					channel.write(writer)?;
				}
			}
		}

		// HTLCs queued for forwarding, keyed by outbound short channel id.
		{
			let forward_htlcs = self.forward_htlcs.lock().unwrap();
			(forward_htlcs.len() as u64).write(writer)?;
			for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
				short_channel_id.write(writer)?;
				(pending_forwards.len() as u64).write(writer)?;
				for forward in pending_forwards {
					forward.write(writer)?;
				}
			}
		}

		// Only include not-yet-decoded update_add_htlcs in the TLV stream if
		// there are any (written as an `option` below).
		let mut decode_update_add_htlcs_opt = None;
		let decode_update_add_htlcs = self.decode_update_add_htlcs.lock().unwrap();
		if !decode_update_add_htlcs.is_empty() {
			decode_update_add_htlcs_opt = Some(decode_update_add_htlcs);
		}

		let claimable_payments = self.claimable_payments.lock().unwrap();
		let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();

		// Claimable payments: HTLCs are written inline here; the corresponding
		// purposes and onion fields are collected and written in the TLV stream
		// (types 9 and 13), index-aligned with this map's iteration order.
		let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
		let mut htlc_onion_fields: Vec<&_> = Vec::new();
		(claimable_payments.claimable_payments.len() as u64).write(writer)?;
		for (payment_hash, payment) in claimable_payments.claimable_payments.iter() {
			payment_hash.write(writer)?;
			(payment.htlcs.len() as u64).write(writer)?;
			for htlc in payment.htlcs.iter() {
				htlc.write(writer)?;
			}
			htlc_purposes.push(&payment.purpose);
			htlc_onion_fields.push(&payment.onion_fields);
		}

		let mut monitor_update_blocked_actions_per_peer = None;
		let mut peer_states = Vec::new();
		for (_, peer_state_mutex) in per_peer_state.iter() {
			// Locked in a well-defined order to avoid deadlocks; held for the
			// rest of serialization.
			peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
		}

		(serializable_peer_count).write(writer)?;
		for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
			if !peer_state.ok_to_remove(false) {
				peer_pubkey.write(writer)?;
				peer_state.latest_features.write(writer)?;
				if !peer_state.monitor_update_blocked_actions.is_empty() {
					monitor_update_blocked_actions_per_peer
						.get_or_insert_with(Vec::new)
						.push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
				}
			}
		}

		let events = self.pending_events.lock().unwrap();
		// Events carrying completion actions cannot be expressed in the legacy
		// event list; write an empty legacy list and put the full list in the
		// TLV stream (type 8) instead.
		let events_not_backwards_compatible = events.iter().any(|(_, action)| action.is_some());
		if events_not_backwards_compatible {
			0u64.write(writer)?;
		} else {
			(events.len() as u64).write(writer)?;
			for (event, _) in events.iter() {
				event.write(writer)?;
			}
		}

		// Legacy fixed-position field, always written empty now — TODO(review):
		// confirm which historical field this placeholder corresponds to.
		0u64.write(writer)?;

		// The same timestamp is written twice, filling two legacy timestamp
		// slots with our highest-seen block timestamp.
		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
		(self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;

		// Another legacy fixed-position count, always zero now.
		(0 as u64).write(writer)?;

		// Legacy pending-outbound encoding: a flat list of session keys for all
		// still-pending (not fulfilled/abandoned) HTLC parts.
		let mut num_pending_outbounds_compat: u64 = 0;
		for (_, outbound) in pending_outbound_payments.iter() {
			if !outbound.is_fulfilled() && !outbound.abandoned() {
				num_pending_outbounds_compat += outbound.remaining_parts() as u64;
			}
		}
		num_pending_outbounds_compat.write(writer)?;
		for (_, outbound) in pending_outbound_payments.iter() {
			match outbound {
				PendingOutboundPayment::Legacy { session_privs } |
				PendingOutboundPayment::Retryable { session_privs, .. } => {
					for session_priv in session_privs.iter() {
						session_priv.write(writer)?;
					}
				}
				PendingOutboundPayment::AwaitingInvoice { .. } => {},
				PendingOutboundPayment::AwaitingOffer { .. } => {},
				PendingOutboundPayment::InvoiceReceived { .. } => {},
				PendingOutboundPayment::StaticInvoiceReceived { .. } => {},
				PendingOutboundPayment::Fulfilled { .. } => {},
				PendingOutboundPayment::Abandoned { .. } => {},
			}
		}

		// Backwards-compatible map form of the outbound payments (TLV type 1),
		// readable by versions that predate retry data.
		let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = new_hash_map();
		for (id, outbound) in pending_outbound_payments.iter() {
			match outbound {
				PendingOutboundPayment::Legacy { session_privs } |
				PendingOutboundPayment::Retryable { session_privs, .. } => {
					pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
				},
				_ => {},
			}
		}

		// Only write intercepted HTLCs if there are any.
		let mut pending_intercepted_htlcs = None;
		let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
		if our_pending_intercepts.len() != 0 {
			pending_intercepted_htlcs = Some(our_pending_intercepts);
		}

		// Similarly, elide the pending-claims map when empty.
		let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
		if pending_claiming_payments.as_ref().unwrap().is_empty() {
			pending_claiming_payments = None;
		}

		// Collect in-flight monitor updates per (peer, funding outpoint),
		// allocating the map lazily since it is usually empty.
		let mut in_flight_monitor_updates: Option<HashMap<(&PublicKey, &OutPoint), &Vec<ChannelMonitorUpdate>>> = None;
		for ((counterparty_id, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
			for (funding_outpoint, updates) in peer_state.in_flight_monitor_updates.iter() {
				if !updates.is_empty() {
					if in_flight_monitor_updates.is_none() { in_flight_monitor_updates = Some(new_hash_map()); }
					in_flight_monitor_updates.as_mut().unwrap().insert((counterparty_id, funding_outpoint), updates);
				}
			}
		}

		// Extensible TLV stream; type numbers are wire-format constants.
		write_tlv_fields!(writer, {
			(1, pending_outbound_payments_no_retry, required),
			(2, pending_intercepted_htlcs, option),
			(3, pending_outbound_payments, required),
			(4, pending_claiming_payments, option),
			(5, self.our_network_pubkey, required),
			(6, monitor_update_blocked_actions_per_peer, option),
			(7, self.fake_scid_rand_bytes, required),
			(8, if events_not_backwards_compatible { Some(&*events) } else { None }, option),
			(9, htlc_purposes, required_vec),
			(10, in_flight_monitor_updates, option),
			(11, self.probing_cookie_secret, required),
			(13, htlc_onion_fields, optional_vec),
			(14, decode_update_add_htlcs_opt, option),
			(15, self.inbound_payment_id_secret, required),
		});

		Ok(())
	}
}
13004
impl Writeable for VecDeque<(Event, Option<EventCompletionAction>)> {
	// Writes the pending-event queue as a length-prefixed list of
	// (event, completion action) pairs.
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		(self.len() as u64).write(w)?;
		for (event, action) in self.iter() {
			event.write(w)?;
			action.write(w)?;
			#[cfg(debug_assertions)] {
				// Sanity-check (debug builds only): any event carrying a
				// completion action must survive a serialization round-trip —
				// the `Readable` impl treats a dropped event with an action as
				// `InvalidValue`.
				let event_encoded = event.encode();
				let event_read: Option<Event> =
					MaybeReadable::read(&mut &event_encoded[..]).unwrap();
				if action.is_some() { assert!(event_read.is_some()); }
			}
		}
		Ok(())
	}
}
13026impl Readable for VecDeque<(Event, Option<EventCompletionAction>)> {
13027 fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
13028 let len: u64 = Readable::read(reader)?;
13029 const MAX_ALLOC_SIZE: u64 = 1024 * 16;
13030 let mut events: Self = VecDeque::with_capacity(cmp::min(
13031 MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>() as u64,
13032 len) as usize);
13033 for _ in 0..len {
13034 let ev_opt = MaybeReadable::read(reader)?;
13035 let action = Readable::read(reader)?;
13036 if let Some(ev) = ev_opt {
13037 events.push_back((ev, action));
13038 } else if action.is_some() {
13039 return Err(DecodeError::InvalidValue);
13040 }
13041 }
13042 Ok(events)
13043 }
13044}
13045
/// Arguments required to deserialize a [`ChannelManager`]: the same set of
/// runtime dependencies the manager holds, plus the channel monitors it must
/// be reconciled against. Construct via [`ChannelManagerReadArgs::new`].
pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	MR::Target: MessageRouter,
	L::Target: Logger,
{
	/// Source of entropy for the deserialized manager.
	pub entropy_source: ES,

	/// Signer for node-level operations.
	pub node_signer: NS,

	/// Provider of per-channel signers.
	pub signer_provider: SP,

	/// Fee estimator used by the deserialized manager.
	pub fee_estimator: F,
	/// The chain::Watch implementation the manager will report channel state to.
	pub chain_monitor: M,

	/// Broadcaster for on-chain transactions.
	pub tx_broadcaster: T,
	/// Router used for finding payment paths.
	pub router: R,
	/// Router used for constructing onion-message blinded paths.
	pub message_router: MR,
	/// Logger for the deserialized manager.
	pub logger: L,
	/// Configuration applied where the serialized data doesn't carry its own.
	pub default_config: UserConfig,

	/// The channel monitors, keyed by funding outpoint, that the deserialized
	/// manager will be reconciled against.
	pub channel_monitors: HashMap<OutPoint, &'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
}
13148
13149impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13150 ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>
13151where
13152 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13153 T::Target: BroadcasterInterface,
13154 ES::Target: EntropySource,
13155 NS::Target: NodeSigner,
13156 SP::Target: SignerProvider,
13157 F::Target: FeeEstimator,
13158 R::Target: Router,
13159 MR::Target: MessageRouter,
13160 L::Target: Logger,
13161{
13162 pub fn new(
13166 entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F,
13167 chain_monitor: M, tx_broadcaster: T, router: R, message_router: MR, logger: L,
13168 default_config: UserConfig,
13169 mut channel_monitors: Vec<&'a ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>>,
13170 ) -> Self {
13171 Self {
13172 entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor,
13173 tx_broadcaster, router, message_router, logger, default_config,
13174 channel_monitors: hash_map_from_iter(
13175 channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) })
13176 ),
13177 }
13178 }
13179}
13180
13181impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13184 ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, MR, L>>)
13185where
13186 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13187 T::Target: BroadcasterInterface,
13188 ES::Target: EntropySource,
13189 NS::Target: NodeSigner,
13190 SP::Target: SignerProvider,
13191 F::Target: FeeEstimator,
13192 R::Target: Router,
13193 MR::Target: MessageRouter,
13194 L::Target: Logger,
13195{
13196 fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13197 let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)>::read(reader, args)?;
13198 Ok((blockhash, Arc::new(chan_manager)))
13199 }
13200}
13201
13202impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, MR: Deref, L: Deref>
13203 ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, MR, L>)
13204where
13205 M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
13206 T::Target: BroadcasterInterface,
13207 ES::Target: EntropySource,
13208 NS::Target: NodeSigner,
13209 SP::Target: SignerProvider,
13210 F::Target: FeeEstimator,
13211 R::Target: Router,
13212 MR::Target: MessageRouter,
13213 L::Target: Logger,
13214{
13215 fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, MR, L>) -> Result<Self, DecodeError> {
13216 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
13217
13218 let chain_hash: ChainHash = Readable::read(reader)?;
13219 let best_block_height: u32 = Readable::read(reader)?;
13220 let best_block_hash: BlockHash = Readable::read(reader)?;
13221
13222 let empty_peer_state = || {
13223 PeerState {
13224 channel_by_id: new_hash_map(),
13225 inbound_channel_request_by_id: new_hash_map(),
13226 latest_features: InitFeatures::empty(),
13227 pending_msg_events: Vec::new(),
13228 in_flight_monitor_updates: BTreeMap::new(),
13229 monitor_update_blocked_actions: BTreeMap::new(),
13230 actions_blocking_raa_monitor_updates: BTreeMap::new(),
13231 closed_channel_monitor_update_ids: BTreeMap::new(),
13232 is_connected: false,
13233 }
13234 };
13235
13236 let mut failed_htlcs = Vec::new();
13237 let channel_count: u64 = Readable::read(reader)?;
13238 let mut funding_txo_set = hash_set_with_capacity(cmp::min(channel_count as usize, 128));
13239 let mut per_peer_state = hash_map_with_capacity(cmp::min(channel_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<SP>>)>()));
13240 let mut outpoint_to_peer = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13241 let mut short_to_chan_info = hash_map_with_capacity(cmp::min(channel_count as usize, 128));
13242 let mut channel_closures = VecDeque::new();
13243 let mut close_background_events = Vec::new();
13244 let mut funding_txo_to_channel_id = hash_map_with_capacity(channel_count as usize);
13245 for _ in 0..channel_count {
13246 let mut channel: Channel<SP> = Channel::read(reader, (
13247 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
13248 ))?;
13249 let logger = WithChannelContext::from(&args.logger, &channel.context, None);
13250 let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13251 funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id());
13252 funding_txo_set.insert(funding_txo.clone());
13253 if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
13254 if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
13255 channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
13256 channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
13257 channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13258 log_error!(logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
13260 log_error!(logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
13261 if channel.context.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
13262 log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
13263 &channel.context.channel_id(), monitor.get_latest_update_id(), channel.context.get_latest_monitor_update_id());
13264 }
13265 if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() {
13266 log_error!(logger, " The ChannelMonitor for channel {} is at holder commitment number {} but the ChannelManager is at holder commitment number {}.",
13267 &channel.context.channel_id(), monitor.get_cur_holder_commitment_number(), channel.get_cur_holder_commitment_transaction_number());
13268 }
13269 if channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() {
13270 log_error!(logger, " The ChannelMonitor for channel {} is at revoked counterparty transaction number {} but the ChannelManager is at revoked counterparty transaction number {}.",
13271 &channel.context.channel_id(), monitor.get_min_seen_secret(), channel.get_revoked_counterparty_commitment_transaction_number());
13272 }
13273 if channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() {
13274 log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
13275 &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
13276 }
13277 let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager);
13278 if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
13279 return Err(DecodeError::InvalidValue);
13280 }
13281 if let Some((counterparty_node_id, funding_txo, channel_id, mut update)) = shutdown_result.monitor_update {
13282 let latest_update_id = monitor.get_latest_update_id().saturating_add(1);
13286 update.update_id = latest_update_id;
13287 per_peer_state.entry(counterparty_node_id)
13288 .or_insert_with(|| Mutex::new(empty_peer_state()))
13289 .lock().unwrap()
13290 .closed_channel_monitor_update_ids.entry(channel_id)
13291 .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13292 .or_insert(latest_update_id);
13293
13294 close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13295 counterparty_node_id, funding_txo, channel_id, update
13296 });
13297 }
13298 failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
13299 channel_closures.push_back((events::Event::ChannelClosed {
13300 channel_id: channel.context.channel_id(),
13301 user_channel_id: channel.context.get_user_id(),
13302 reason: ClosureReason::OutdatedChannelManager,
13303 counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13304 channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13305 channel_funding_txo: channel.context.get_funding_txo(),
13306 last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13307 }, None));
13308 for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
13309 let mut found_htlc = false;
13310 for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
13311 if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
13312 }
13313 if !found_htlc {
13314 let logger = WithChannelContext::from(&args.logger, &channel.context, Some(*payment_hash));
13322 log_info!(logger,
13323 "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
13324 &channel.context.channel_id(), &payment_hash);
13325 failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13326 }
13327 }
13328 } else {
13329 channel.on_startup_drop_completed_blocked_mon_updates_through(&logger, monitor.get_latest_update_id());
13330 log_info!(logger, "Successfully loaded channel {} at update_id {} against monitor at update id {} with {} blocked updates",
13331 &channel.context.channel_id(), channel.context.get_latest_monitor_update_id(),
13332 monitor.get_latest_update_id(), channel.blocked_monitor_updates_pending());
13333 if let Some(short_channel_id) = channel.context.get_short_channel_id() {
13334 short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
13335 }
13336 if let Some(funding_txo) = channel.context.get_funding_txo() {
13337 outpoint_to_peer.insert(funding_txo, channel.context.get_counterparty_node_id());
13338 }
13339 per_peer_state.entry(channel.context.get_counterparty_node_id())
13340 .or_insert_with(|| Mutex::new(empty_peer_state()))
13341 .get_mut().unwrap()
13342 .channel_by_id.insert(channel.context.channel_id(), ChannelPhase::Funded(channel));
13343 }
13344 } else if channel.is_awaiting_initial_mon_persist() {
13345 let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer);
13349 channel_closures.push_back((events::Event::ChannelClosed {
13350 channel_id: channel.context.channel_id(),
13351 user_channel_id: channel.context.get_user_id(),
13352 reason: ClosureReason::DisconnectedPeer,
13353 counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
13354 channel_capacity_sats: Some(channel.context.get_value_satoshis()),
13355 channel_funding_txo: channel.context.get_funding_txo(),
13356 last_local_balance_msat: Some(channel.context.get_value_to_self_msat()),
13357 }, None));
13358 } else {
13359 log_error!(logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", &channel.context.channel_id());
13360 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13361 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13362 log_error!(logger, " Without the ChannelMonitor we cannot continue without risking funds.");
13363 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13364 return Err(DecodeError::InvalidValue);
13365 }
13366 }
13367
13368 for (funding_txo, monitor) in args.channel_monitors.iter() {
13369 if !funding_txo_set.contains(funding_txo) {
13370 let mut should_queue_fc_update = false;
13371 if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13372 if !monitor.no_further_updates_allowed() || monitor.get_latest_update_id() > 1 {
13379 should_queue_fc_update = !monitor.no_further_updates_allowed();
13380 let mut latest_update_id = monitor.get_latest_update_id();
13381 if should_queue_fc_update {
13382 latest_update_id += 1;
13383 }
13384 per_peer_state.entry(counterparty_node_id)
13385 .or_insert_with(|| Mutex::new(empty_peer_state()))
13386 .lock().unwrap()
13387 .closed_channel_monitor_update_ids.entry(monitor.channel_id())
13388 .and_modify(|v| *v = cmp::max(latest_update_id, *v))
13389 .or_insert(latest_update_id);
13390 }
13391 }
13392
13393 if !should_queue_fc_update {
13394 continue;
13395 }
13396
13397 let logger = WithChannelMonitor::from(&args.logger, monitor, None);
13398 let channel_id = monitor.channel_id();
13399 log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed",
13400 &channel_id);
13401 let mut monitor_update = ChannelMonitorUpdate {
13402 update_id: monitor.get_latest_update_id().saturating_add(1),
13403 counterparty_node_id: None,
13404 updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
13405 channel_id: Some(monitor.channel_id()),
13406 };
13407 if let Some(counterparty_node_id) = monitor.get_counterparty_node_id() {
13408 let update = BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13409 counterparty_node_id,
13410 funding_txo: *funding_txo,
13411 channel_id,
13412 update: monitor_update,
13413 };
13414 close_background_events.push(update);
13415 } else {
13416 monitor_update.update_id = u64::MAX;
13423 close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update)));
13424 }
13425 }
13426 }
13427
13428 const MAX_ALLOC_SIZE: usize = 1024 * 64;
13429 let forward_htlcs_count: u64 = Readable::read(reader)?;
13430 let mut forward_htlcs = hash_map_with_capacity(cmp::min(forward_htlcs_count as usize, 128));
13431 for _ in 0..forward_htlcs_count {
13432 let short_channel_id = Readable::read(reader)?;
13433 let pending_forwards_count: u64 = Readable::read(reader)?;
13434 let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
13435 for _ in 0..pending_forwards_count {
13436 pending_forwards.push(Readable::read(reader)?);
13437 }
13438 forward_htlcs.insert(short_channel_id, pending_forwards);
13439 }
13440
13441 let claimable_htlcs_count: u64 = Readable::read(reader)?;
13442 let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
13443 for _ in 0..claimable_htlcs_count {
13444 let payment_hash = Readable::read(reader)?;
13445 let previous_hops_len: u64 = Readable::read(reader)?;
13446 let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
13447 for _ in 0..previous_hops_len {
13448 previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
13449 }
13450 claimable_htlcs_list.push((payment_hash, previous_hops));
13451 }
13452
13453 let peer_count: u64 = Readable::read(reader)?;
13454 for _ in 0..peer_count {
13455 let peer_pubkey: PublicKey = Readable::read(reader)?;
13456 let latest_features = Readable::read(reader)?;
13457 if let Some(peer_state) = per_peer_state.get_mut(&peer_pubkey) {
13458 peer_state.get_mut().unwrap().latest_features = latest_features;
13459 }
13460 }
13461
13462 let event_count: u64 = Readable::read(reader)?;
13463 let mut pending_events_read: VecDeque<(events::Event, Option<EventCompletionAction>)> =
13464 VecDeque::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(events::Event, Option<EventCompletionAction>)>()));
13465 for _ in 0..event_count {
13466 match MaybeReadable::read(reader)? {
13467 Some(event) => pending_events_read.push_back((event, None)),
13468 None => continue,
13469 }
13470 }
13471
13472 let background_event_count: u64 = Readable::read(reader)?;
13473 for _ in 0..background_event_count {
13474 match <u8 as Readable>::read(reader)? {
13475 0 => {
13476 let _: OutPoint = Readable::read(reader)?;
13480 let _: ChannelMonitorUpdate = Readable::read(reader)?;
13481 }
13482 _ => return Err(DecodeError::InvalidValue),
13483 }
13484 }
13485
13486 let _last_node_announcement_serial: u32 = Readable::read(reader)?; let highest_seen_timestamp: u32 = Readable::read(reader)?;
13488
13489 let pending_inbound_payment_count: u64 = Readable::read(reader)?;
13491 for _ in 0..pending_inbound_payment_count {
13492 let payment_hash: PaymentHash = Readable::read(reader)?;
13493 let logger = WithContext::from(&args.logger, None, None, Some(payment_hash));
13494 let inbound: PendingInboundPayment = Readable::read(reader)?;
13495 log_warn!(logger, "Ignoring deprecated pending inbound payment with payment hash {}: {:?}", payment_hash, inbound);
13496 }
13497
13498 let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
13499 let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
13500 hash_map_with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
13501 for _ in 0..pending_outbound_payments_count_compat {
13502 let session_priv = Readable::read(reader)?;
13503 let payment = PendingOutboundPayment::Legacy {
13504 session_privs: hash_set_from_iter([session_priv]),
13505 };
13506 if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
13507 return Err(DecodeError::InvalidValue)
13508 };
13509 }
13510
13511 let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
13513 let mut pending_outbound_payments = None;
13514 let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(new_hash_map());
13515 let mut received_network_pubkey: Option<PublicKey> = None;
13516 let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
13517 let mut probing_cookie_secret: Option<[u8; 32]> = None;
13518 let mut claimable_htlc_purposes = None;
13519 let mut claimable_htlc_onion_fields = None;
13520 let mut pending_claiming_payments = Some(new_hash_map());
13521 let mut monitor_update_blocked_actions_per_peer: Option<Vec<(_, BTreeMap<_, Vec<_>>)>> = Some(Vec::new());
13522 let mut events_override = None;
13523 let mut in_flight_monitor_updates: Option<HashMap<(PublicKey, OutPoint), Vec<ChannelMonitorUpdate>>> = None;
13524 let mut decode_update_add_htlcs: Option<HashMap<u64, Vec<msgs::UpdateAddHTLC>>> = None;
13525 let mut inbound_payment_id_secret = None;
13526 read_tlv_fields!(reader, {
13527 (1, pending_outbound_payments_no_retry, option),
13528 (2, pending_intercepted_htlcs, option),
13529 (3, pending_outbound_payments, option),
13530 (4, pending_claiming_payments, option),
13531 (5, received_network_pubkey, option),
13532 (6, monitor_update_blocked_actions_per_peer, option),
13533 (7, fake_scid_rand_bytes, option),
13534 (8, events_override, option),
13535 (9, claimable_htlc_purposes, optional_vec),
13536 (10, in_flight_monitor_updates, option),
13537 (11, probing_cookie_secret, option),
13538 (13, claimable_htlc_onion_fields, optional_vec),
13539 (14, decode_update_add_htlcs, option),
13540 (15, inbound_payment_id_secret, option),
13541 });
13542 let mut decode_update_add_htlcs = decode_update_add_htlcs.unwrap_or_else(|| new_hash_map());
13543 if fake_scid_rand_bytes.is_none() {
13544 fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
13545 }
13546
13547 if probing_cookie_secret.is_none() {
13548 probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
13549 }
13550
13551 if inbound_payment_id_secret.is_none() {
13552 inbound_payment_id_secret = Some(args.entropy_source.get_secure_random_bytes());
13553 }
13554
13555 if let Some(events) = events_override {
13556 pending_events_read = events;
13557 }
13558
13559 if !channel_closures.is_empty() {
13560 pending_events_read.append(&mut channel_closures);
13561 }
13562
13563 if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
13564 pending_outbound_payments = Some(pending_outbound_payments_compat);
13565 } else if pending_outbound_payments.is_none() {
13566 let mut outbounds = new_hash_map();
13567 for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
13568 outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
13569 }
13570 pending_outbound_payments = Some(outbounds);
13571 }
13572 let pending_outbounds = OutboundPayments::new(pending_outbound_payments.unwrap());
13573
13574 let mut pending_background_events = Vec::new();
13586 macro_rules! handle_in_flight_updates {
13587 ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr,
13588 $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr
13589 ) => { {
13590 let mut max_in_flight_update_id = 0;
13591 let starting_len = $chan_in_flight_upds.len();
13592 $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id());
13593 if $chan_in_flight_upds.len() < starting_len {
13594 log_debug!(
13595 $logger,
13596 "{} ChannelMonitorUpdates completed after ChannelManager was last serialized",
13597 starting_len - $chan_in_flight_upds.len()
13598 );
13599 }
13600 for update in $chan_in_flight_upds.iter() {
13601 log_debug!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}",
13602 update.update_id, $channel_info_log, &$monitor.channel_id());
13603 max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id);
13604 pending_background_events.push(
13605 BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13606 counterparty_node_id: $counterparty_node_id,
13607 funding_txo: $funding_txo,
13608 channel_id: $monitor.channel_id(),
13609 update: update.clone(),
13610 });
13611 }
13612 if $chan_in_flight_upds.is_empty() {
13613 pending_background_events.push(
13617 BackgroundEvent::MonitorUpdatesComplete {
13618 counterparty_node_id: $counterparty_node_id,
13619 channel_id: $monitor.channel_id(),
13620 });
13621 } else {
13622 $peer_state.closed_channel_monitor_update_ids.entry($monitor.channel_id())
13623 .and_modify(|v| *v = cmp::max(max_in_flight_update_id, *v))
13624 .or_insert(max_in_flight_update_id);
13625 }
13626 if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() {
13627 log_error!($logger, "Duplicate in-flight monitor update set for the same channel!");
13628 return Err(DecodeError::InvalidValue);
13629 }
13630 max_in_flight_update_id
13631 } }
13632 }
13633
13634 for (counterparty_id, peer_state_mtx) in per_peer_state.iter_mut() {
13635 let mut peer_state_lock = peer_state_mtx.lock().unwrap();
13636 let peer_state = &mut *peer_state_lock;
13637 for phase in peer_state.channel_by_id.values() {
13638 if let ChannelPhase::Funded(chan) = phase {
13639 let logger = WithChannelContext::from(&args.logger, &chan.context, None);
13640
13641 let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
13644 let monitor = args.channel_monitors.get(&funding_txo)
13645 .expect("We already checked for monitor presence when loading channels");
13646 let mut max_in_flight_update_id = monitor.get_latest_update_id();
13647 if let Some(in_flight_upds) = &mut in_flight_monitor_updates {
13648 if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) {
13649 max_in_flight_update_id = cmp::max(max_in_flight_update_id,
13650 handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds,
13651 funding_txo, monitor, peer_state, logger, ""));
13652 }
13653 }
13654 if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id {
13655 log_error!(logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
13657 log_error!(logger, " The ChannelMonitor for channel {} is at update_id {} with update_id through {} in-flight",
13658 chan.context.channel_id(), monitor.get_latest_update_id(), max_in_flight_update_id);
13659 log_error!(logger, " but the ChannelManager is at update_id {}.", chan.get_latest_unblocked_monitor_update_id());
13660 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13661 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13662 log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13663 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13664 return Err(DecodeError::DangerousValue);
13665 }
13666 } else {
13667 debug_assert!(false);
13670 return Err(DecodeError::InvalidValue);
13671 }
13672 }
13673 }
13674
13675 if let Some(in_flight_upds) = in_flight_monitor_updates {
13676 for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds {
13677 let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied();
13678 let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id, None);
13679 if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
13680 let peer_state_mutex = per_peer_state.entry(counterparty_id).or_insert_with(|| {
13684 Mutex::new(empty_peer_state())
13685 });
13686 let mut peer_state = peer_state_mutex.lock().unwrap();
13687 handle_in_flight_updates!(counterparty_id, chan_in_flight_updates,
13688 funding_txo, monitor, peer_state, logger, "closed ");
13689 } else {
13690 log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!");
13691 log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) =
13692 channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } );
13693 log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
13694 log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
13695 log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
13696 log_error!(logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
13697 log_error!(logger, " Pending in-flight updates are: {:?}", chan_in_flight_updates);
13698 return Err(DecodeError::InvalidValue);
13699 }
13700 }
13701 }
13702
13703 pending_background_events.reserve(close_background_events.len());
13706 'each_bg_event: for mut new_event in close_background_events {
13707 if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13708 counterparty_node_id, funding_txo, channel_id, update,
13709 } = &mut new_event {
13710 debug_assert_eq!(update.updates.len(), 1);
13711 debug_assert!(matches!(update.updates[0], ChannelMonitorUpdateStep::ChannelForceClosed { .. }));
13712 let mut updated_id = false;
13713 for pending_event in pending_background_events.iter() {
13714 if let BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
13715 counterparty_node_id: pending_cp, funding_txo: pending_funding,
13716 channel_id: pending_chan_id, update: pending_update,
13717 } = pending_event {
13718 let for_same_channel = counterparty_node_id == pending_cp
13719 && funding_txo == pending_funding
13720 && channel_id == pending_chan_id;
13721 if for_same_channel {
13722 debug_assert!(update.update_id >= pending_update.update_id);
13723 if pending_update.updates.iter().any(|upd| matches!(upd, ChannelMonitorUpdateStep::ChannelForceClosed { .. })) {
13724 continue 'each_bg_event;
13728 }
13729 update.update_id = pending_update.update_id.saturating_add(1);
13730 updated_id = true;
13731 }
13732 }
13733 }
13734 let mut per_peer_state = per_peer_state.get(counterparty_node_id)
13735 .expect("If we have pending updates for a channel it must have an entry")
13736 .lock().unwrap();
13737 if updated_id {
13738 per_peer_state
13739 .closed_channel_monitor_update_ids.entry(*channel_id)
13740 .and_modify(|v| *v = cmp::max(update.update_id, *v))
13741 .or_insert(update.update_id);
13742 }
13743 let in_flight_updates = per_peer_state.in_flight_monitor_updates
13744 .entry(*funding_txo)
13745 .or_insert_with(Vec::new);
13746 debug_assert!(!in_flight_updates.iter().any(|upd| upd == update));
13747 in_flight_updates.push(update.clone());
13748 }
13749 pending_background_events.push(new_event);
13750 }
13751
13752 let mut pending_claims_to_replay = Vec::new();
13756
13757 {
13758 for (_, monitor) in args.channel_monitors.iter() {
13767 let counterparty_opt = outpoint_to_peer.get(&monitor.get_funding_txo().0);
13768 if counterparty_opt.is_none() {
13769 for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
13770 let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13771 if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
13772 if path.hops.is_empty() {
13773 log_error!(logger, "Got an empty path for a pending payment");
13774 return Err(DecodeError::InvalidValue);
13775 }
13776
13777 let mut session_priv_bytes = [0; 32];
13778 session_priv_bytes[..].copy_from_slice(&session_priv[..]);
13779 pending_outbounds.insert_from_monitor_on_startup(
13780 payment_id, htlc.payment_hash, session_priv_bytes, &path, best_block_height, logger
13781 );
13782 }
13783 }
13784 for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
13785 let logger = WithChannelMonitor::from(&args.logger, monitor, Some(htlc.payment_hash));
13786 match htlc_source {
13787 HTLCSource::PreviousHopData(prev_hop_data) => {
13788 let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
13789 info.prev_funding_outpoint == prev_hop_data.outpoint &&
13790 info.prev_htlc_id == prev_hop_data.htlc_id
13791 };
13792 decode_update_add_htlcs.retain(|scid, update_add_htlcs| {
13798 update_add_htlcs.retain(|update_add_htlc| {
13799 let matches = *scid == prev_hop_data.short_channel_id &&
13800 update_add_htlc.htlc_id == prev_hop_data.htlc_id;
13801 if matches {
13802 log_info!(logger, "Removing pending to-decode HTLC with hash {} as it was forwarded to the closed channel {}",
13803 &htlc.payment_hash, &monitor.channel_id());
13804 }
13805 !matches
13806 });
13807 !update_add_htlcs.is_empty()
13808 });
13809 forward_htlcs.retain(|_, forwards| {
13810 forwards.retain(|forward| {
13811 if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
13812 if pending_forward_matches_htlc(&htlc_info) {
13813 log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
13814 &htlc.payment_hash, &monitor.channel_id());
13815 false
13816 } else { true }
13817 } else { true }
13818 });
13819 !forwards.is_empty()
13820 });
13821 pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
13822 if pending_forward_matches_htlc(&htlc_info) {
13823 log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
13824 &htlc.payment_hash, &monitor.channel_id());
13825 pending_events_read.retain(|(event, _)| {
13826 if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
13827 intercepted_id != ev_id
13828 } else { true }
13829 });
13830 false
13831 } else { true }
13832 });
13833 },
13834 HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
13835 if let Some(preimage) = preimage_opt {
13836 let pending_events = Mutex::new(pending_events_read);
13837 let compl_action =
13846 EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
13847 channel_funding_outpoint: monitor.get_funding_txo().0,
13848 channel_id: monitor.channel_id(),
13849 counterparty_node_id: path.hops[0].pubkey,
13850 };
13851 pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
13852 path, false, compl_action, &pending_events, &&logger);
13853 pending_events_read = pending_events.into_inner().unwrap();
13854 }
13855 },
13856 }
13857 }
13858 }
13859
13860 let mut fail_read = false;
13864 let outbound_claimed_htlcs_iter = monitor.get_all_current_outbound_htlcs()
13865 .into_iter()
13866 .filter_map(|(htlc_source, (htlc, preimage_opt))| {
13867 if let HTLCSource::PreviousHopData(prev_hop) = &htlc_source {
13868 if let Some(payment_preimage) = preimage_opt {
13869 let inbound_edge_monitor = args.channel_monitors.get(&prev_hop.outpoint);
13870 let inbound_edge_monitor = if let Some(monitor) = inbound_edge_monitor {
13882 monitor
13883 } else {
13884 return None;
13885 };
13886 let inbound_edge_balances = inbound_edge_monitor.get_claimable_balances();
13891 if inbound_edge_balances.is_empty() {
13892 return None;
13893 }
13894
13895 if prev_hop.counterparty_node_id.is_none() {
13896 let htlc_payment_hash: PaymentHash = payment_preimage.into();
13907 let balance_could_incl_htlc = |bal| match bal {
13908 &Balance::ClaimableOnChannelClose { .. } => {
13909 true
13912 },
13913 &Balance::MaybePreimageClaimableHTLC { payment_hash, .. } => {
13914 payment_hash == htlc_payment_hash
13915 },
13916 _ => false,
13917 };
13918 let htlc_may_be_in_balances =
13919 inbound_edge_balances.iter().any(balance_could_incl_htlc);
13920 if !htlc_may_be_in_balances {
13921 return None;
13922 }
13923
13924 if short_to_chan_info.get(&prev_hop.short_channel_id).is_none() {
13929 log_error!(args.logger,
13930 "We need to replay the HTLC claim for payment_hash {} (preimage {}) but cannot do so as the HTLC was forwarded prior to LDK 0.0.124.\
13931 All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
13932 htlc_payment_hash,
13933 payment_preimage,
13934 );
13935 fail_read = true;
13936 }
13937
13938 log_error!(args.logger,
13945 "We need to replay the HTLC claim for payment_hash {} (preimage {}) but don't have all the required information to do so reliably.\
13946 As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
13947 All HTLCs that were forwarded by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
13948 Continuing anyway, though panics may occur!",
13949 htlc_payment_hash,
13950 payment_preimage,
13951 );
13952 }
13953
13954 Some((htlc_source, payment_preimage, htlc.amount_msat,
13955 counterparty_opt.is_none(),
13959 counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
13960 monitor.get_funding_txo().0, monitor.channel_id()))
13961 } else { None }
13962 } else {
13963 None
13968 }
13969 });
13970 for tuple in outbound_claimed_htlcs_iter {
13971 pending_claims_to_replay.push(tuple);
13972 }
13973 if fail_read {
13974 return Err(DecodeError::InvalidValue);
13975 }
13976 }
13977 }
13978
13979 if !forward_htlcs.is_empty() || !decode_update_add_htlcs.is_empty() || pending_outbounds.needs_abandon() {
13980 pending_events_read.push_back((events::Event::PendingHTLCsForwardable {
13986 time_forwardable: Duration::from_secs(2),
13987 }, None));
13988 }
13989
13990 let expanded_inbound_key = args.node_signer.get_inbound_payment_key();
13991
13992 let mut claimable_payments = hash_map_with_capacity(claimable_htlcs_list.len());
13993 if let Some(purposes) = claimable_htlc_purposes {
13994 if purposes.len() != claimable_htlcs_list.len() {
13995 return Err(DecodeError::InvalidValue);
13996 }
13997 if let Some(onion_fields) = claimable_htlc_onion_fields {
13998 if onion_fields.len() != claimable_htlcs_list.len() {
13999 return Err(DecodeError::InvalidValue);
14000 }
14001 for (purpose, (onion, (payment_hash, htlcs))) in
14002 purposes.into_iter().zip(onion_fields.into_iter().zip(claimable_htlcs_list.into_iter()))
14003 {
14004 let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
14005 purpose, htlcs, onion_fields: onion,
14006 });
14007 if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
14008 }
14009 } else {
14010 for (purpose, (payment_hash, htlcs)) in purposes.into_iter().zip(claimable_htlcs_list.into_iter()) {
14011 let existing_payment = claimable_payments.insert(payment_hash, ClaimablePayment {
14012 purpose, htlcs, onion_fields: None,
14013 });
14014 if existing_payment.is_some() { return Err(DecodeError::InvalidValue); }
14015 }
14016 }
14017 } else {
14018 for (payment_hash, htlcs) in claimable_htlcs_list.drain(..) {
14021 if htlcs.is_empty() {
14022 return Err(DecodeError::InvalidValue);
14023 }
14024 let purpose = match &htlcs[0].onion_payload {
14025 OnionPayload::Invoice { _legacy_hop_data } => {
14026 if let Some(hop_data) = _legacy_hop_data {
14027 events::PaymentPurpose::Bolt11InvoicePayment {
14028 payment_preimage:
14029 match inbound_payment::verify(
14030 payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger
14031 ) {
14032 Ok((payment_preimage, _)) => payment_preimage,
14033 Err(()) => {
14034 log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", &payment_hash);
14035 return Err(DecodeError::InvalidValue);
14036 }
14037 },
14038 payment_secret: hop_data.payment_secret,
14039 }
14040 } else { return Err(DecodeError::InvalidValue); }
14041 },
14042 OnionPayload::Spontaneous(payment_preimage) =>
14043 events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
14044 };
14045 claimable_payments.insert(payment_hash, ClaimablePayment {
14046 purpose, htlcs, onion_fields: None,
14047 });
14048 }
14049 }
14050
14051 for (payment_hash, payment) in claimable_payments.iter() {
14055 for htlc in payment.htlcs.iter() {
14056 if htlc.prev_hop.counterparty_node_id.is_some() {
14057 continue;
14058 }
14059 if short_to_chan_info.get(&htlc.prev_hop.short_channel_id).is_some() {
14060 log_error!(args.logger,
14061 "We do not have the required information to claim a pending payment with payment hash {} reliably.\
14062 As long as the channel for the inbound edge of the forward remains open, this may work okay, but we may panic at runtime!\
14063 All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1\
14064 Continuing anyway, though panics may occur!",
14065 payment_hash,
14066 );
14067 } else {
14068 log_error!(args.logger,
14069 "We do not have the required information to claim a pending payment with payment hash {}.\
14070 All HTLCs that were received by LDK 0.0.123 and prior must be resolved prior to upgrading to LDK 0.1",
14071 payment_hash,
14072 );
14073 return Err(DecodeError::InvalidValue);
14074 }
14075 }
14076 }
14077
14078 let mut secp_ctx = Secp256k1::new();
14079 secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
14080
14081 let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
14082 Ok(key) => key,
14083 Err(()) => return Err(DecodeError::InvalidValue)
14084 };
14085 if let Some(network_pubkey) = received_network_pubkey {
14086 if network_pubkey != our_network_pubkey {
14087 log_error!(args.logger, "Key that was generated does not match the existing key.");
14088 return Err(DecodeError::InvalidValue);
14089 }
14090 }
14091
14092 let mut outbound_scid_aliases = new_hash_set();
14093 for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
14094 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14095 let peer_state = &mut *peer_state_lock;
14096 for (chan_id, phase) in peer_state.channel_by_id.iter_mut() {
14097 if let ChannelPhase::Funded(chan) = phase {
14098 let logger = WithChannelContext::from(&args.logger, &chan.context, None);
14099 if chan.context.outbound_scid_alias() == 0 {
14100 let mut outbound_scid_alias;
14101 loop {
14102 outbound_scid_alias = fake_scid::Namespace::OutboundAlias
14103 .get_fake_scid(best_block_height, &chain_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
14104 if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
14105 }
14106 chan.context.set_outbound_scid_alias(outbound_scid_alias);
14107 } else if !outbound_scid_aliases.insert(chan.context.outbound_scid_alias()) {
14108 log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14111 return Err(DecodeError::InvalidValue);
14112 }
14113 if chan.context.is_usable() {
14114 if short_to_chan_info.insert(chan.context.outbound_scid_alias(), (chan.context.get_counterparty_node_id(), *chan_id)).is_some() {
14115 log_error!(logger, "Got duplicate outbound SCID alias; {}", chan.context.outbound_scid_alias());
14118 return Err(DecodeError::InvalidValue);
14119 }
14120 }
14121 } else {
14122 debug_assert!(false);
14125 return Err(DecodeError::InvalidValue);
14126 }
14127 }
14128 }
14129
14130 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
14131
14132 for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
14133 if let Some(peer_state) = per_peer_state.get(&node_id) {
14134 for (channel_id, actions) in monitor_update_blocked_actions.iter() {
14135 let logger = WithContext::from(&args.logger, Some(node_id), Some(*channel_id), None);
14136 for action in actions.iter() {
14137 if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
14138 downstream_counterparty_and_funding_outpoint:
14139 Some(EventUnblockedChannel {
14140 counterparty_node_id: blocked_node_id,
14141 funding_txo: _,
14142 channel_id: blocked_channel_id,
14143 blocking_action,
14144 }), ..
14145 } = action {
14146 if let Some(blocked_peer_state) = per_peer_state.get(blocked_node_id) {
14147 log_trace!(logger,
14148 "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
14149 blocked_channel_id);
14150 blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
14151 .entry(*blocked_channel_id)
14152 .or_insert_with(Vec::new).push(blocking_action.clone());
14153 } else {
14154 }
14160 }
14161 if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { .. } = action {
14162 debug_assert!(false, "Non-event-generating channel freeing should not appear in our queue");
14163 }
14164 }
14165 }
14171 peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
14172 } else {
14173 for actions in monitor_update_blocked_actions.values() {
14174 for action in actions.iter() {
14175 if matches!(action, MonitorUpdateCompletionAction::PaymentClaimed { .. }) {
14176 } else {
14184 let logger = WithContext::from(&args.logger, Some(node_id), None, None);
14185 log_error!(logger, "Got blocked actions {:?} without a per-peer-state for {}", monitor_update_blocked_actions, node_id);
14186 return Err(DecodeError::InvalidValue);
14187 }
14188 }
14189 }
14190 }
14191 }
14192
14193 let channel_manager = ChannelManager {
14194 chain_hash,
14195 fee_estimator: bounded_fee_estimator,
14196 chain_monitor: args.chain_monitor,
14197 tx_broadcaster: args.tx_broadcaster,
14198 router: args.router,
14199 message_router: args.message_router,
14200
14201 best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
14202
14203 inbound_payment_key: expanded_inbound_key,
14204 pending_outbound_payments: pending_outbounds,
14205 pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
14206
14207 forward_htlcs: Mutex::new(forward_htlcs),
14208 decode_update_add_htlcs: Mutex::new(decode_update_add_htlcs),
14209 claimable_payments: Mutex::new(ClaimablePayments { claimable_payments, pending_claiming_payments: pending_claiming_payments.unwrap() }),
14210 outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
14211 outpoint_to_peer: Mutex::new(outpoint_to_peer),
14212 short_to_chan_info: FairRwLock::new(short_to_chan_info),
14213 fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
14214
14215 probing_cookie_secret: probing_cookie_secret.unwrap(),
14216 inbound_payment_id_secret: inbound_payment_id_secret.unwrap(),
14217
14218 our_network_pubkey,
14219 secp_ctx,
14220
14221 highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
14222
14223 per_peer_state: FairRwLock::new(per_peer_state),
14224
14225 pending_events: Mutex::new(pending_events_read),
14226 pending_events_processor: AtomicBool::new(false),
14227 pending_background_events: Mutex::new(pending_background_events),
14228 total_consistency_lock: RwLock::new(()),
14229 background_events_processed_since_startup: AtomicBool::new(false),
14230
14231 event_persist_notifier: Notifier::new(),
14232 needs_persist_flag: AtomicBool::new(false),
14233
14234 funding_batch_states: Mutex::new(BTreeMap::new()),
14235
14236 pending_offers_messages: Mutex::new(Vec::new()),
14237 pending_async_payments_messages: Mutex::new(Vec::new()),
14238
14239 pending_broadcast_messages: Mutex::new(Vec::new()),
14240
14241 entropy_source: args.entropy_source,
14242 node_signer: args.node_signer,
14243 signer_provider: args.signer_provider,
14244
14245 last_days_feerates: Mutex::new(VecDeque::new()),
14246
14247 logger: args.logger,
14248 default_configuration: args.default_config,
14249
14250 #[cfg(feature = "dnssec")]
14251 hrn_resolver: OMNameResolver::new(highest_seen_timestamp, best_block_height),
14252 #[cfg(feature = "dnssec")]
14253 pending_dns_onion_messages: Mutex::new(Vec::new()),
14254
14255 #[cfg(feature = "_test_utils")]
14256 testing_dnssec_proof_offer_resolution_override: Mutex::new(new_hash_map()),
14257 };
14258
14259 let mut processed_claims: HashSet<Vec<MPPClaimHTLCSource>> = new_hash_set();
14260 for (_, monitor) in args.channel_monitors.iter() {
14261 for (payment_hash, (payment_preimage, payment_claims)) in monitor.get_stored_preimages() {
14262 if !payment_claims.is_empty() {
14263 for payment_claim in payment_claims {
14264 if processed_claims.contains(&payment_claim.mpp_parts) {
14265 continue;
14270 }
14271 if payment_claim.mpp_parts.is_empty() {
14272 return Err(DecodeError::InvalidValue);
14273 }
14274 {
14275 let payments = channel_manager.claimable_payments.lock().unwrap();
14276 if !payments.claimable_payments.contains_key(&payment_hash) {
14277 if let Some(payment) = payments.pending_claiming_payments.get(&payment_hash) {
14278 if payment.payment_id == payment_claim.claiming_payment.payment_id {
14279 continue;
14285 }
14286 }
14287 }
14288 }
14289
14290 let mut channels_without_preimage = payment_claim.mpp_parts.iter()
14291 .map(|htlc_info| (htlc_info.counterparty_node_id, htlc_info.funding_txo, htlc_info.channel_id))
14292 .collect::<Vec<_>>();
14293 channels_without_preimage.sort_unstable();
14297 channels_without_preimage.dedup();
14298 let pending_claims = PendingMPPClaim {
14299 channels_without_preimage,
14300 channels_with_preimage: Vec::new(),
14301 };
14302 let pending_claim_ptr_opt = Some(Arc::new(Mutex::new(pending_claims)));
14303
14304 let claim_found =
14313 channel_manager.claimable_payments.lock().unwrap().begin_claiming_payment(
14314 payment_hash, &channel_manager.node_signer, &channel_manager.logger,
14315 &channel_manager.inbound_payment_id_secret, true,
14316 );
14317 if claim_found.is_err() {
14318 let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14319 match claimable_payments.pending_claiming_payments.entry(payment_hash) {
14320 hash_map::Entry::Occupied(_) => {
14321 debug_assert!(false, "Entry was added in begin_claiming_payment");
14322 return Err(DecodeError::InvalidValue);
14323 },
14324 hash_map::Entry::Vacant(entry) => {
14325 entry.insert(payment_claim.claiming_payment);
14326 },
14327 }
14328 }
14329
14330 for part in payment_claim.mpp_parts.iter() {
14331 let pending_mpp_claim = pending_claim_ptr_opt.as_ref().map(|ptr| (
14332 part.counterparty_node_id, part.channel_id,
14333 PendingMPPClaimPointer(Arc::clone(&ptr))
14334 ));
14335 let pending_claim_ptr = pending_claim_ptr_opt.as_ref().map(|ptr|
14336 RAAMonitorUpdateBlockingAction::ClaimedMPPPayment {
14337 pending_claim: PendingMPPClaimPointer(Arc::clone(&ptr)),
14338 }
14339 );
14340 channel_manager.claim_mpp_part(
14344 part.into(), payment_preimage, None,
14345 |_, _|
14346 (Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash, pending_mpp_claim }), pending_claim_ptr)
14347 );
14348 }
14349 processed_claims.insert(payment_claim.mpp_parts);
14350 }
14351 } else {
14352 let per_peer_state = channel_manager.per_peer_state.read().unwrap();
14353 let mut claimable_payments = channel_manager.claimable_payments.lock().unwrap();
14354 let payment = claimable_payments.claimable_payments.remove(&payment_hash);
14355 mem::drop(claimable_payments);
14356 if let Some(payment) = payment {
14357 log_info!(channel_manager.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", &payment_hash);
14358 let mut claimable_amt_msat = 0;
14359 let mut receiver_node_id = Some(our_network_pubkey);
14360 let phantom_shared_secret = payment.htlcs[0].prev_hop.phantom_shared_secret;
14361 if phantom_shared_secret.is_some() {
14362 let phantom_pubkey = channel_manager.node_signer.get_node_id(Recipient::PhantomNode)
14363 .expect("Failed to get node_id for phantom node recipient");
14364 receiver_node_id = Some(phantom_pubkey)
14365 }
14366 for claimable_htlc in &payment.htlcs {
14367 claimable_amt_msat += claimable_htlc.value;
14368
14369 let previous_channel_id = claimable_htlc.prev_hop.channel_id;
14385 let peer_node_id_opt = channel_manager.outpoint_to_peer.lock().unwrap()
14386 .get(&claimable_htlc.prev_hop.outpoint).cloned();
14387 if let Some(peer_node_id) = peer_node_id_opt {
14388 let peer_state_mutex = per_peer_state.get(&peer_node_id).unwrap();
14389 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
14390 let peer_state = &mut *peer_state_lock;
14391 if let Some(ChannelPhase::Funded(channel)) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
14392 let logger = WithChannelContext::from(&channel_manager.logger, &channel.context, Some(payment_hash));
14393 channel.claim_htlc_while_disconnected_dropping_mon_update_legacy(
14394 claimable_htlc.prev_hop.htlc_id, payment_preimage, &&logger
14395 );
14396 }
14397 }
14398 if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
14399 previous_hop_monitor.provide_payment_preimage_unsafe_legacy(
14412 &payment_hash, &payment_preimage, &channel_manager.tx_broadcaster,
14413 &channel_manager.fee_estimator, &channel_manager.logger
14414 );
14415 }
14416 }
14417 let mut pending_events = channel_manager.pending_events.lock().unwrap();
14418 let payment_id = payment.inbound_payment_id(&inbound_payment_id_secret.unwrap());
14419 pending_events.push_back((events::Event::PaymentClaimed {
14420 receiver_node_id,
14421 payment_hash,
14422 purpose: payment.purpose,
14423 amount_msat: claimable_amt_msat,
14424 htlcs: payment.htlcs.iter().map(events::ClaimedHTLC::from).collect(),
14425 sender_intended_total_msat: payment.htlcs.first().map(|htlc| htlc.total_msat),
14426 onion_fields: payment.onion_fields,
14427 payment_id: Some(payment_id),
14428 }, None));
14429 }
14430 }
14431 }
14432 }
14433
14434 for htlc_source in failed_htlcs.drain(..) {
14435 let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
14436 let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
14437 let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
14438 channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
14439 }
14440
14441 for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
14442 channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
14446 downstream_closed, true, downstream_node_id, downstream_funding,
14447 downstream_channel_id, None
14448 );
14449 }
14450
14451 Ok((best_block_hash.clone(), channel_manager))
14455 }
14456}
14457
14458#[cfg(test)]
14459mod tests {
14460 use bitcoin::hashes::Hash;
14461 use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
14462 use core::sync::atomic::Ordering;
14463 use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
14464 use crate::ln::types::ChannelId;
14465 use crate::types::payment::{PaymentPreimage, PaymentHash, PaymentSecret};
14466 use crate::ln::channelmanager::{create_recv_pending_htlc_info, HTLCForwardInfo, inbound_payment, PaymentId, RecipientOnionFields, InterceptId};
14467 use crate::ln::functional_test_utils::*;
14468 use crate::ln::msgs::{self, ErrorAction};
14469 use crate::ln::msgs::ChannelMessageHandler;
14470 use crate::ln::outbound_payment::Retry;
14471 use crate::prelude::*;
14472 use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
14473 use crate::util::errors::APIError;
14474 use crate::util::ser::Writeable;
14475 use crate::util::test_utils;
14476 use crate::util::config::{ChannelConfig, ChannelConfigUpdate};
14477 use crate::sign::EntropySource;
14478
	#[test]
	fn test_notify_limits() {
		// Check that a few cases which don't require the persistence of a new ChannelManager,
		// indeed, do not cause the persistence of a new ChannelManager.
		let chanmon_cfgs = create_chanmon_cfgs(3);
		let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
		let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

		// All nodes start with a persistable update pending (presumably from the initial
		// peer connections set up by `create_network` — TODO confirm), so the futures
		// complete immediately here.
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());

		let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);

		// Double the fee in both directions' channel_updates so that, later in the test, we can
		// distinguish "redundant update" (no persistence) from "changed update" (persistence).
		// Snapshot the current ChannelDetails before any of the modified updates are applied.
		chan.0.contents.fee_base_msat *= 2;
		chan.1.contents.fee_base_msat *= 2;
		let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
			&nodes[1].node.get_our_node_id()).pop().unwrap();
		let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
			&nodes[0].node.get_our_node_id()).pop().unwrap();

		// Opening the channel left a persistence need pending on nodes 0 and 1 (but not the
		// uninvolved node 2); once polled complete, the futures reset and do not re-complete
		// without a new persistable event.
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());

		// A channel_update for a channel node 2 is not a party to should not require node 2
		// to persist.
		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.0);
		nodes[2].node.handle_channel_update(nodes[1].node.get_our_node_id(), &chan.1);
		assert!(!nodes[2].node.get_event_or_persistence_needed_future().poll_is_complete());

		// Channel_updates delivered by the wrong peer (node 2, who is not our counterparty on
		// this channel) should likewise not require persistence on nodes 0 and 1.
		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
		nodes[0].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.0);
		nodes[1].node.handle_channel_update(nodes[2].node.get_our_node_id(), &chan.1);
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());

		// ...and none of the above applied the modified fee, so the cached channel info is
		// unchanged.
		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);

		// Work out which update is node A's and which is node B's: per BOLT 7, bit 0 of
		// channel_flags is the direction, and the node with the lexicographically-lesser pubkey
		// is node_1.
		let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
		let as_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.0 } else { &chan.1 };
		let bs_update = if as_node_one == (chan.0.contents.channel_flags & 1 == 0 ) { &chan.1 } else { &chan.0 };

		// Receiving our OWN direction's channel_update back from the counterparty is redundant:
		// no persistence needed and no change to the stored channel info.
		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &as_update);
		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &bs_update);
		assert!(!nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(!nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);

		// Receiving the COUNTERPARTY's (modified-fee) channel_update from the correct peer is a
		// real change: persistence is required and the channel info updates.
		nodes[0].node.handle_channel_update(nodes[1].node.get_our_node_id(), &bs_update);
		nodes[1].node.handle_channel_update(nodes[0].node.get_our_node_id(), &as_update);
		assert!(nodes[0].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert!(nodes[1].node.get_event_or_persistence_needed_future().poll_is_complete());
		assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
		assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
	}
14558
	#[test]
	fn test_keysend_dup_hash_partial_mpp() {
		// Test that a keysend payment whose payment_hash collides with a pending, partially
		// received MPP payment is failed back, and that the MPP payment can afterwards complete
		// and be claimed normally.
		let chanmon_cfgs = create_chanmon_cfgs(2);
		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
		create_announced_chan_between_nodes(&nodes, 0, 1);

		// Build a two-path MPP route by duplicating the single path of a normal route.
		let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
		let mut mpp_route = route.clone();
		mpp_route.paths.push(mpp_route.paths[0].clone());

		let payment_id = PaymentId([42; 32]);
		// Send only the first MPP part (total 200_000 msat across two parts), so the recipient
		// holds a partial MPP pending on our_payment_hash. `pass_along_path`'s final `false`
		// indicates the payment is not yet claimable.
		let cur_height = CHAN_CONFIRM_DEPTH + 1; let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);

		// Next, send a keysend payment with the same payment_hash (keysend hashes the preimage,
		// so reusing payment_preimage.0 reproduces the hash) and make sure it fails.
		nodes[0].node.send_spontaneous_payment(
			Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
			PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
		).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		let ev = events.drain(..).next().unwrap();
		let payment_event = SendEvent::from_event(ev);
		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
		check_added_monitors!(nodes[1], 0);
		commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
		expect_pending_htlcs_forwardable!(nodes[1]);
		// The recipient rejects the duplicate-hash keysend HTLC...
		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
		check_added_monitors!(nodes[1], 1);
		// ...which produces exactly one update_fail_htlc back to the sender.
		let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		assert!(updates.update_add_htlcs.is_empty());
		assert!(updates.update_fulfill_htlcs.is_empty());
		assert_eq!(updates.update_fail_htlcs.len(), 1);
		assert!(updates.update_fail_malformed_htlcs.is_empty());
		assert!(updates.update_fee.is_none());
		nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
		commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
		expect_payment_failed!(nodes[0], our_payment_hash, true);

		// Send the second half of the original MPP payment; with both parts received the
		// payment becomes claimable (`true` in pass_along_path).
		nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
			RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
		check_added_monitors!(nodes[0], 1);
		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), 1);
		pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);

		// Claim the full MPP payment. We exchange the fulfill/commitment/RAA messages by hand
		// below rather than via a utility like claim_payment_along_route, since the message
		// ordering here (both HTLCs on the same channel) doesn't match what those utilities
		// expect — NOTE(review): presumably the second fulfill sits in the holding cell until
		// the first RAA; confirm against claim_payment_along_route's assumptions.
		nodes[1].node.claim_funds(payment_preimage);
		expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
		check_added_monitors!(nodes[1], 2);

		// Manual fulfilment dance: first fulfill + commitment from node 1, then interleaved
		// revoke_and_ack / commitment_signed exchanges until both HTLCs are fully resolved.
		// The exact ordering of these calls matters; do not reorder.
		let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
		check_added_monitors!(nodes[1], 1);
		let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_first_cs);
		check_added_monitors!(nodes[1], 1);
		let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
		nodes[0].node.handle_commitment_signed(nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
		check_added_monitors!(nodes[0], 1);
		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
		let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
		check_added_monitors!(nodes[0], 1);
		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
		check_added_monitors!(nodes[1], 1);
		nodes[1].node.handle_commitment_signed(nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
		check_added_monitors!(nodes[1], 1);
		let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa);
		check_added_monitors!(nodes[0], 1);

		// The PaymentSent event was consumed by expect_payment_sent above; what remains is one
		// PaymentPathSuccessful event per MPP part (two parts, identical paths).
		let events = nodes[0].node.get_and_clear_pending_events();
		assert_eq!(events.len(), 2);
		match events[0] {
			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
				assert_eq!(payment_id, *actual_payment_id);
				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
				assert_eq!(route.paths[0], *path);
			},
			_ => panic!("Unexpected event"),
		}
		match events[1] {
			Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
				assert_eq!(payment_id, *actual_payment_id);
				assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
				assert_eq!(route.paths[0], *path);
			},
			_ => panic!("Unexpected event"),
		}
	}
14677
// Test sending payments that reuse a payment_hash which is already pending or was previously
// used: a keysend duplicating a pending claimable payment's hash, a payment-secret payment
// duplicating a pending keysend's hash, and two keysends sharing one preimage. In each case
// the recipient fails the duplicate HTLC back rather than accepting it, and the original
// pending payment remains claimable afterwards.
#[test]
fn test_keysend_dup_payment_hash() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	create_announced_chan_between_nodes(&nodes, 0, 1);
	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();

	// (1) Send a regular (non-keysend) payment, then attempt a keysend using the same
	// preimage (and thus the same payment_hash) while the first payment is still pending.
	let expected_route = [&nodes[1]];
	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &expected_route, 100_000);

	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(),
		TEST_FINAL_CLTV, false), 100_000);
	nodes[0].node.send_spontaneous_payment(
		Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
		PaymentId(payment_preimage.0), route_params.clone(), Retry::Attempts(0)
	).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let ev = events.drain(..).next().unwrap();
	let payment_event = SendEvent::from_event(ev);
	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	// The recipient fails the duplicate-hash keysend HTLC back to the sender.
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[1], 1);
	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
	expect_payment_failed!(nodes[0], payment_hash, true);

	// The original (non-duplicate) payment can still be claimed.
	claim_payment(&nodes[0], &expected_route, payment_preimage);

	// (2) Send a keysend payment with a fresh preimage, then attempt a payment-secret payment
	// reusing the keysend's payment_hash while the keysend is still pending.
	let payment_preimage = PaymentPreimage([42; 32]);
	let route = find_route(
		&nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
		None, nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
	).unwrap();
	let payment_hash = nodes[0].node.send_spontaneous_payment(
		Some(payment_preimage), RecipientOnionFields::spontaneous_empty(),
		PaymentId(payment_preimage.0), route.route_params.clone().unwrap(), Retry::Attempts(0)
	).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let event = events.pop().unwrap();
	let path = vec![&nodes[1]];
	pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));

	let payment_secret = PaymentSecret([43; 32]);
	nodes[0].node.send_payment_with_route(route.clone(), payment_hash,
		RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let ev = events.drain(..).next().unwrap();
	let payment_event = SendEvent::from_event(ev);
	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	// Again, the recipient fails the duplicate-hash HTLC back.
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[1], 1);
	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
	expect_payment_failed!(nodes[0], payment_hash, true);

	// The pending keysend payment is still claimable.
	claim_payment(&nodes[0], &expected_route, payment_preimage);

	// (3) Send one keysend, then attempt a second keysend (under a different PaymentId)
	// reusing the same preimage/hash while the first is still pending.
	let payment_id_1 = PaymentId([44; 32]);
	let payment_hash = nodes[0].node.send_spontaneous_payment(
		Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_1,
		route.route_params.clone().unwrap(), Retry::Attempts(0)
	).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let event = events.pop().unwrap();
	let path = vec![&nodes[1]];
	pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));

	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV, false),
		100_000
	);
	let payment_id_2 = PaymentId([45; 32]);
	nodes[0].node.send_spontaneous_payment(
		Some(payment_preimage), RecipientOnionFields::spontaneous_empty(), payment_id_2, route_params,
		Retry::Attempts(0)
	).unwrap();
	check_added_monitors!(nodes[0], 1);
	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	let ev = events.drain(..).next().unwrap();
	let payment_event = SendEvent::from_event(ev);
	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
	check_added_monitors!(nodes[1], 0);
	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
	expect_pending_htlcs_forwardable!(nodes[1]);
	// The second keysend with the same hash is failed back as well.
	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
	check_added_monitors!(nodes[1], 1);
	let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
	assert!(updates.update_add_htlcs.is_empty());
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert_eq!(updates.update_fail_htlcs.len(), 1);
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
	commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
	expect_payment_failed!(nodes[0], payment_hash, true);

	// The first keysend remains claimable.
	claim_payment(&nodes[0], &expected_route, payment_preimage);
}
14825
// Test that if a keysend HTLC's onion carries a payment preimage which does not hash to the
// HTLC's payment_hash, the recipient rejects it (logging the mismatch) rather than treating
// it as a valid spontaneous payment.
#[test]
fn test_keysend_hash_mismatch() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let payer_pubkey = nodes[0].node.get_our_node_id();
	let payee_pubkey = nodes[1].node.get_our_node_id();

	let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
	let route_params = RouteParameters::from_payment_params_and_value(
		PaymentParameters::for_keysend(payee_pubkey, 40, false), 10_000);
	let network_graph = nodes[0].network_graph;
	let first_hops = nodes[0].node.list_usable_channels();
	let scorer = test_utils::TestScorer::new();
	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
	let route = find_route(
		&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
		nodes[0].logger, &scorer, &Default::default(), &random_seed_bytes
	).unwrap();

	// Deliberately send with a payment_hash which does not match the hash of `test_preimage`,
	// using the test-only internal send path to bypass the public API's checks.
	let test_preimage = PaymentPreimage([42; 32]);
	let mismatch_payment_hash = PaymentHash([43; 32]);
	let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
		RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
	nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
		RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
	check_added_monitors!(nodes[0], 1);

	let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
	assert_eq!(updates.update_add_htlcs.len(), 1);
	assert!(updates.update_fulfill_htlcs.is_empty());
	assert!(updates.update_fail_htlcs.is_empty());
	assert!(updates.update_fail_malformed_htlcs.is_empty());
	assert!(updates.update_fee.is_none());
	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);

	// The recipient detects the preimage/hash mismatch and logs it.
	nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
}
14868
// Test that attempting to send a multi-path payment without a payment secret fails
// immediately with `PaymentFailureReason::UnexpectedError` and leaves no pending payment.
#[test]
fn test_multi_hop_missing_secret() {
	let chanmon_cfgs = create_chanmon_cfgs(4);
	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);

	let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
	let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
	let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
	let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;

	// Turn the single-path route into a two-path (MPP) route: one path via nodes[1] and one
	// via nodes[2], both terminating at nodes[3].
	let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
	let path = route.paths[0].clone();
	route.paths.push(path);
	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
	route.paths[0].hops[0].short_channel_id = chan_1_id;
	route.paths[0].hops[1].short_channel_id = chan_3_id;
	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
	route.paths[1].hops[0].short_channel_id = chan_2_id;
	route.paths[1].hops[1].short_channel_id = chan_4_id;

	// `spontaneous_empty()` carries no payment secret, which multi-path sends require.
	nodes[0].node.send_payment_with_route(route, payment_hash,
		RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0)).unwrap();
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1);
	match events[0] {
		Event::PaymentFailed { reason, .. } => {
			assert_eq!(reason.unwrap(), crate::events::PaymentFailureReason::UnexpectedError);
		}
		_ => panic!()
	}
	nodes[0].logger.assert_log_contains("lightning::ln::outbound_payment", "Payment secret is required for multi-path payments", 2);
	// The failed send must not leave a stuck pending payment behind.
	assert!(nodes[0].node.list_recent_payments().is_empty());
}
14905
// Test that the channel_update generated on force-close is cached internally rather than
// broadcast immediately, and only released as a BroadcastChannelUpdate message event once
// some peer is connected to receive it.
#[test]
fn test_channel_update_cached() {
	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);

	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	nodes[0].node.force_close_channel_with_peer(&chan.2, &nodes[1].node.get_our_node_id(), None, true).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);

	// The close does not immediately generate a broadcast message event...
	let node_1_events = nodes[1].node.get_and_clear_pending_msg_events();
	assert_eq!(node_1_events.len(), 0);

	{
		// ...instead the channel_update is cached internally on nodes[0].
		let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
		assert_eq!(pending_broadcast_messages.len(), 1);
	}

	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	nodes[0].node.peer_disconnected(nodes[2].node.get_our_node_id());
	nodes[2].node.peer_disconnected(nodes[0].node.get_our_node_id());

	// With no peers connected, there is still nothing to broadcast to.
	let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(node_0_events.len(), 0);

	// Reconnect nodes[2] — a peer unrelated to the closed channel.
	nodes[0].node.peer_connected(nodes[2].node.get_our_node_id(), &msgs::Init {
		features: nodes[2].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[2].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();

	// Once any peer is connected the cached channel_update is released as a broadcast...
	let node_0_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(node_0_events.len(), 1);
	match &node_0_events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => (),
		_ => panic!("Unexpected event"),
	}
	{
		// ...and removed from the internal cache.
		let pending_broadcast_messages= nodes[0].node.pending_broadcast_messages.lock().unwrap();
		assert_eq!(pending_broadcast_messages.len(), 0);
	}
}
14960
// Test that a disconnected peer's per-peer state entry is retained while it still has a
// (force-closed but not yet fully cleaned up) channel, and is only removed by a subsequent
// timer tick.
#[test]
fn test_drop_disconnected_peers_when_removing_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);

	// Disconnect first, then force-close the channel with the (now offline) peer.
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	let chan_id = nodes[0].node.list_channels()[0].channel_id;
	let error_message = "Channel force-closed";
	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_added_monitors!(nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 1_000_000);

	{
		// Immediately after the close, the peer entry is still present.
		let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
		assert_eq!(nodes_0_per_peer_state.len(), 1);
		assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
	}

	nodes[0].node.timer_tick_occurred();

	{
		// The timer tick removes the channel-less, disconnected peer's entry.
		assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
	}
}
14994
// Test that disconnecting a peer with only an unfunded (pre-funding-transaction) channel
// closes the channel with `ClosureReason::DisconnectedPeer` and immediately drops the
// per-peer state entry on both sides — no timer tick needed.
#[test]
fn test_drop_peers_when_removing_unfunded_channels() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	// Stop the channel open after open_channel/accept_channel, before any funding exists.
	exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0);
	let events = nodes[0].node.get_and_clear_pending_events();
	assert_eq!(events.len(), 1, "Unexpected events {:?}", events);
	match events[0] {
		Event::FundingGenerationReady { .. } => {}
		_ => panic!("Unexpected event {:?}", events),
	}

	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 1_000_000);
	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 1_000_000);

	// Both sides drop the peer entry right away.
	assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
	assert_eq!(nodes[1].node.per_peer_state.read().unwrap().len(), 0);
}
15019
// Test that `inbound_payment::verify` rejects a payment_hash which was not generated from
// our inbound payment key (simulated here by corrupting a valid hash), and accepts the
// original, unmodified hash.
#[test]
fn bad_inbound_payment_hash() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
	let payment_data = msgs::FinalOnionHopData {
		payment_secret,
		total_msat: 100_000,
	};

	// Flip the first byte so the hash no longer matches what our key would derive.
	let mut bad_payment_hash = payment_hash.clone();
	bad_payment_hash.0[0] += 1;
	match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
		Ok(_) => panic!("Unexpected ok"),
		Err(()) => {
			// The rejection is logged as a user-generated payment_hash failure.
			nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
		}
	}

	// The untouched hash still verifies successfully.
	assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
}
15048
// Test that the `outpoint_to_peer` map is populated when the funding transaction is created
// (funder side) / received (fundee side), persists through a cooperative close negotiation,
// and is drained on each side once that side's close completes.
#[test]
fn test_outpoint_to_peer_coverage() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None, None).unwrap();
	let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel);
	let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);

	let (temporary_channel_id, tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
	let channel_id = ChannelId::from_bytes(tx.compute_txid().to_byte_array());
	{
		// Before a funding transaction exists there is no outpoint to track on either side.
		assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);
		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
	}

	nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
	{
		// The funder tracks the funding outpoint as soon as it provides the funding tx...
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}

	// ...while the fundee has not seen it yet.
	assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);

	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
	{
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}
	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

	{
		// The fundee tracks the outpoint once it has processed funding_created.
		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}
	check_added_monitors!(nodes[1], 1);
	let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
	check_added_monitors!(nodes[0], 1);
	expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
	let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
	let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);

	// Begin a cooperative close. Both entries persist through shutdown and the first
	// closing_signed exchange.
	nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
	nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
	let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &nodes_1_shutdown);

	let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0);
	{
		let nodes_0_lock = nodes[0].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_0_lock.len(), 1);
		assert!(nodes_0_lock.contains_key(&funding_output));
	}

	{
		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}

	nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
	{
		// nodes[0] has now finished its side of the negotiation and drops its entry, while
		// nodes[1] still awaits the final closing_signed.
		assert_eq!(nodes[0].node.outpoint_to_peer.lock().unwrap().len(), 0);

		let nodes_1_lock = nodes[1].node.outpoint_to_peer.lock().unwrap();
		assert_eq!(nodes_1_lock.len(), 1);
		assert!(nodes_1_lock.contains_key(&funding_output));
	}

	let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());

	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
	{
		// After handling the final closing_signed, nodes[1] drops its entry as well.
		assert_eq!(nodes[1].node.outpoint_to_peer.lock().unwrap().len(), 0);
	}
	let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());

	check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
}
15167
15168 fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15169 let expected_message = format!("Not connected to node: {}", expected_public_key);
15170 check_api_error_message(expected_message, res_err)
15171 }
15172
15173 fn check_unkown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
15174 let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
15175 check_api_error_message(expected_message, res_err)
15176 }
15177
15178 fn check_channel_unavailable_error<T>(res_err: Result<T, APIError>, expected_channel_id: ChannelId, peer_node_id: PublicKey) {
15179 let expected_message = format!("Channel with id {} not found for the passed counterparty node_id {}", expected_channel_id, peer_node_id);
15180 check_api_error_message(expected_message, res_err)
15181 }
15182
15183 fn check_api_misuse_error<T>(res_err: Result<T, APIError>) {
15184 let expected_message = "No such channel awaiting to be accepted.".to_string();
15185 check_api_error_message(expected_message, res_err)
15186 }
15187
15188 fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
15189 match res_err {
15190 Err(APIError::APIMisuseError { err }) => {
15191 assert_eq!(err, expected_err_message);
15192 },
15193 Err(APIError::ChannelUnavailable { err }) => {
15194 assert_eq!(err, expected_err_message);
15195 },
15196 Ok(_) => panic!("Unexpected Ok"),
15197 Err(_) => panic!("Unexpected Error"),
15198 }
15199 }
15200
// Test that channel-related API calls return the expected errors when passed a counterparty
// node_id for which we have no peer state: `create_channel` requires an actual connection,
// while the remaining calls report an unknown peer.
#[test]
fn test_api_calls_with_unkown_counterparty_node() {
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	// A pubkey we have never connected to.
	let channel_id = ChannelId::from_bytes([4; 32]);
	let unkown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
	let intercept_id = InterceptId([0; 32]);
	let error_message = "Channel force-closed";

	check_not_connected_to_peer_error(nodes[0].node.create_channel(unkown_public_key, 1_000_000, 500_000_000, 42, None, None), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unkown_public_key, 42), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.close_channel(&channel_id, &unkown_public_key), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unkown_public_key, error_message.to_string()), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unkown_public_key, 1_000_000), unkown_public_key);

	check_unkown_peer_error(nodes[0].node.update_channel_config(&unkown_public_key, &[channel_id], &ChannelConfig::default()), unkown_public_key);
}
15232
// Test that channel-related API calls return the expected errors when the counterparty is
// known but the passed channel_id does not exist: `accept_inbound_channel` reports API
// misuse, the rest report the channel as unavailable.
#[test]
fn test_api_calls_with_unavailable_channel() {
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);

	let counterparty_node_id = nodes[1].node.get_our_node_id();

	// A channel_id which no channel with nodes[1] actually has.
	let channel_id = ChannelId::from_bytes([4; 32]);
	let error_message = "Channel force-closed";

	check_api_misuse_error(nodes[0].node.accept_inbound_channel(&channel_id, &counterparty_node_id, 42));

	check_channel_unavailable_error(nodes[0].node.close_channel(&channel_id, &counterparty_node_id), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id, error_message.to_string()), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.forward_intercepted_htlc(InterceptId([0; 32]), &channel_id, counterparty_node_id, 1_000_000), channel_id, counterparty_node_id);

	check_channel_unavailable_error(nodes[0].node.update_channel_config(&counterparty_node_id, &[channel_id], &ChannelConfig::default()), channel_id, counterparty_node_id);
}
15263
// Test the DoS-resistance limits enforced by `ChannelManager`: the cap on unfunded inbound
// channels per peer (MAX_UNFUNDED_CHANS_PER_PEER), the cap on connected peers with no funded
// channel (MAX_NO_CHANNEL_PEERS), and the cap on distinct peers with pending unfunded
// inbound channels (MAX_UNFUNDED_CHANNEL_PEERS).
#[test]
fn test_connection_limiting() {
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Open MAX_UNFUNDED_CHANS_PER_PEER inbound channels on nodes[1], completing the funding
	// flow for the first one only (so one channel becomes fundable, the rest stay unfunded).
	let mut funding_tx = None;
	for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
		nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
		let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());

		if idx == 0 {
			nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &accept_channel);
			let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
			funding_tx = Some(tx.clone());
			nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), tx).unwrap();
			let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());

			nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
			check_added_monitors!(nodes[1], 1);
			expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());

			let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());

			nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed);
			check_added_monitors!(nodes[0], 1);
			expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
		}
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}

	// One additional open_channel from the same peer is rejected with an error message.
	open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(
		&nodes[0].keys_manager);
	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// Connect further no-channel peers up to the MAX_NO_CHANNEL_PEERS limit (the loop starts
	// at 1 — nodes[0] already occupies one slot, as the final `unwrap_err` below shows).
	let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
	for _ in 1..super::MAX_NO_CHANNEL_PEERS {
		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
		peer_pks.push(random_pk);
		nodes[1].node.peer_connected(random_pk, &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
	}
	// At the limit, one more inbound connection is rejected.
	let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
		&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
	nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap_err();

	// Disconnecting nodes[0] frees a slot and closes its unfunded channels (all but the one
	// which completed the funding flow above).
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
	assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
	for ev in chan_closed_events {
		if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
	}
	// The freed slot can be taken by the previously-rejected peer...
	nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	// ...after which an inbound connection from nodes[0] is rejected again...
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap_err();

	// ...but an outbound connection (inbound flag == false) is still accepted.
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());

	// Only up to MAX_UNFUNDED_CHANNEL_PEERS peers may have pending unfunded inbound
	// channels; open one channel from each peer until that limit is hit.
	assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
	for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
		nodes[1].node.handle_open_channel(peer_pks[i], &open_channel_msg);
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}
	// The next peer's open_channel is rejected with an error...
	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// ...though opening an outbound channel to that same peer is still allowed.
	nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None, None).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);

	// Once nodes[0]'s first channel's funding tx is mined, reconnecting nodes[0] succeeds
	// (it answers with channel_reestablish for the now-funded channel)...
	mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());

	// ...and the previously-rejected peer's open_channel is now accepted.
	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
15378
#[test]
fn test_outbound_chans_unlimited() {
	// An unfunded-channel-limited peer may still open outbound channels: only *inbound*
	// unfunded channels are capped at MAX_UNFUNDED_CHANS_PER_PEER.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let node_a_id = nodes[0].node.get_our_node_id();
	let node_b_id = nodes[1].node.get_our_node_id();

	nodes[0].node.create_channel(node_b_id, 100_000, 0, 42, None, None).unwrap();
	let mut open_chan_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);

	// Fill node B's per-peer unfunded-inbound-channel allowance, giving each open_channel a
	// fresh temporary_channel_id so it registers as a distinct channel.
	for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
		nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id);
		open_chan_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}

	// At the limit, one more inbound open_channel is rejected with an error for that channel.
	nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
	assert_eq!(get_err_msg(&nodes[1], &node_a_id).channel_id,
		open_chan_msg.common_fields.temporary_channel_id);

	// Node B can still initiate an outbound channel to the same peer...
	nodes[1].node.create_channel(node_a_id, 100_000, 0, 42, None, None).unwrap();
	get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, node_a_id);

	// ...while further inbound opens from that peer keep being rejected.
	nodes[1].node.handle_open_channel(node_a_id, &open_chan_msg);
	assert_eq!(get_err_msg(&nodes[1], &node_a_id).channel_id,
		open_chan_msg.common_fields.temporary_channel_id);
}
15413
#[test]
fn test_0conf_limiting() {
	// With manual channel acceptance enabled, once MAX_UNFUNDED_CHANNEL_PEERS - 1 peers hold
	// unfunded channels, a further peer's channel cannot be accepted via
	// `accept_inbound_channel` but CAN still be accepted via
	// `accept_inbound_channel_from_trusted_peer_0conf`.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut settings = test_default_channel_config();
	settings.manually_accept_inbound_channels = true;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Fill all but one of the unfunded-channel peer slots: each distinct random peer connects,
	// opens one channel, and we manually accept it (it stays unfunded).
	for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
		let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
			&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
		nodes[1].node.peer_connected(random_pk, &msgs::Init {
			features: nodes[0].node.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();

		nodes[1].node.handle_open_channel(random_pk, &open_channel_msg);
		let events = nodes[1].node.get_and_clear_pending_events();
		match events[0] {
			Event::OpenChannelRequest { temporary_channel_id, .. } => {
				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
			}
			_ => panic!("Unexpected event"),
		}
		get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
		// Fresh temporary_channel_id so the next open_channel reads as a new channel.
		open_channel_msg.common_fields.temporary_channel_id = ChannelId::temporary_from_entropy_source(&nodes[0].keys_manager);
	}

	// One more peer's open_channel still surfaces an OpenChannelRequest, but a normal
	// acceptance is refused because the unfunded-channel-peer limit is reached...
	let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
		&SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
	nodes[1].node.peer_connected(last_random_pk, &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
				Err(APIError::APIMisuseError { err }) =>
					assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
				_ => panic!(),
			}
		}
		_ => panic!("Unexpected event"),
	}
	// ...and the failed acceptance sends the peer an error for that channel.
	assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
		open_channel_msg.common_fields.temporary_channel_id);

	// Retrying the same open as a trusted 0conf acceptance succeeds despite the limit.
	nodes[1].node.handle_open_channel(last_random_pk, &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
	match events[0] {
		Event::OpenChannelRequest { temporary_channel_id, .. } => {
			nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
		}
		_ => panic!("Unexpected event"),
	}
	get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
}
15482
#[test]
fn reject_excessively_underpaying_htlcs() {
	// An HTLC that delivers less than the onion's claimed amount (beyond any allowed extra
	// fee) is rejected; delivering exactly claimed-minus-allowed-fee is accepted.
	let chanmon_cfg = create_chanmon_cfgs(1);
	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
	let node = create_network(1, &node_cfg, &node_chanmgr);
	let sender_intended_amt_msat = 100;
	let extra_fee_msat = 10;
	// Both probes below use an identical onion payload claiming 100 msat was intended.
	let build_hop_data = || msgs::InboundOnionPayload::Receive {
		sender_intended_htlc_amt_msat: 100,
		cltv_expiry_height: 42,
		payment_metadata: None,
		keysend_preimage: None,
		payment_data: Some(msgs::FinalOnionHopData {
			payment_secret: PaymentSecret([0; 32]), total_msat: sender_intended_amt_msat,
		}),
		custom_tlvs: Vec::new(),
	};

	// One msat below the permitted floor must fail with error code 19.
	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
	if let Err(crate::ln::channelmanager::InboundHTLCErr { err_code, .. }) =
		create_recv_pending_htlc_info(build_hop_data(), [0; 32], PaymentHash([0; 32]),
			sender_intended_amt_msat - extra_fee_msat - 1, 42, None, true, Some(extra_fee_msat),
			current_height)
	{
		assert_eq!(err_code, 19);
	} else { panic!(); }

	// Exactly claimed amount minus the allowed extra fee is accepted.
	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
	assert!(create_recv_pending_htlc_info(build_hop_data(), [0; 32], PaymentHash([0; 32]),
		sender_intended_amt_msat - extra_fee_msat, 42, None, true, Some(extra_fee_msat),
		current_height).is_ok());
}
15528
#[test]
fn test_final_incorrect_cltv() {
	let chanmon_cfg = create_chanmon_cfgs(1);
	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
	let node_chanmgr = create_node_chanmgrs(1, &node_cfg, &[None]);
	let node = create_network(1, &node_cfg, &node_chanmgr);

	let current_height: u32 = node[0].node.best_block.read().unwrap().height;
	// Final-hop payload whose claimed cltv_expiry_height (22) is below the HTLC's actual
	// cltv_expiry (23, passed to create_recv_pending_htlc_info below).
	let payload = msgs::InboundOnionPayload::Receive {
		sender_intended_htlc_amt_msat: 100,
		cltv_expiry_height: 22,
		payment_metadata: None,
		keysend_preimage: None,
		payment_data: Some(msgs::FinalOnionHopData {
			payment_secret: PaymentSecret([0; 32]), total_msat: 100,
		}),
		custom_tlvs: Vec::new(),
	};
	let result = create_recv_pending_htlc_info(
		payload, [0; 32], PaymentHash([0; 32]), 100, 23, None, true, None, current_height);

	// An HTLC expiry larger than the onion's claimed height must not be rejected.
	assert!(result.is_ok());
}
15553
#[test]
fn test_inbound_anchors_manual_acceptance() {
	// An inbound anchors-channel open is rejected outright by a node without manual channel
	// acceptance, but surfaced as an OpenChannelRequest to a manually-accepting node.
	let mut anchors_cfg = test_default_channel_config();
	anchors_cfg.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;

	let mut anchors_manual_accept_cfg = anchors_cfg.clone();
	anchors_manual_accept_cfg.manually_accept_inbound_channels = true;

	let chanmon_cfgs = create_chanmon_cfgs(3);
	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs,
		&[Some(anchors_cfg.clone()), Some(anchors_cfg.clone()), Some(anchors_manual_accept_cfg.clone())]);
	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
	let node_a_id = nodes[0].node.get_our_node_id();

	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());

	// Node 1 does not manually accept: the anchors open produces no event, only an error back.
	nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg);
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
	if let MessageSendEvent::HandleError { node_id, action } = &msg_events[0] {
		assert_eq!(*node_id, node_a_id);
		if let ErrorAction::SendErrorMessage { msg } = action {
			assert_eq!(msg.data, "No channels with anchor outputs accepted".to_owned());
		} else { panic!("Unexpected error action"); }
	} else { panic!("Unexpected event"); }

	// Node 2 manually accepts: the same open surfaces an OpenChannelRequest we can accept.
	nodes[2].node.handle_open_channel(node_a_id, &open_channel_msg);
	let events = nodes[2].node.get_and_clear_pending_events();
	if let Event::OpenChannelRequest { temporary_channel_id, .. } = events[0] {
		nodes[2].node.accept_inbound_channel(&temporary_channel_id, &node_a_id, 23).unwrap();
	} else { panic!("Unexpected event"); }
	get_event_msg!(nodes[2], MessageSendEvent::SendAcceptChannel, node_a_id);
}
15597
#[test]
fn test_anchors_zero_fee_htlc_tx_fallback() {
	// If the counterparty rejects (force-closes) an anchors channel open, the opener retries
	// the open without the anchors channel type.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let mut anchors_config = test_default_channel_config();
	anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
	anchors_config.manually_accept_inbound_channels = true;
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let node_a_id = nodes[0].node.get_our_node_id();
	let node_b_id = nodes[1].node.get_our_node_id();
	let error_message = "Channel force-closed";

	nodes[0].node.create_channel(node_b_id, 100_000, 0, 0, None, None).unwrap();
	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
	// The first attempt negotiates the anchors channel type.
	assert!(open_channel_msg.common_fields.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());

	// The counterparty force-closes the pending channel instead of accepting it...
	nodes[1].node.handle_open_channel(node_a_id, &open_channel_msg);
	let events = nodes[1].node.get_and_clear_pending_events();
	if let Event::OpenChannelRequest { temporary_channel_id, .. } = events[0] {
		nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &node_a_id, error_message.to_string()).unwrap();
	} else { panic!("Unexpected event"); }

	// ...and upon receiving the resulting error, node 0 retries without anchors.
	let error_msg = get_err_msg(&nodes[1], &node_a_id);
	nodes[0].node.handle_error(node_b_id, &error_msg);

	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id);
	assert!(!open_channel_msg.common_fields.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());

	// The rejecting node never accepted anything, so it generated no further events.
	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
}
15635
#[test]
fn test_update_channel_config() {
	// Verifies when `update_channel_config` / `update_partial_channel_config` broadcast a
	// channel_update (only on an actual change) and that a bad channel id in a batch errors
	// without applying or broadcasting anything.
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let mut user_config = test_default_channel_config();
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);
	let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
	let channel = &nodes[0].node.list_channels()[0];

	// Re-applying the current config must not broadcast anything.
	nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);

	// Changing the base fee updates the stored config and broadcasts exactly one update.
	user_config.channel_config.forwarding_fee_base_msat += 10;
	nodes[0].node.update_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &user_config.channel_config).unwrap();
	assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_base_msat, user_config.channel_config.forwarding_fee_base_msat);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("expected BroadcastChannelUpdate event"),
	}

	// An all-`None` partial update is a no-op: no broadcast.
	nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate::default()).unwrap();
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);

	// A partial update of a single field takes effect and broadcasts once.
	let new_cltv_expiry_delta = user_config.channel_config.cltv_expiry_delta + 6;
	nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
		cltv_expiry_delta: Some(new_cltv_expiry_delta),
		..Default::default()
	}).unwrap();
	assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("expected BroadcastChannelUpdate event"),
	}

	// Updating a different field leaves the previously-set field untouched.
	let new_fee = user_config.channel_config.forwarding_fee_proportional_millionths + 100;
	nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id], &ChannelConfigUpdate {
		forwarding_fee_proportional_millionths: Some(new_fee),
		..Default::default()
	}).unwrap();
	assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().cltv_expiry_delta, new_cltv_expiry_delta);
	assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, new_fee);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 1);
	match &events[0] {
		MessageSendEvent::BroadcastChannelUpdate { .. } => {},
		_ => panic!("expected BroadcastChannelUpdate event"),
	}

	// Including an unknown channel id in the batch fails the whole call with
	// ChannelUnavailable; the valid channel's fee stays unchanged and nothing is broadcast.
	let bad_channel_id = ChannelId::v1_from_funding_txid(&[10; 32], 10);
	let current_fee = nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths;
	let new_fee = current_fee + 100;
	assert!(
		matches!(
			nodes[0].node.update_partial_channel_config(&channel.counterparty.node_id, &[channel.channel_id, bad_channel_id], &ChannelConfigUpdate {
				forwarding_fee_proportional_millionths: Some(new_fee),
				..Default::default()
			}),
			Err(APIError::ChannelUnavailable { err: _ }),
		)
	);
	assert_eq!(nodes[0].node.list_channels()[0].config.unwrap().forwarding_fee_proportional_millionths, current_fee);
	let events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(events.len(), 0);
}
15710
#[test]
fn test_payment_display() {
	// All three payment identifiers Display as the lowercase hex of their 32 bytes.
	let expected = "2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a";
	assert_eq!(PaymentId([42; 32]).to_string(), expected);
	assert_eq!(PaymentHash([42; 32]).to_string(), expected);
	assert_eq!(PaymentPreimage([42; 32]).to_string(), expected);
}
15720
#[test]
fn test_trigger_lnd_force_close() {
	// After we force-close while disconnected, reconnecting must make the (still-live) peer
	// force-close too: we reply to its channel_reestablish with one carrying commitment
	// numbers of 0, which the peer treats as an invalid reestablish and closes on.
	let chanmon_cfg = create_chanmon_cfgs(2);
	let node_cfg = create_node_cfgs(2, &chanmon_cfg);
	let user_config = test_default_channel_config();
	let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[Some(user_config), Some(user_config)]);
	let nodes = create_network(2, &node_cfg, &node_chanmgr);
	let error_message = "Channel force-closed";

	// Open a channel, disconnect both sides, then force-close on node 0 only.
	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1);
	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
	check_closed_broadcast(&nodes[0], 1, true);
	check_added_monitors(&nodes[0], 1);
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000);
	{
		// Node 0 broadcast its latest commitment transaction, spending the funding output.
		let txn = nodes[0].tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		check_spends!(txn[0], funding_tx);
	}

	// Reconnect. Node 1 still considers the channel open and sends a channel_reestablish;
	// node 0 has nothing to send until it handles that reestablish.
	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	}, true).unwrap();
	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
	}, false).unwrap();
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	let channel_reestablish = get_event_msg!(
		nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()
	);
	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &channel_reestablish);

	// Node 0 replies with a channel_reestablish whose commitment numbers are both 0 — the
	// non-standard "please force-close" signal.
	let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
	assert_eq!(msg_events.len(), 2);
	if let MessageSendEvent::SendChannelReestablish { node_id, msg } = &msg_events[0] {
		assert_eq!(*node_id, nodes[1].node.get_our_node_id());
		assert_eq!(msg.next_local_commitment_number, 0);
		assert_eq!(msg.next_remote_commitment_number, 0);
		nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &msg);
	} else { panic!() };
	// Receiving it, node 1 closes the channel as a ProcessingError and broadcasts its own
	// commitment transaction.
	check_closed_broadcast(&nodes[1], 1, true);
	check_added_monitors(&nodes[1], 1);
	let expected_close_reason = ClosureReason::ProcessingError {
		err: "Peer sent an invalid channel_reestablish to force close in a non-standard way".to_string()
	};
	check_closed_event!(nodes[1], 1, expected_close_reason, [nodes[0].node.get_our_node_id()], 100000);
	{
		let txn = nodes[1].tx_broadcaster.txn_broadcast();
		assert_eq!(txn.len(), 1);
		check_spends!(txn[0], funding_tx);
	}
}
15782
#[test]
fn test_malformed_forward_htlcs_ser() {
	// Pending `FailHTLC` and `FailMalformedHTLC` forwards must survive a ChannelManager
	// serialization round-trip intact.
	let chanmon_cfg = create_chanmon_cfgs(1);
	let node_cfg = create_node_cfgs(1, &chanmon_cfg);
	let persister;
	let chain_monitor;
	let chanmgrs = create_node_chanmgrs(1, &node_cfg, &[None]);
	let deserialized_chanmgr;
	let mut nodes = create_network(1, &node_cfg, &chanmgrs);

	let failed = |htlc_id| HTLCForwardInfo::FailHTLC {
		htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] },
	};
	let malformed = |htlc_id| HTLCForwardInfo::FailMalformedHTLC {
		htlc_id, failure_code: 0x4000, sha256_of_onion: [0; 32],
	};

	// Two SCIDs carrying opposite interleavings of the two failure variants.
	let dummy_htlcs_1: Vec<HTLCForwardInfo> = (1..10)
		.map(|htlc_id| if htlc_id % 2 == 0 { failed(htlc_id) } else { malformed(htlc_id) })
		.collect();
	let dummy_htlcs_2: Vec<HTLCForwardInfo> = (1..10)
		.map(|htlc_id| if htlc_id % 2 == 1 { failed(htlc_id) } else { malformed(htlc_id) })
		.collect();

	let (scid_1, scid_2) = (42, 43);
	let mut forward_htlcs = new_hash_map();
	forward_htlcs.insert(scid_1, dummy_htlcs_1.clone());
	forward_htlcs.insert(scid_2, dummy_htlcs_2.clone());

	// Inject the pending forwards directly, releasing the lock before serializing.
	{
		let mut chanmgr_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
		*chanmgr_fwd_htlcs = forward_htlcs.clone();
	}

	reload_node!(nodes[0], nodes[0].node.encode(), &[], persister, chain_monitor, deserialized_chanmgr);

	// After reload, both SCIDs must hold exactly the HTLCs we injected, and nothing else.
	{
		let mut deserialized_fwd_htlcs = nodes[0].node.forward_htlcs.lock().unwrap();
		for scid in [scid_1, scid_2].iter() {
			let round_tripped = deserialized_fwd_htlcs.remove(scid).unwrap();
			assert_eq!(forward_htlcs.remove(scid).unwrap(), round_tripped);
		}
		assert!(deserialized_fwd_htlcs.is_empty());
	}

	expect_pending_htlcs_forwardable!(nodes[0]);
}
15839}
15840
#[cfg(ldk_bench)]
pub mod bench {
	//! Criterion benchmarks measuring a full payment round-trip (send, claim, and the
	//! commitment dance in both directions) between two in-memory nodes over one channel.
	use crate::chain::Listen;
	use crate::chain::chainmonitor::{ChainMonitor, Persist};
	use crate::sign::{KeysManager, InMemorySigner};
	use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
	use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
	use crate::ln::functional_test_utils::*;
	use crate::ln::msgs::{ChannelMessageHandler, Init};
	use crate::routing::gossip::NetworkGraph;
	use crate::routing::router::{PaymentParameters, RouteParameters};
	use crate::util::test_utils;
	use crate::util::config::{UserConfig, MaxDustHTLCExposure};

	use bitcoin::amount::Amount;
	use bitcoin::locktime::absolute::LockTime;
	use bitcoin::hashes::Hash;
	use bitcoin::hashes::sha256::Hash as Sha256;
	use bitcoin::{Transaction, TxOut};
	use bitcoin::transaction::Version;

	use crate::sync::{Arc, Mutex, RwLock};

	use criterion::Criterion;

	/// The concrete `ChannelManager` used by the benchmarks, generic only over the `Persist`er
	/// so `bench_two_sends` can be reused with different persistence backends.
	type Manager<'a, P> = ChannelManager<
		&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
			&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
			&'a test_utils::TestLogger, &'a P>,
		&'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
		&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
		&'a test_utils::TestMessageRouter<'a>, &'a test_utils::TestLogger>;

	/// Minimal `NodeHolder` wrapper so the functional-test macros (`get_event_msg!`, etc.)
	/// can be used against a bare `Manager` without a full test `Node`.
	struct ANodeHolder<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> {
		node: &'node_cfg Manager<'chan_mon_cfg, P>,
	}
	impl<'node_cfg, 'chan_mon_cfg: 'node_cfg, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'node_cfg, 'chan_mon_cfg, P> {
		type CM = Manager<'chan_mon_cfg, P>;
		#[inline]
		fn node(&self) -> &Manager<'chan_mon_cfg, P> { self.node }
		#[inline]
		// The bench nodes use a raw ChainMonitor, not the test wrapper.
		fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
	}

	/// Benchmarks two payments (one in each direction) using `TestPersister`s.
	pub fn bench_sends(bench: &mut Criterion) {
		bench_two_sends(bench, "bench_sends", test_utils::TestPersister::new(), test_utils::TestPersister::new());
	}

	/// Sets up two connected nodes with one funded, ready channel, then benchmarks one
	/// payment in each direction per iteration under `bench_name`.
	pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Criterion, bench_name: &str, persister_a: P, persister_b: P) {
		let network = bitcoin::Network::Testnet;
		let genesis_block = bitcoin::constants::genesis_block(network);

		// Shared test infrastructure: one broadcaster/fee estimator/router pair serves both
		// nodes; the router and message router are built around node A's logger.
		let tx_broadcaster = test_utils::TestBroadcaster::new(network);
		let fee_estimator = test_utils::TestFeeEstimator::new(253);
		let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
		let scorer = RwLock::new(test_utils::TestScorer::new());
		let entropy = test_utils::TestKeysInterface::new(&[0u8; 32], network);
		let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &logger_a, &scorer);
		let message_router = test_utils::TestMessageRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &entropy);

		let mut config: UserConfig = Default::default();
		config.channel_config.max_dust_htlc_exposure = MaxDustHTLCExposure::FeeRateMultiplier(5_000_000 / 253);
		// One confirmation suffices so a single connected block makes the channel ready.
		config.channel_handshake_config.minimum_depth = 1;

		let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
		let seed_a = [1u8; 32];
		let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
		let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &message_router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_network(network),
		}, genesis_block.header.time);
		let node_a_holder = ANodeHolder { node: &node_a };

		// Node B gets its own logger id and logger instance (previously both were copy-pasted
		// from node A, misattributing node B's log output).
		let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
		let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
		let seed_b = [2u8; 32];
		let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
		let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &message_router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
			network,
			best_block: BestBlock::from_network(network),
		}, genesis_block.header.time);
		let node_b_holder = ANodeHolder { node: &node_b };

		// Connect the peers and negotiate an 8M-sat channel with 100k msat pushed to node B.
		node_a.peer_connected(node_b.get_our_node_id(), &Init {
			features: node_b.init_features(), networks: None, remote_network_address: None
		}, true).unwrap();
		node_b.peer_connected(node_a.get_our_node_id(), &Init {
			features: node_a.init_features(), networks: None, remote_network_address: None
		}, false).unwrap();
		node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None, None).unwrap();
		node_b.handle_open_channel(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
		node_a.handle_accept_channel(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));

		// Hand the manager an (inputless) funding transaction paying the channel script.
		let tx;
		if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
			tx = Transaction { version: Version::TWO, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
				value: Amount::from_sat(8_000_000), script_pubkey: output_script,
			}]};
			node_a.funding_transaction_generated(temporary_channel_id, node_b.get_our_node_id(), tx.clone()).unwrap();
		} else { panic!(); }

		node_b.handle_funding_created(node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
		let events_b = node_b.get_and_clear_pending_events();
		assert_eq!(events_b.len(), 1);
		match events_b[0] {
			Event::ChannelPending{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		node_a.handle_funding_signed(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
		let events_a = node_a.get_and_clear_pending_events();
		assert_eq!(events_a.len(), 1);
		match events_a[0] {
			Event::ChannelPending{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);

		// Confirm the funding tx in one block (minimum_depth == 1) on both nodes.
		let block = create_dummy_block(BestBlock::from_network(network).block_hash, 42, vec![tx]);
		Listen::block_connected(&node_a, &block, 1);
		Listen::block_connected(&node_b, &block, 1);

		// Exchange channel_ready + channel_update both ways so the channel is usable.
		node_a.handle_channel_ready(node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
		let msg_events = node_a.get_and_clear_pending_msg_events();
		assert_eq!(msg_events.len(), 2);
		match msg_events[0] {
			MessageSendEvent::SendChannelReady { ref msg, .. } => {
				node_b.handle_channel_ready(node_a.get_our_node_id(), msg);
				get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
			},
			_ => panic!(),
		}
		match msg_events[1] {
			MessageSendEvent::SendChannelUpdate { .. } => {},
			_ => panic!(),
		}

		let events_a = node_a.get_and_clear_pending_events();
		assert_eq!(events_a.len(), 1);
		match events_a[0] {
			Event::ChannelReady{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		let events_b = node_b.get_and_clear_pending_events();
		assert_eq!(events_b.len(), 1);
		match events_b[0] {
			Event::ChannelReady{ ref counterparty_node_id, .. } => {
				assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
			},
			_ => panic!("Unexpected event"),
		}

		// Each invocation sends and claims one 10k-msat payment, driving the full HTLC
		// add/commit/revoke/fulfill message exchange. A unique preimage per payment is derived
		// from a monotonically increasing counter.
		let mut payment_count: u64 = 0;
		macro_rules! send_payment {
			($node_a: expr, $node_b: expr) => {
				let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
					.with_bolt11_features($node_b.bolt11_invoice_features()).unwrap();
				let mut payment_preimage = PaymentPreimage([0; 32]);
				payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
				payment_count += 1;
				let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
				let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();

				$node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
					PaymentId(payment_hash.0),
					RouteParameters::from_payment_params_and_value(payment_params, 10_000),
					Retry::Attempts(0)).unwrap();
				let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
				$node_b.handle_update_add_htlc($node_a.get_our_node_id(), &payment_event.msgs[0]);
				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &payment_event.commitment_msg);
				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &raa);
				$node_a.handle_commitment_signed($node_b.get_our_node_id(), &cs);
				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));

				expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
				expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
				$node_b.claim_funds(payment_preimage);
				expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);

				match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
					MessageSendEvent::UpdateHTLCs { node_id, updates } => {
						assert_eq!(node_id, $node_a.get_our_node_id());
						$node_a.handle_update_fulfill_htlc($node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
						$node_a.handle_commitment_signed($node_b.get_our_node_id(), &updates.commitment_signed);
					},
					_ => panic!("Failed to generate claim event"),
				}

				let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
				$node_b.handle_revoke_and_ack($node_a.get_our_node_id(), &raa);
				$node_b.handle_commitment_signed($node_a.get_our_node_id(), &cs);
				$node_a.handle_revoke_and_ack($node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));

				expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
			}
		}

		bench.bench_function(bench_name, |b| b.iter(|| {
			send_payment!(node_a, node_b);
			send_payment!(node_b, node_a);
		}));
	}
}