1use bitcoin::amount::Amount;
13use bitcoin::constants::ChainHash;
14
15use bitcoin::secp256k1;
16use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
17use bitcoin::secp256k1::Secp256k1;
18use bitcoin::secp256k1::{PublicKey, Verification};
19
20use bitcoin::hashes::sha256d::Hash as Sha256dHash;
21use bitcoin::hashes::Hash;
22use bitcoin::network::Network;
23
24use crate::ln::msgs;
25use crate::ln::msgs::{
26 BaseMessageHandler, ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement,
27};
28use crate::ln::msgs::{
29 DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress,
30 MAX_VALUE_MSAT,
31};
32use crate::ln::msgs::{
33 MessageSendEvent, QueryChannelRange, QueryShortChannelIds, ReplyChannelRange,
34 ReplyShortChannelIdsEnd,
35};
36use crate::ln::types::ChannelId;
37use crate::routing::utxo::{self, UtxoLookup, UtxoResolver};
38use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures};
39use crate::types::string::PrintableString;
40use crate::util::indexed_map::{
41 Entry as IndexedMapEntry, IndexedMap, OccupiedEntry as IndexedMapOccupiedEntry,
42};
43use crate::util::logger::{Level, Logger};
44use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
45use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};
46
47use crate::io;
48use crate::io_extras::{copy, sink};
49use crate::prelude::*;
50use crate::sync::Mutex;
51use crate::sync::{LockTestExt, RwLock, RwLockReadGuard};
52use core::ops::{Bound, Deref};
53use core::str::FromStr;
54use core::sync::atomic::{AtomicUsize, Ordering};
55use core::{cmp, fmt};
56
57pub use lightning_types::routing::RoutingFees;
58
59#[cfg(feature = "std")]
60use std::time::{SystemTime, UNIX_EPOCH};
61
/// We consider a channel's `channel_update`s stale once they are older than two weeks; such
/// channels become candidates for pruning.
const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;

/// How long we keep tracking removed channels and nodes so that stale gossip cannot immediately
/// re-add them to the graph.
const REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 7;

/// Gossip messages carrying more than this many bytes of excess (unparsed) data are stored but
/// not relayed to other peers.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;

/// Maximum number of short_channel_ids packed into a single reply_channel_range message.
const MAX_SCIDS_PER_REPLY: usize = 8000;
76
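/// A `NodeId` is simply the 33-byte compressed-pubkey serialization of a node's public key, kept
/// as raw bytes so it can be hashed and compared cheaply without curve operations.
///
/// A minimal conversion sketch (illustrative only; the key below is an arbitrary test key):
///
/// ```ignore
/// use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
///
/// let secp_ctx = Secp256k1::new();
/// let pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
/// let node_id = NodeId::from_pubkey(&pubkey);
/// assert_eq!(node_id.as_slice().len(), 33);
/// // Only the NodeId -> PublicKey direction can fail, as not all 33-byte strings are valid keys.
/// assert_eq!(PublicKey::try_from(node_id).unwrap(), pubkey);
/// ```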
77#[derive(Clone, Copy, PartialEq, Eq)]
84pub struct NodeId([u8; PUBLIC_KEY_SIZE]);
85
86impl NodeId {
87 pub fn from_pubkey(pubkey: &PublicKey) -> Self {
89 NodeId(pubkey.serialize())
90 }
91
92 pub fn from_slice(bytes: &[u8]) -> Result<Self, DecodeError> {
94 if bytes.len() != PUBLIC_KEY_SIZE {
95 return Err(DecodeError::InvalidValue);
96 }
97 let mut data = [0; PUBLIC_KEY_SIZE];
98 data.copy_from_slice(bytes);
99 Ok(NodeId(data))
100 }
101
102 pub fn as_slice(&self) -> &[u8] {
104 &self.0
105 }
106
107 pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] {
109 &self.0
110 }
111
112 pub fn as_pubkey(&self) -> Result<PublicKey, secp256k1::Error> {
114 PublicKey::from_slice(&self.0)
115 }
116}
117
118impl fmt::Debug for NodeId {
119 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
120 write!(f, "NodeId({})", crate::util::logger::DebugBytes(&self.0))
121 }
122}
123impl fmt::Display for NodeId {
124 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
125 crate::util::logger::DebugBytes(&self.0).fmt(f)
126 }
127}
128
129impl core::hash::Hash for NodeId {
130 fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
131 self.0.hash(hasher);
132 }
133}
134
135impl cmp::PartialOrd for NodeId {
136 fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
137 Some(self.cmp(other))
138 }
139}
140
141impl Ord for NodeId {
142 fn cmp(&self, other: &Self) -> cmp::Ordering {
143 self.0[..].cmp(&other.0[..])
144 }
145}
146
147impl Writeable for NodeId {
148 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
149 writer.write_all(&self.0)?;
150 Ok(())
151 }
152}
153
154impl Readable for NodeId {
155 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
156 let mut buf = [0; PUBLIC_KEY_SIZE];
157 reader.read_exact(&mut buf)?;
158 Ok(Self(buf))
159 }
160}
161
162impl From<PublicKey> for NodeId {
163 fn from(pubkey: PublicKey) -> Self {
164 Self::from_pubkey(&pubkey)
165 }
166}
167
168impl TryFrom<NodeId> for PublicKey {
169 type Error = secp256k1::Error;
170
171 fn try_from(node_id: NodeId) -> Result<Self, Self::Error> {
172 node_id.as_pubkey()
173 }
174}
175
176impl FromStr for NodeId {
177 type Err = bitcoin::hex::parse::HexToArrayError;
178
179 fn from_str(s: &str) -> Result<Self, Self::Err> {
180 let data: [u8; PUBLIC_KEY_SIZE] = bitcoin::hex::FromHex::from_hex(s)?;
181 Ok(NodeId(data))
182 }
183}
184
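/// The network graph itself: channels and nodes learned from gossip, held behind locks so the
/// router and the gossip-sync machinery can share it.
///
/// A rough usage sketch (illustrative; `logger` stands in for any `Logger` implementation and is
/// not defined here):
///
/// ```ignore
/// use bitcoin::network::Network;
/// use lightning::routing::gossip::NetworkGraph;
/// use lightning::util::ser::{ReadableArgs, Writeable};
///
/// let graph = NetworkGraph::new(Network::Bitcoin, &logger);
/// // Persist with `Writeable` and reload with `ReadableArgs`; the logger is passed back in on
/// // read since it is not serialized with the graph.
/// let bytes = graph.encode();
/// let restored = NetworkGraph::read(&mut bytes.as_slice(), &logger).unwrap();
/// assert!(graph == restored);
/// ```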
185pub struct NetworkGraph<L: Deref>
187where
188 L::Target: Logger,
189{
190 secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
191 last_rapid_gossip_sync_timestamp: Mutex<Option<u32>>,
192 chain_hash: ChainHash,
193 logger: L,
194 channels: RwLock<IndexedMap<u64, ChannelInfo>>,
196 nodes: RwLock<IndexedMap<NodeId, NodeInfo>>,
197 removed_node_counters: Mutex<Vec<u32>>,
198 next_node_counter: AtomicUsize,
199 removed_channels: Mutex<HashMap<u64, Option<u64>>>,
214 removed_nodes: Mutex<HashMap<NodeId, Option<u64>>>,
218 pub(super) pending_checks: utxo::PendingChecks,
220}
221
222pub struct ReadOnlyNetworkGraph<'a> {
224 channels: RwLockReadGuard<'a, IndexedMap<u64, ChannelInfo>>,
225 nodes: RwLockReadGuard<'a, IndexedMap<NodeId, NodeInfo>>,
226 max_node_counter: u32,
227}
228
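/// An update to the [`NetworkGraph`] derived from payment failures, as surfaced through event
/// handling.
///
/// A small sketch of applying one by hand (illustrative; `graph` is assumed to be an existing
/// [`NetworkGraph`] and `failing_scid` the short channel id reported in a failure):
///
/// ```ignore
/// let update = NetworkUpdate::ChannelFailure {
/// 	short_channel_id: failing_scid,
/// 	is_permanent: true,
/// };
/// // Permanent channel failures remove the channel from the graph entirely.
/// graph.handle_network_update(&update);
/// ```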
229#[derive(Clone, Debug, PartialEq, Eq)]
234pub enum NetworkUpdate {
235 ChannelFailure {
238 short_channel_id: u64,
240 is_permanent: bool,
243 },
244 NodeFailure {
247 node_id: PublicKey,
249 is_permanent: bool,
252 },
253}
254
255impl Writeable for NetworkUpdate {
256 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
257 match self {
258 Self::ChannelFailure { short_channel_id, is_permanent } => {
259 2u8.write(writer)?;
260 write_tlv_fields!(writer, {
261 (0, short_channel_id, required),
262 (2, is_permanent, required),
263 });
264 },
265 Self::NodeFailure { node_id, is_permanent } => {
266 4u8.write(writer)?;
267 write_tlv_fields!(writer, {
268 (0, node_id, required),
269 (2, is_permanent, required),
270 });
271 },
272 }
273 Ok(())
274 }
275}
276
277impl MaybeReadable for NetworkUpdate {
278 fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
279 let id: u8 = Readable::read(reader)?;
280 match id {
281 0 => {
282 let mut msg: RequiredWrapper<ChannelUpdate> = RequiredWrapper(None);
285 read_tlv_fields!(reader, {
286 (0, msg, required),
287 });
288 Ok(Some(Self::ChannelFailure {
289 short_channel_id: msg.0.unwrap().contents.short_channel_id,
290 is_permanent: false,
291 }))
292 },
293 2 => {
294 _init_and_read_len_prefixed_tlv_fields!(reader, {
295 (0, short_channel_id, required),
296 (2, is_permanent, required),
297 });
298 Ok(Some(Self::ChannelFailure {
299 short_channel_id: short_channel_id.0.unwrap(),
300 is_permanent: is_permanent.0.unwrap(),
301 }))
302 },
303 4 => {
304 _init_and_read_len_prefixed_tlv_fields!(reader, {
305 (0, node_id, required),
306 (2, is_permanent, required),
307 });
308 Ok(Some(Self::NodeFailure {
309 node_id: node_id.0.unwrap(),
310 is_permanent: is_permanent.0.unwrap(),
311 }))
312 },
313 t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature),
314 _ => Ok(None),
315 }
316 }
317}
318
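/// Receives and validates gossip from peers, applying it to the [`NetworkGraph`] and relaying
/// messages that pass the excess-data checks.
///
/// A construction sketch (illustrative; `graph` and `logger` are assumed to exist, and the UTXO
/// lookup is omitted, which means channel announcements will not be checked against the chain):
///
/// ```ignore
/// use lightning::routing::gossip::P2PGossipSync;
/// use lightning::routing::utxo::UtxoLookup;
///
/// // The `U` type parameter must still be pinned down even when no lookup is provided.
/// let gossip_sync = P2PGossipSync::new(&graph, None::<&dyn UtxoLookup>, &logger);
/// // Later, a chain source can be attached to start validating announcements against UTXOs:
/// // gossip_sync.add_utxo_lookup(Some(chain_source));
/// let _graph_ref = gossip_sync.network_graph();
/// ```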
319pub struct P2PGossipSync<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
325where
326 U::Target: UtxoLookup,
327 L::Target: Logger,
328{
329 network_graph: G,
330 utxo_lookup: RwLock<Option<U>>,
331 full_syncs_requested: AtomicUsize,
332 pending_events: Mutex<Vec<MessageSendEvent>>,
333 logger: L,
334}
335
336impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
337where
338 U::Target: UtxoLookup,
339 L::Target: Logger,
340{
341 pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
346 P2PGossipSync {
347 network_graph,
348 full_syncs_requested: AtomicUsize::new(0),
349 utxo_lookup: RwLock::new(utxo_lookup),
350 pending_events: Mutex::new(vec![]),
351 logger,
352 }
353 }
354
355 pub fn add_utxo_lookup(&self, utxo_lookup: Option<U>) {
359 *self.utxo_lookup.write().unwrap() = utxo_lookup;
360 }
361
362 pub fn network_graph(&self) -> &G {
367 &self.network_graph
368 }
369
370 fn should_request_full_sync(&self) -> bool {
372 const FULL_SYNCS_TO_REQUEST: usize = 5;
373 if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
374 self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
375 true
376 } else {
377 false
378 }
379 }
380
381 pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
386 match &mut ev {
387 MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
388 if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
389 return;
390 }
391 if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
392 > MAX_EXCESS_BYTES_FOR_RELAY
393 {
394 *update_msg = None;
395 }
396 },
397 MessageSendEvent::BroadcastChannelUpdate { msg } => {
398 if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
399 return;
400 }
401 },
402 MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
403 if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
404 || msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
405 || msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
406 > MAX_EXCESS_BYTES_FOR_RELAY
407 {
408 return;
409 }
410 },
411 _ => return,
412 }
413 self.pending_events.lock().unwrap().push(ev);
414 }
415}
416
417impl<L: Deref> NetworkGraph<L>
418where
419 L::Target: Logger,
420{
421 pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
425 match *network_update {
426 NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
427 if is_permanent {
428 log_debug!(
429 self.logger,
430 "Removing channel graph entry for {} due to a payment failure.",
431 short_channel_id
432 );
433 self.channel_failed_permanent(short_channel_id);
434 }
435 },
436 NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
437 if is_permanent {
438 log_debug!(
439 self.logger,
440 "Removed node graph entry for {} due to a payment failure.",
441 log_pubkey!(node_id)
442 );
443 self.node_failed_permanent(node_id);
444 };
445 },
446 }
447 }
448
449 pub fn get_chain_hash(&self) -> ChainHash {
451 self.chain_hash
452 }
453}
454
455macro_rules! secp_verify_sig {
456 ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => {
457 match $secp_ctx.verify_ecdsa($msg, $sig, $pubkey) {
458 Ok(_) => {},
459 Err(_) => {
460 return Err(LightningError {
461 err: format!("Invalid signature on {} message", $msg_type),
462 action: ErrorAction::SendWarningMessage {
463 msg: msgs::WarningMessage {
464 channel_id: ChannelId::new_zero(),
465 data: format!("Invalid signature on {} message", $msg_type),
466 },
467 log_level: Level::Trace,
468 },
469 });
470 },
471 }
472 };
473}
474
475macro_rules! get_pubkey_from_node_id {
476 ( $node_id: expr, $msg_type: expr ) => {
477 PublicKey::from_slice($node_id.as_slice()).map_err(|_| LightningError {
478 err: format!("Invalid public key on {} message", $msg_type),
479 action: ErrorAction::SendWarningMessage {
480 msg: msgs::WarningMessage {
481 channel_id: ChannelId::new_zero(),
482 data: format!("Invalid public key on {} message", $msg_type),
483 },
484 log_level: Level::Trace,
485 },
486 })?
487 };
488}
489
490fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash {
491 let mut engine = Sha256dHash::engine();
492 msg.write(&mut engine).expect("In-memory structs should not fail to serialize");
493 Sha256dHash::from_engine(engine)
494}
495
496pub fn verify_node_announcement<C: Verification>(
500 msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>,
501) -> Result<(), LightningError> {
502 let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
503 secp_verify_sig!(
504 secp_ctx,
505 &msg_hash,
506 &msg.signature,
507 &get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"),
508 "node_announcement"
509 );
510
511 Ok(())
512}
513
514pub fn verify_channel_announcement<C: Verification>(
518 msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>,
519) -> Result<(), LightningError> {
520 let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
521 let node_a = get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement");
522 secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &node_a, "channel_announcement");
523 let node_b = get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement");
524 secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &node_b, "channel_announcement");
525 let btc_a = get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement");
526 secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &btc_a, "channel_announcement");
527 let btc_b = get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement");
528 secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &btc_b, "channel_announcement");
529
530 Ok(())
531}
532
533impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler
534 for P2PGossipSync<G, U, L>
535where
536 U::Target: UtxoLookup,
537 L::Target: Logger,
538{
539 fn handle_node_announcement(
540 &self, _their_node_id: Option<PublicKey>, msg: &msgs::NodeAnnouncement,
541 ) -> Result<bool, LightningError> {
542 self.network_graph.update_node_from_announcement(msg)?;
543 Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
544 && msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
545 && msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
546 <= MAX_EXCESS_BYTES_FOR_RELAY)
547 }
548
549 fn handle_channel_announcement(
550 &self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement,
551 ) -> Result<bool, LightningError> {
552 self.network_graph
553 .update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
554 Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
555 }
556
557 fn handle_channel_update(
558 &self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelUpdate,
559 ) -> Result<bool, LightningError> {
560 self.network_graph.update_channel(msg)?;
561 Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
562 }
563
564 fn get_next_channel_announcement(
565 &self, starting_point: u64,
566 ) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
567 let mut channels = self.network_graph.channels.write().unwrap();
568 for (_, ref chan) in channels.range(starting_point..) {
569 if chan.announcement_message.is_some() {
570 let chan_announcement = chan.announcement_message.clone().unwrap();
571 let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
572 let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
573 if let Some(one_to_two) = chan.one_to_two.as_ref() {
574 one_to_two_announcement.clone_from(&one_to_two.last_update_message);
575 }
576 if let Some(two_to_one) = chan.two_to_one.as_ref() {
577 two_to_one_announcement.clone_from(&two_to_one.last_update_message);
578 }
579 return Some((chan_announcement, one_to_two_announcement, two_to_one_announcement));
			} else {
				// No cached announcement for this channel to relay; keep scanning.
			}
584 }
585 None
586 }
587
588 fn get_next_node_announcement(
589 &self, starting_point: Option<&NodeId>,
590 ) -> Option<NodeAnnouncement> {
591 let mut nodes = self.network_graph.nodes.write().unwrap();
592 let iter = if let Some(node_id) = starting_point {
593 nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
594 } else {
595 nodes.range(..)
596 };
597 for (_, ref node) in iter {
598 if let Some(node_info) = node.announcement_info.as_ref() {
599 if let NodeAnnouncementInfo::Relayed(announcement) = node_info {
600 return Some(announcement.clone());
601 }
602 }
603 }
604 None
605 }
606
607 fn handle_reply_channel_range(
608 &self, _their_node_id: PublicKey, _msg: ReplyChannelRange,
609 ) -> Result<(), LightningError> {
610 Ok(())
614 }
615
616 fn handle_reply_short_channel_ids_end(
617 &self, _their_node_id: PublicKey, _msg: ReplyShortChannelIdsEnd,
618 ) -> Result<(), LightningError> {
619 Ok(())
623 }
624
625 fn handle_query_channel_range(
633 &self, their_node_id: PublicKey, msg: QueryChannelRange,
634 ) -> Result<(), LightningError> {
635 log_debug!(
636 self.logger,
637 "Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}",
638 log_pubkey!(their_node_id),
639 msg.first_blocknum,
640 msg.number_of_blocks
641 );
642
643 let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);
644
645 let exclusive_end_scid =
648 scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);
649
650 if msg.chain_hash != self.network_graph.chain_hash
652 || inclusive_start_scid.is_err()
653 || exclusive_end_scid.is_err()
654 || msg.number_of_blocks == 0
655 {
656 let mut pending_events = self.pending_events.lock().unwrap();
657 pending_events.push(MessageSendEvent::SendReplyChannelRange {
658 node_id: their_node_id.clone(),
659 msg: ReplyChannelRange {
660 chain_hash: msg.chain_hash.clone(),
661 first_blocknum: msg.first_blocknum,
662 number_of_blocks: msg.number_of_blocks,
663 sync_complete: true,
664 short_channel_ids: vec![],
665 },
666 });
667 return Err(LightningError {
668 err: String::from("query_channel_range could not be processed"),
669 action: ErrorAction::IgnoreError,
670 });
671 }
672
673 let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
677 let mut channels = self.network_graph.channels.write().unwrap();
678 for (_, ref chan) in
679 channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap())
680 {
681 if let Some(chan_announcement) = &chan.announcement_message {
682 if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
684 batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
685 }
686
687 let batch = batches.last_mut().unwrap();
688 batch.push(chan_announcement.contents.short_channel_id);
689 }
690 }
691 drop(channels);
692
693 let mut pending_events = self.pending_events.lock().unwrap();
694 let batch_count = batches.len();
695 let mut prev_batch_endblock = msg.first_blocknum;
696 for (batch_index, batch) in batches.into_iter().enumerate() {
697 let first_blocknum = prev_batch_endblock;
707
708 let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
719 (true, msg.end_blocknum() - first_blocknum)
			} else {
724 (false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
725 };
726
727 prev_batch_endblock = first_blocknum + number_of_blocks;
728
729 pending_events.push(MessageSendEvent::SendReplyChannelRange {
730 node_id: their_node_id.clone(),
731 msg: ReplyChannelRange {
732 chain_hash: msg.chain_hash.clone(),
733 first_blocknum,
734 number_of_blocks,
735 sync_complete,
736 short_channel_ids: batch,
737 },
738 });
739 }
740
741 Ok(())
742 }
743
744 fn handle_query_short_channel_ids(
745 &self, _their_node_id: PublicKey, _msg: QueryShortChannelIds,
746 ) -> Result<(), LightningError> {
747 Err(LightningError {
749 err: String::from("Not implemented"),
750 action: ErrorAction::IgnoreError,
751 })
752 }
753
754 fn processing_queue_high(&self) -> bool {
755 self.network_graph.pending_checks.too_many_checks_pending()
756 }
757}
758
759impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> BaseMessageHandler
760 for P2PGossipSync<G, U, L>
761where
762 U::Target: UtxoLookup,
763 L::Target: Logger,
764{
765 fn peer_connected(
781 &self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool,
782 ) -> Result<(), ()> {
783 if !init_msg.features.supports_gossip_queries() {
785 return Ok(());
788 }
789
790 #[allow(unused_mut, unused_assignments)]
841 let mut gossip_start_time = 0;
842 #[allow(unused)]
843 let should_sync = self.should_request_full_sync();
844 #[cfg(feature = "std")]
845 {
846 gossip_start_time = SystemTime::now()
847 .duration_since(UNIX_EPOCH)
848 .expect("Time must be > 1970")
849 .as_secs();
850 if should_sync {
				gossip_start_time -= 60 * 60 * 24 * 7 * 2; // ask for the full two weeks of gossip
			} else {
				gossip_start_time -= 60 * 60; // only ask for the last hour of gossip
			}
855 }
856
857 let mut pending_events = self.pending_events.lock().unwrap();
858 pending_events.push(MessageSendEvent::SendGossipTimestampFilter {
859 node_id: their_node_id.clone(),
860 msg: GossipTimestampFilter {
861 chain_hash: self.network_graph.chain_hash,
				first_timestamp: gossip_start_time as u32,
				timestamp_range: u32::max_value(),
864 },
865 });
866 Ok(())
867 }
868
869 fn peer_disconnected(&self, _their_node_id: PublicKey) {}
870
871 fn provided_node_features(&self) -> NodeFeatures {
872 let mut features = NodeFeatures::empty();
873 features.set_gossip_queries_optional();
874 features
875 }
876
877 fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
878 let mut features = InitFeatures::empty();
879 features.set_gossip_queries_optional();
880 features
881 }
882
883 fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
884 let mut ret = Vec::new();
885 let mut pending_events = self.pending_events.lock().unwrap();
886 core::mem::swap(&mut ret, &mut pending_events);
887 ret
888 }
889}
890
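/// Details about one direction of a channel as learned from its `channel_update`.
///
/// All fields are public, so a directional entry can be built by hand, e.g. when seeding a graph
/// for tests. A sketch with arbitrary values (fee and HTLC amounts are in millisatoshis):
///
/// ```ignore
/// use lightning::routing::gossip::{ChannelUpdateInfo, RoutingFees};
///
/// let info = ChannelUpdateInfo {
/// 	last_update: 1_700_000_000,
/// 	enabled: true,
/// 	cltv_expiry_delta: 144,
/// 	htlc_minimum_msat: 1_000,
/// 	htlc_maximum_msat: 100_000_000,
/// 	fees: RoutingFees { base_msat: 1_000, proportional_millionths: 100 },
/// 	last_update_message: None,
/// };
/// assert!(info.enabled);
/// ```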
891#[repr(C, align(32))]
901#[derive(Clone, Debug, PartialEq, Eq)]
902pub struct ChannelUpdateInfo {
904 pub htlc_minimum_msat: u64,
906 pub htlc_maximum_msat: u64,
908 pub fees: RoutingFees,
910 pub last_update: u32,
913 pub cltv_expiry_delta: u16,
915 pub enabled: bool,
917 pub last_update_message: Option<ChannelUpdate>,
922}
923
924impl fmt::Display for ChannelUpdateInfo {
925 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
926 write!(
927 f,
928 "last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}",
929 self.last_update,
930 self.enabled,
931 self.cltv_expiry_delta,
932 self.htlc_minimum_msat,
933 self.fees
934 )?;
935 Ok(())
936 }
937}
938
939impl Writeable for ChannelUpdateInfo {
940 fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
941 write_tlv_fields!(writer, {
942 (0, self.last_update, required),
943 (2, self.enabled, required),
944 (4, self.cltv_expiry_delta, required),
945 (6, self.htlc_minimum_msat, required),
946 (8, Some(self.htlc_maximum_msat), required),
949 (10, self.fees, required),
950 (12, self.last_update_message, required),
951 });
952 Ok(())
953 }
954}
955
956impl Readable for ChannelUpdateInfo {
957 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
958 _init_tlv_field_var!(last_update, required);
959 _init_tlv_field_var!(enabled, required);
960 _init_tlv_field_var!(cltv_expiry_delta, required);
961 _init_tlv_field_var!(htlc_minimum_msat, required);
962 _init_tlv_field_var!(htlc_maximum_msat, option);
963 _init_tlv_field_var!(fees, required);
964 _init_tlv_field_var!(last_update_message, required);
965
966 read_tlv_fields!(reader, {
967 (0, last_update, required),
968 (2, enabled, required),
969 (4, cltv_expiry_delta, required),
970 (6, htlc_minimum_msat, required),
971 (8, htlc_maximum_msat, required),
972 (10, fees, required),
973 (12, last_update_message, required)
974 });
975
976 if let Some(htlc_maximum_msat) = htlc_maximum_msat {
977 Ok(ChannelUpdateInfo {
978 last_update: _init_tlv_based_struct_field!(last_update, required),
979 enabled: _init_tlv_based_struct_field!(enabled, required),
980 cltv_expiry_delta: _init_tlv_based_struct_field!(cltv_expiry_delta, required),
981 htlc_minimum_msat: _init_tlv_based_struct_field!(htlc_minimum_msat, required),
982 htlc_maximum_msat,
983 fees: _init_tlv_based_struct_field!(fees, required),
984 last_update_message: _init_tlv_based_struct_field!(last_update_message, required),
985 })
986 } else {
987 Err(DecodeError::InvalidValue)
988 }
989 }
990}
991
992#[repr(align(128), C)]
1006#[derive(Clone, Debug, Eq)]
1007pub struct ChannelInfo {
1010 pub features: ChannelFeatures,
1012
1013 pub node_one: NodeId,
1015
1016 pub node_two: NodeId,
1018
1019 pub(crate) node_one_counter: u32,
1021 pub(crate) node_two_counter: u32,
1023
1024 pub capacity_sats: Option<u64>,
1026
1027 pub one_to_two: Option<ChannelUpdateInfo>,
1029 pub two_to_one: Option<ChannelUpdateInfo>,
1031
1032 pub announcement_message: Option<ChannelAnnouncement>,
1037 announcement_received_time: u64,
1041}
1042
1043impl PartialEq for ChannelInfo {
1044 fn eq(&self, o: &ChannelInfo) -> bool {
1045 self.features == o.features
1046 && self.node_one == o.node_one
1047 && self.one_to_two == o.one_to_two
1048 && self.node_two == o.node_two
1049 && self.two_to_one == o.two_to_one
1050 && self.capacity_sats == o.capacity_sats
1051 && self.announcement_message == o.announcement_message
1052 && self.announcement_received_time == o.announcement_received_time
1053 }
1054}
1055
1056impl ChannelInfo {
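	/// Returns the directed view of this channel as seen when sending *to* `target`, along with
	/// the counterparty (source) node id, or `None` if `target` is not a party to the channel or
	/// either direction is missing a `channel_update`.
	///
	/// A lookup sketch (illustrative; `chan` is assumed to be a `&ChannelInfo` fetched from a
	/// [`ReadOnlyNetworkGraph`] and `payee` a `NodeId` of interest):
	///
	/// ```ignore
	/// if let Some((directed, source)) = chan.as_directed_to(&payee) {
	/// 	// `directed` carries the per-direction policy used for routing decisions.
	/// 	let _cap = directed.effective_capacity().as_msat();
	/// 	let _counterparty = source;
	/// }
	/// ```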
1057 pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo<'_>, &NodeId)> {
1060 if self.one_to_two.is_none() || self.two_to_one.is_none() {
1061 return None;
1062 }
1063 let (direction, source, outbound) = {
1064 if target == &self.node_one {
1065 (self.two_to_one.as_ref(), &self.node_two, false)
1066 } else if target == &self.node_two {
1067 (self.one_to_two.as_ref(), &self.node_one, true)
1068 } else {
1069 return None;
1070 }
1071 };
1072 let dir = direction.expect("We checked that both directions are available at the start");
1073 Some((DirectedChannelInfo::new(self, dir, outbound), source))
1074 }
1075
1076 pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo<'_>, &NodeId)> {
1079 if self.one_to_two.is_none() || self.two_to_one.is_none() {
1080 return None;
1081 }
1082 let (direction, target, outbound) = {
1083 if source == &self.node_one {
1084 (self.one_to_two.as_ref(), &self.node_two, true)
1085 } else if source == &self.node_two {
1086 (self.two_to_one.as_ref(), &self.node_one, false)
1087 } else {
1088 return None;
1089 }
1090 };
1091 let dir = direction.expect("We checked that both directions are available at the start");
1092 Some((DirectedChannelInfo::new(self, dir, outbound), target))
1093 }
1094
1095 pub fn get_directional_info(&self, channel_flags: u8) -> Option<&ChannelUpdateInfo> {
1097 let direction = channel_flags & 1u8;
1098 if direction == 0 {
1099 self.one_to_two.as_ref()
1100 } else {
1101 self.two_to_one.as_ref()
1102 }
1103 }
1104}
1105
1106impl fmt::Display for ChannelInfo {
1107 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1108 write!(
1109 f,
1110 "features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
1111 log_bytes!(self.features.encode()),
1112 &self.node_one,
1113 self.one_to_two,
1114 &self.node_two,
1115 self.two_to_one
1116 )?;
1117 Ok(())
1118 }
1119}
1120
1121impl Writeable for ChannelInfo {
1122 fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1123 write_tlv_fields!(writer, {
1124 (0, self.features, required),
1125 (1, self.announcement_received_time, (default_value, 0)),
1126 (2, self.node_one, required),
1127 (4, self.one_to_two, required),
1128 (6, self.node_two, required),
1129 (8, self.two_to_one, required),
1130 (10, self.capacity_sats, required),
1131 (12, self.announcement_message, required),
1132 });
1133 Ok(())
1134 }
1135}
1136
1137struct ChannelUpdateInfoDeserWrapper(Option<ChannelUpdateInfo>);
1143
1144impl MaybeReadable for ChannelUpdateInfoDeserWrapper {
1145 fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1146 match crate::util::ser::Readable::read(reader) {
1147 Ok(channel_update_option) => Ok(Some(Self(channel_update_option))),
1148 Err(DecodeError::ShortRead) => Ok(None),
1149 Err(DecodeError::InvalidValue) => Ok(None),
1150 Err(err) => Err(err),
1151 }
1152 }
1153}
1154
1155impl Readable for ChannelInfo {
1156 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1157 _init_tlv_field_var!(features, required);
1158 _init_tlv_field_var!(announcement_received_time, (default_value, 0));
1159 _init_tlv_field_var!(node_one, required);
1160 let mut one_to_two_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
1161 _init_tlv_field_var!(node_two, required);
1162 let mut two_to_one_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
1163 _init_tlv_field_var!(capacity_sats, required);
1164 _init_tlv_field_var!(announcement_message, required);
1165 read_tlv_fields!(reader, {
1166 (0, features, required),
1167 (1, announcement_received_time, (default_value, 0)),
1168 (2, node_one, required),
1169 (4, one_to_two_wrap, upgradable_option),
1170 (6, node_two, required),
1171 (8, two_to_one_wrap, upgradable_option),
1172 (10, capacity_sats, required),
1173 (12, announcement_message, required),
1174 });
1175
1176 Ok(ChannelInfo {
1177 features: _init_tlv_based_struct_field!(features, required),
1178 node_one: _init_tlv_based_struct_field!(node_one, required),
1179 one_to_two: one_to_two_wrap.map(|w| w.0).unwrap_or(None),
1180 node_two: _init_tlv_based_struct_field!(node_two, required),
1181 two_to_one: two_to_one_wrap.map(|w| w.0).unwrap_or(None),
1182 capacity_sats: _init_tlv_based_struct_field!(capacity_sats, required),
1183 announcement_message: _init_tlv_based_struct_field!(announcement_message, required),
1184 announcement_received_time: _init_tlv_based_struct_field!(
1185 announcement_received_time,
1186 (default_value, 0)
1187 ),
1188 node_one_counter: u32::max_value(),
1189 node_two_counter: u32::max_value(),
1190 })
1191 }
1192}
1193
1194#[derive(Clone)]
1197pub struct DirectedChannelInfo<'a> {
1198 channel: &'a ChannelInfo,
1199 direction: &'a ChannelUpdateInfo,
1200 source_counter: u32,
1201 target_counter: u32,
1202 from_node_one: bool,
1205}
1206
1207impl<'a> DirectedChannelInfo<'a> {
1208 #[inline]
1209 fn new(
1210 channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool,
1211 ) -> Self {
1212 let (source_counter, target_counter) = if from_node_one {
1213 (channel.node_one_counter, channel.node_two_counter)
1214 } else {
1215 (channel.node_two_counter, channel.node_one_counter)
1216 };
1217 Self { channel, direction, from_node_one, source_counter, target_counter }
1218 }
1219
1220 #[inline]
1222 pub fn channel(&self) -> &'a ChannelInfo {
1223 self.channel
1224 }
1225
1226 #[inline]
1232 pub fn effective_capacity(&self) -> EffectiveCapacity {
1233 let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
1234 let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);
1235
1236 match capacity_msat {
1237 Some(capacity_msat) => {
1238 htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
1239 EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
1240 },
1241 None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
1242 }
1243 }
1244
1245 #[inline]
1247 pub(super) fn direction(&self) -> &'a ChannelUpdateInfo {
1248 self.direction
1249 }
1250
1251 #[inline]
1255 pub fn source(&self) -> &'a NodeId {
1256 if self.from_node_one {
1257 &self.channel.node_one
1258 } else {
1259 &self.channel.node_two
1260 }
1261 }
1262
1263 #[inline]
1267 pub fn target(&self) -> &'a NodeId {
1268 if self.from_node_one {
1269 &self.channel.node_two
1270 } else {
1271 &self.channel.node_one
1272 }
1273 }
1274
1275 #[inline(always)]
1277 pub(super) fn source_counter(&self) -> u32 {
1278 self.source_counter
1279 }
1280
1281 #[inline(always)]
1283 pub(super) fn target_counter(&self) -> u32 {
1284 self.target_counter
1285 }
1286}
1287
1288impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
1289 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1290 f.debug_struct("DirectedChannelInfo").field("channel", &self.channel).finish()
1291 }
1292}
1293
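/// The effective capacity of a channel for routing purposes: the best bound we have on how many
/// millisatoshis could flow through it in one direction.
///
/// A matching sketch (illustrative values):
///
/// ```ignore
/// use lightning::routing::gossip::EffectiveCapacity;
///
/// let cap = EffectiveCapacity::Total { capacity_msat: 5_000_000_000, htlc_maximum_msat: 3_000_000_000 };
/// // `as_msat` collapses every variant to a single scalar; for `Total` that is the full
/// // capacity, not the per-HTLC limit.
/// assert_eq!(cap.as_msat(), 5_000_000_000);
/// assert_eq!(EffectiveCapacity::Unknown.as_msat(), 250_000 * 1000);
/// ```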
1294#[derive(Clone, Copy, Debug, PartialEq)]
1299pub enum EffectiveCapacity {
1300 ExactLiquidity {
1303 liquidity_msat: u64,
1306 },
1307 AdvertisedMaxHTLC {
1309 amount_msat: u64,
1311 },
1312 Total {
1314 capacity_msat: u64,
1316 htlc_maximum_msat: u64,
1318 },
1319 Infinite,
1322 HintMaxHTLC {
1324 amount_msat: u64,
1326 },
1327 Unknown,
1330}
1331
1332pub const UNKNOWN_CHANNEL_CAPACITY_MSAT: u64 = 250_000 * 1000;
1335
1336impl EffectiveCapacity {
1337 pub fn as_msat(&self) -> u64 {
1339 match self {
1340 EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat,
1341 EffectiveCapacity::AdvertisedMaxHTLC { amount_msat } => *amount_msat,
1342 EffectiveCapacity::Total { capacity_msat, .. } => *capacity_msat,
1343 EffectiveCapacity::HintMaxHTLC { amount_msat } => *amount_msat,
1344 EffectiveCapacity::Infinite => u64::max_value(),
1345 EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT,
1346 }
1347 }
1348}
1349
1350impl_writeable_tlv_based!(RoutingFees, {
1351 (0, base_msat, required),
1352 (2, proportional_millionths, required)
1353});
1354
1355#[derive(Clone, Debug, PartialEq, Eq)]
1356pub struct NodeAnnouncementDetails {
1358 pub features: NodeFeatures,
1360
1361 pub last_update: u32,
1364
1365 pub rgb: [u8; 3],
1367
1368 pub alias: NodeAlias,
1372
1373 pub addresses: Vec<SocketAddress>,
1375}
1376
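/// Information about a node's latest `node_announcement`: either the full relayable message
/// (`Relayed`) or the unpacked fields (`Local`) when the original message cannot be relayed
/// (e.g. because it carried too much excess data or was never signed).
///
/// The accessors hide that split, so callers rarely need to match on the variant directly
/// (sketch; `info` is assumed to be a `&NodeAnnouncementInfo` taken from the graph):
///
/// ```ignore
/// let _features = info.features();
/// let _alias = info.alias().to_string();
/// // The full message is only available for the `Relayed` form.
/// let relayable = info.announcement_message().is_some();
/// ```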
1377#[derive(Clone, Debug, PartialEq, Eq)]
1378pub enum NodeAnnouncementInfo {
1380 Relayed(NodeAnnouncement),
1384
1385 Local(NodeAnnouncementDetails),
1387}
1388
1389impl NodeAnnouncementInfo {
1390 pub fn features(&self) -> &NodeFeatures {
1392 match self {
1393 NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.features,
1394 NodeAnnouncementInfo::Local(local) => &local.features,
1395 }
1396 }
1397
1398 pub fn last_update(&self) -> u32 {
1402 match self {
1403 NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.timestamp,
1404 NodeAnnouncementInfo::Local(local) => local.last_update,
1405 }
1406 }
1407
1408 pub fn rgb(&self) -> [u8; 3] {
1410 match self {
1411 NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.rgb,
1412 NodeAnnouncementInfo::Local(local) => local.rgb,
1413 }
1414 }
1415
1416 pub fn alias(&self) -> &NodeAlias {
1420 match self {
1421 NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.alias,
1422 NodeAnnouncementInfo::Local(local) => &local.alias,
1423 }
1424 }
1425
1426 pub fn addresses(&self) -> &[SocketAddress] {
1428 match self {
1429 NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.addresses,
1430 NodeAnnouncementInfo::Local(local) => &local.addresses,
1431 }
1432 }
1433
1434 pub fn announcement_message(&self) -> Option<&NodeAnnouncement> {
1438 match self {
1439 NodeAnnouncementInfo::Relayed(announcement) => Some(announcement),
1440 NodeAnnouncementInfo::Local(_) => None,
1441 }
1442 }
1443}
1444
1445impl Writeable for NodeAnnouncementInfo {
1446 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1447 let features = self.features();
1448 let last_update = self.last_update();
1449 let rgb = self.rgb();
1450 let alias = self.alias();
1451 let addresses = self.addresses();
1452 let announcement_message = self.announcement_message();
1453
1454 write_tlv_fields!(writer, {
1455 (0, features, required),
1456 (2, last_update, required),
1457 (4, rgb, required),
1458 (6, alias, required),
1459 (8, announcement_message, option),
			(10, *addresses, required_vec),
		});
1462 Ok(())
1463 }
1464}
1465
1466impl Readable for NodeAnnouncementInfo {
1467 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1468 _init_and_read_len_prefixed_tlv_fields!(reader, {
1469 (0, features, required),
1470 (2, last_update, required),
1471 (4, rgb, required),
1472 (6, alias, required),
1473 (8, announcement_message, option),
1474 (10, addresses, required_vec),
1475 });
1476 if let Some(announcement) = announcement_message {
1477 Ok(Self::Relayed(announcement))
1478 } else {
1479 Ok(Self::Local(NodeAnnouncementDetails {
1480 features: features.0.unwrap(),
1481 last_update: last_update.0.unwrap(),
1482 rgb: rgb.0.unwrap(),
1483 alias: alias.0.unwrap(),
1484 addresses,
1485 }))
1486 }
1487 }
1488}
1489
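/// A user-defined node alias: up to 32 bytes, conventionally UTF-8 and padded with trailing
/// zeroes. The `Display` impl stops at the first NUL byte and replaces unprintable characters.
///
/// A quick sketch:
///
/// ```ignore
/// use lightning::routing::gossip::NodeAlias;
///
/// let mut bytes = [0u8; 32];
/// bytes[..9].copy_from_slice(b"LDK node!");
/// // Trailing zero padding is not rendered.
/// assert_eq!(NodeAlias(bytes).to_string(), "LDK node!");
/// ```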
1490#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
1495pub struct NodeAlias(pub [u8; 32]);
1496
1497impl fmt::Display for NodeAlias {
1498 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1499 let first_null = self.0.iter().position(|b| *b == 0).unwrap_or(self.0.len());
1500 let bytes = self.0.split_at(first_null).0;
1501 match core::str::from_utf8(bytes) {
1502 Ok(alias) => PrintableString(alias).fmt(f)?,
1503 Err(_) => {
1504 use core::fmt::Write;
1505 for c in bytes.iter().map(|b| *b as char) {
1506 let control_symbol = core::char::REPLACEMENT_CHARACTER;
1508 let c = if c >= '\x20' && c <= '\x7e' { c } else { control_symbol };
1509 f.write_char(c)?;
1510 }
1511 },
1512 };
1513 Ok(())
1514 }
1515}
1516
1517impl Writeable for NodeAlias {
1518 fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
1519 self.0.write(w)
1520 }
1521}
1522
1523impl Readable for NodeAlias {
1524 fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
1525 Ok(NodeAlias(Readable::read(r)?))
1526 }
1527}
1528
1529#[derive(Clone, Debug, Eq)]
1530pub struct NodeInfo {
1532 pub channels: Vec<u64>,
1534 pub announcement_info: Option<NodeAnnouncementInfo>,
1538 pub(crate) node_counter: u32,
1544}
1545
1546impl PartialEq for NodeInfo {
1547 fn eq(&self, o: &NodeInfo) -> bool {
1548 self.channels == o.channels && self.announcement_info == o.announcement_info
1549 }
1550}
1551
1552impl NodeInfo {
1553 pub fn is_tor_only(&self) -> bool {
1555 self.announcement_info
1556 .as_ref()
1557 .map(|info| info.addresses())
1558 .and_then(|addresses| (!addresses.is_empty()).then(|| addresses))
1559 .map(|addresses| addresses.iter().all(|address| address.is_tor()))
1560 .unwrap_or(false)
1561 }
1562}
1563
1564impl fmt::Display for NodeInfo {
1565 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1566 write!(
1567 f,
1568 " channels: {:?}, announcement_info: {:?}",
1569 &self.channels[..],
1570 self.announcement_info
1571 )?;
1572 Ok(())
1573 }
1574}
1575
1576impl Writeable for NodeInfo {
1577 fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1578 write_tlv_fields!(writer, {
1579 (2, self.announcement_info, option),
1581 (4, self.channels, required_vec),
1582 });
1583 Ok(())
1584 }
1585}
1586
1587struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
1592
1593impl MaybeReadable for NodeAnnouncementInfoDeserWrapper {
1594 fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1595 match crate::util::ser::Readable::read(reader) {
1596 Ok(node_announcement_info) => return Ok(Some(Self(node_announcement_info))),
1597 Err(_) => {
1598 copy(reader, &mut sink()).unwrap();
1599 return Ok(None);
1600 },
1601 };
1602 }
1603}
1604
1605impl Readable for NodeInfo {
1606 fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1607 _init_and_read_len_prefixed_tlv_fields!(reader, {
1613 (0, _lowest_inbound_channel_fees, option),
1614 (2, announcement_info_wrap, upgradable_option),
1615 (4, channels, required_vec),
1616 });
1617 let _: Option<RoutingFees> = _lowest_inbound_channel_fees;
1618 let announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> =
1619 announcement_info_wrap;
1620
1621 Ok(NodeInfo {
1622 announcement_info: announcement_info_wrap.map(|w| w.0),
1623 channels,
1624 node_counter: u32::max_value(),
1625 })
1626 }
1627}
1628
1629const SERIALIZATION_VERSION: u8 = 1;
1630const MIN_SERIALIZATION_VERSION: u8 = 1;
1631
1632impl<L: Deref> Writeable for NetworkGraph<L>
1633where
1634 L::Target: Logger,
1635{
1636 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1637 self.test_node_counter_consistency();
1638
1639 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
1640
1641 self.chain_hash.write(writer)?;
1642 let channels = self.channels.read().unwrap();
1643 (channels.len() as u64).write(writer)?;
1644 for (ref chan_id, ref chan_info) in channels.unordered_iter() {
1645 (*chan_id).write(writer)?;
1646 chan_info.write(writer)?;
1647 }
1648 let nodes = self.nodes.read().unwrap();
1649 (nodes.len() as u64).write(writer)?;
1650 for (ref node_id, ref node_info) in nodes.unordered_iter() {
1651 node_id.write(writer)?;
1652 node_info.write(writer)?;
1653 }
1654
1655 let last_rapid_gossip_sync_timestamp = self.get_last_rapid_gossip_sync_timestamp();
1656 write_tlv_fields!(writer, {
1657 (1, last_rapid_gossip_sync_timestamp, option),
1658 });
1659 Ok(())
1660 }
1661}
1662
1663impl<L: Deref> ReadableArgs<L> for NetworkGraph<L>
1664where
1665 L::Target: Logger,
1666{
1667 fn read<R: io::Read>(reader: &mut R, logger: L) -> Result<NetworkGraph<L>, DecodeError> {
1668 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
1669
1670 let chain_hash: ChainHash = Readable::read(reader)?;
1671 let channels_count: u64 = Readable::read(reader)?;
1672 let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE);
1673 for _ in 0..channels_count {
1674 let chan_id: u64 = Readable::read(reader)?;
1675 let chan_info: ChannelInfo = Readable::read(reader)?;
1676 channels.insert(chan_id, chan_info);
1677 }
1678 let nodes_count: u64 = Readable::read(reader)?;
1679 if nodes_count > u32::max_value() as u64 / 2 {
1682 return Err(DecodeError::InvalidValue);
1683 }
1684 let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE);
1685 for i in 0..nodes_count {
1686 let node_id = Readable::read(reader)?;
1687 let mut node_info: NodeInfo = Readable::read(reader)?;
1688 node_info.node_counter = i as u32;
1689 nodes.insert(node_id, node_info);
1690 }
1691
1692 for (_, chan) in channels.unordered_iter_mut() {
1693 chan.node_one_counter =
1694 nodes.get(&chan.node_one).ok_or(DecodeError::InvalidValue)?.node_counter;
1695 chan.node_two_counter =
1696 nodes.get(&chan.node_two).ok_or(DecodeError::InvalidValue)?.node_counter;
1697 }
1698
1699 let mut last_rapid_gossip_sync_timestamp: Option<u32> = None;
1700 read_tlv_fields!(reader, {
1701 (1, last_rapid_gossip_sync_timestamp, option),
1702 });
1703
1704 Ok(NetworkGraph {
1705 secp_ctx: Secp256k1::verification_only(),
1706 chain_hash,
1707 logger,
1708 channels: RwLock::new(channels),
1709 nodes: RwLock::new(nodes),
1710 removed_node_counters: Mutex::new(Vec::new()),
1711 next_node_counter: AtomicUsize::new(nodes_count as usize),
1712 last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
1713 removed_nodes: Mutex::new(new_hash_map()),
1714 removed_channels: Mutex::new(new_hash_map()),
1715 pending_checks: utxo::PendingChecks::new(),
1716 })
1717 }
1718}
1719
1720impl<L: Deref> fmt::Display for NetworkGraph<L>
1721where
1722 L::Target: Logger,
1723{
1724 fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1725 writeln!(f, "Network map\n[Channels]")?;
1726 for (key, val) in self.channels.read().unwrap().unordered_iter() {
1727 writeln!(f, " {}: {}", key, val)?;
1728 }
1729 writeln!(f, "[Nodes]")?;
1730 for (&node_id, val) in self.nodes.read().unwrap().unordered_iter() {
1731 writeln!(f, " {}: {}", &node_id, val)?;
1732 }
1733 Ok(())
1734 }
1735}
1736
1737impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
1738impl<L: Deref> PartialEq for NetworkGraph<L>
1739where
1740 L::Target: Logger,
1741{
1742 fn eq(&self, other: &Self) -> bool {
1743 let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
1746 let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
1747 let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
1748 let (channels_a, channels_b) = (
1749 a.0.unsafe_well_ordered_double_lock_self(),
1750 b.0.unsafe_well_ordered_double_lock_self(),
1751 );
1752 let (nodes_a, nodes_b) = (
1753 a.1.unsafe_well_ordered_double_lock_self(),
1754 b.1.unsafe_well_ordered_double_lock_self(),
1755 );
1756 self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
1757 }
1758}
1759
1760const CHAN_COUNT_ESTIMATE: usize = 60_000;
1764const NODE_COUNT_ESTIMATE: usize = 20_000;
1768
1769impl<L: Deref> NetworkGraph<L>
1770where
1771 L::Target: Logger,
1772{
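	/// Creates a new, empty network graph for the given chain.
	///
	/// A minimal sketch (illustrative; `logger` stands in for any `Logger` implementation):
	///
	/// ```ignore
	/// use bitcoin::network::Network;
	/// use lightning::routing::gossip::NetworkGraph;
	///
	/// let graph = NetworkGraph::new(Network::Testnet, &logger);
	/// // Nothing is known yet; data arrives via the gossip handlers or rapid gossip sync.
	/// assert_eq!(graph.read_only().channels().len(), 0);
	/// ```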
1773 pub fn new(network: Network, logger: L) -> NetworkGraph<L> {
1775 Self {
1776 secp_ctx: Secp256k1::verification_only(),
1777 chain_hash: ChainHash::using_genesis_block(network),
1778 logger,
1779 channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)),
1780 nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)),
1781 next_node_counter: AtomicUsize::new(0),
1782 removed_node_counters: Mutex::new(Vec::new()),
1783 last_rapid_gossip_sync_timestamp: Mutex::new(None),
1784 removed_channels: Mutex::new(new_hash_map()),
1785 removed_nodes: Mutex::new(new_hash_map()),
1786 pending_checks: utxo::PendingChecks::new(),
1787 }
1788 }
1789
1790 fn test_node_counter_consistency(&self) {
1791 #[cfg(any(test, fuzzing))]
1792 {
1793 let channels = self.channels.read().unwrap();
1794 let nodes = self.nodes.read().unwrap();
1795 let removed_node_counters = self.removed_node_counters.lock().unwrap();
1796 let next_counter = self.next_node_counter.load(Ordering::Acquire);
1797 assert!(next_counter < (u32::max_value() as usize) / 2);
1798 let mut used_node_counters = vec![0u8; next_counter / 8 + 1];
1799
1800 for counter in removed_node_counters.iter() {
1801 let pos = (*counter as usize) / 8;
1802 let bit = 1 << (counter % 8);
1803 assert_eq!(used_node_counters[pos] & bit, 0);
1804 used_node_counters[pos] |= bit;
1805 }
1806 for (_, node) in nodes.unordered_iter() {
1807 assert!((node.node_counter as usize) < next_counter);
1808 let pos = (node.node_counter as usize) / 8;
1809 let bit = 1 << (node.node_counter % 8);
1810 assert_eq!(used_node_counters[pos] & bit, 0);
1811 used_node_counters[pos] |= bit;
1812 }
1813
1814 for (idx, used_bitset) in used_node_counters.iter().enumerate() {
1815 if idx != next_counter / 8 {
1816 assert_eq!(*used_bitset, 0xff);
1817 } else {
1818 assert_eq!(*used_bitset, (1u8 << (next_counter % 8)) - 1);
1819 }
1820 }
1821
1822 for (_, chan) in channels.unordered_iter() {
1823 assert_eq!(chan.node_one_counter, nodes.get(&chan.node_one).unwrap().node_counter);
1824 assert_eq!(chan.node_two_counter, nodes.get(&chan.node_two).unwrap().node_counter);
1825 }
1826 }
1827 }
1828
1829 pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> {
1831 self.test_node_counter_consistency();
1832 let channels = self.channels.read().unwrap();
1833 let nodes = self.nodes.read().unwrap();
1834 ReadOnlyNetworkGraph {
1835 channels,
1836 nodes,
1837 max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32)
1838 .saturating_sub(1),
1839 }
1840 }
1841
1842 pub fn get_last_rapid_gossip_sync_timestamp(&self) -> Option<u32> {
1845 self.last_rapid_gossip_sync_timestamp.lock().unwrap().clone()
1846 }
1847
1848 pub fn set_last_rapid_gossip_sync_timestamp(&self, last_rapid_gossip_sync_timestamp: u32) {
1851 self.last_rapid_gossip_sync_timestamp
1852 .lock()
1853 .unwrap()
1854 .replace(last_rapid_gossip_sync_timestamp);
1855 }
1856
1857 #[cfg(test)]
1860 pub fn clear_nodes_announcement_info(&self) {
1861 for node in self.nodes.write().unwrap().unordered_iter_mut() {
1862 node.1.announcement_info = None;
1863 }
1864 }
1865
1866 pub fn update_node_from_announcement(
1873 &self, msg: &msgs::NodeAnnouncement,
1874 ) -> Result<(), LightningError> {
1875 if let Some(node) = self.nodes.read().unwrap().get(&msg.contents.node_id) {
1878 if let Some(node_info) = node.announcement_info.as_ref() {
1879 if node_info.last_update() == msg.contents.timestamp {
1880 return Err(LightningError {
1881 err: "Update had the same timestamp as last processed update".to_owned(),
1882 action: ErrorAction::IgnoreDuplicateGossip,
1883 });
1884 }
1885 }
1886 }
1887 verify_node_announcement(msg, &self.secp_ctx)?;
1888 self.update_node_from_announcement_intern(&msg.contents, Some(&msg))
1889 }
1890
1891 pub fn update_node_from_unsigned_announcement(
1896 &self, msg: &msgs::UnsignedNodeAnnouncement,
1897 ) -> Result<(), LightningError> {
1898 self.update_node_from_announcement_intern(msg, None)
1899 }
1900
1901 fn update_node_from_announcement_intern(
1902 &self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>,
1903 ) -> Result<(), LightningError> {
1904 let mut nodes = self.nodes.write().unwrap();
1905 match nodes.get_mut(&msg.node_id) {
1906 None => {
1907 core::mem::drop(nodes);
1908 self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?;
1909 Err(LightningError {
1910 err: "No existing channels for node_announcement".to_owned(),
1911 action: ErrorAction::IgnoreError,
1912 })
1913 },
1914 Some(node) => {
1915 if let Some(node_info) = node.announcement_info.as_ref() {
1916 if node_info.last_update() > msg.timestamp {
1920 return Err(LightningError {
1921 err: "Update older than last processed update".to_owned(),
1922 action: ErrorAction::IgnoreDuplicateGossip,
1923 });
1924 } else if node_info.last_update() == msg.timestamp {
1925 return Err(LightningError {
1926 err: "Update had the same timestamp as last processed update"
1927 .to_owned(),
1928 action: ErrorAction::IgnoreDuplicateGossip,
1929 });
1930 }
1931 }
1932
1933 let should_relay = msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1934 && msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1935 && msg.excess_data.len() + msg.excess_address_data.len()
1936 <= MAX_EXCESS_BYTES_FOR_RELAY;
1937
1938 node.announcement_info =
1939 if let (Some(signed_announcement), true) = (full_msg, should_relay) {
1940 Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone()))
1941 } else {
1942 Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
1943 features: msg.features.clone(),
1944 last_update: msg.timestamp,
1945 rgb: msg.rgb,
1946 alias: msg.alias,
1947 addresses: msg.addresses.clone(),
1948 }))
1949 };
1950
1951 Ok(())
1952 },
1953 }
1954 }
1955
1956 pub fn update_channel_from_announcement<U: Deref>(
1965 &self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option<U>,
1966 ) -> Result<(), LightningError>
1967 where
1968 U::Target: UtxoLookup,
1969 {
1970 self.pre_channel_announcement_validation_check(&msg.contents, utxo_lookup)?;
1971 verify_channel_announcement(msg, &self.secp_ctx)?;
1972 self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup)
1973 }
1974
1975 pub fn update_channel_from_announcement_no_lookup(
1983 &self, msg: &ChannelAnnouncement,
1984 ) -> Result<(), LightningError> {
1985 self.update_channel_from_announcement::<&UtxoResolver>(msg, &None)
1986 }
1987
1988 pub fn update_channel_from_unsigned_announcement<U: Deref>(
1995 &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
1996 ) -> Result<(), LightningError>
1997 where
1998 U::Target: UtxoLookup,
1999 {
2000 self.pre_channel_announcement_validation_check(&msg, utxo_lookup)?;
2001 self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup)
2002 }
2003
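	/// Adds a channel for which we only have partial information, e.g. from rapid gossip sync,
	/// without a full signed `channel_announcement`.
	///
	/// Sketch (illustrative values; `node_a` and `node_b` are assumed to be distinct `NodeId`s):
	///
	/// ```ignore
	/// use lightning::types::features::ChannelFeatures;
	///
	/// graph.add_channel_from_partial_announcement(
	/// 	42,              // short_channel_id
	/// 	Some(1_000_000), // capacity_sats, if known
	/// 	1_700_000_000,   // timestamp the data was generated
	/// 	ChannelFeatures::empty(),
	/// 	node_a,
	/// 	node_b,
	/// ).unwrap();
	/// ```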
2004 pub fn add_channel_from_partial_announcement(
2011 &self, short_channel_id: u64, capacity_sats: Option<u64>, timestamp: u64,
2012 features: ChannelFeatures, node_id_1: NodeId, node_id_2: NodeId,
2013 ) -> Result<(), LightningError> {
2014 if node_id_1 == node_id_2 {
2015 return Err(LightningError {
2016 err: "Channel announcement node had a channel with itself".to_owned(),
2017 action: ErrorAction::IgnoreError,
2018 });
2019 };
2020
2021 let channel_info = ChannelInfo {
2022 features,
2023 node_one: node_id_1,
2024 one_to_two: None,
2025 node_two: node_id_2,
2026 two_to_one: None,
2027 capacity_sats,
2028 announcement_message: None,
2029 announcement_received_time: timestamp,
2030 node_one_counter: u32::max_value(),
2031 node_two_counter: u32::max_value(),
2032 };
2033
2034 self.add_channel_between_nodes(short_channel_id, channel_info, None)
2035 }
2036
2037 fn add_channel_between_nodes(
2038 &self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<Amount>,
2039 ) -> Result<(), LightningError> {
2040 let mut channels = self.channels.write().unwrap();
2041 let mut nodes = self.nodes.write().unwrap();
2042
2043 let node_id_a = channel_info.node_one.clone();
2044 let node_id_b = channel_info.node_two.clone();
2045
2046 log_gossip!(
2047 self.logger,
2048 "Adding channel {} between nodes {} and {}",
2049 short_channel_id,
2050 node_id_a,
2051 node_id_b
2052 );
2053
2054 let channel_info = match channels.entry(short_channel_id) {
2055 IndexedMapEntry::Occupied(mut entry) => {
2056 if utxo_value.is_some() {
2060 self.remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id);
2069 *entry.get_mut() = channel_info;
2070 entry.into_mut()
2071 } else {
2072 return Err(LightningError {
2073 err: "Already have knowledge of channel".to_owned(),
2074 action: ErrorAction::IgnoreDuplicateGossip,
2075 });
2076 }
2077 },
2078 IndexedMapEntry::Vacant(entry) => entry.insert(channel_info),
2079 };
2080
2081 let mut node_counter_id = [
2082 (&mut channel_info.node_one_counter, node_id_a),
2083 (&mut channel_info.node_two_counter, node_id_b),
2084 ];
2085 for (chan_info_node_counter, current_node_id) in node_counter_id.iter_mut() {
2086 match nodes.entry(current_node_id.clone()) {
2087 IndexedMapEntry::Occupied(node_entry) => {
2088 let node = node_entry.into_mut();
2089 node.channels.push(short_channel_id);
2090 **chan_info_node_counter = node.node_counter;
2091 },
2092 IndexedMapEntry::Vacant(node_entry) => {
2093 let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
2094 **chan_info_node_counter = removed_node_counters.pop().unwrap_or_else(|| {
2095 self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32
2096 });
2097 node_entry.insert(NodeInfo {
2098 channels: vec![short_channel_id],
2099 announcement_info: None,
2100 node_counter: **chan_info_node_counter,
2101 });
2102 },
2103 };
2104 }
2105
2106 core::mem::drop(nodes);
2107 core::mem::drop(channels);
2108 self.test_node_counter_consistency();
2109
2110 Ok(())
2111 }
2112
2113 fn pre_channel_announcement_validation_check<U: Deref>(
2119 &self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
2120 ) -> Result<(), LightningError>
2121 where
2122 U::Target: UtxoLookup,
2123 {
2124 let channels = self.channels.read().unwrap();
2125
2126 if let Some(chan) = channels.get(&msg.short_channel_id) {
2127 if chan.capacity_sats.is_some() {
2128 if msg.node_id_1 == chan.node_one && msg.node_id_2 == chan.node_two {
2140 return Err(LightningError {
2141 err: "Already have chain-validated channel".to_owned(),
2142 action: ErrorAction::IgnoreDuplicateGossip,
2143 });
2144 }
2145 } else if utxo_lookup.is_none() {
2146 return Err(LightningError {
2149 err: "Already have non-chain-validated channel".to_owned(),
2150 action: ErrorAction::IgnoreDuplicateGossip,
2151 });
2152 }
2153 }
2154
2155 Ok(())
2156 }
2157
2158 fn update_channel_from_unsigned_announcement_intern<U: Deref>(
2163 &self, msg: &msgs::UnsignedChannelAnnouncement,
2164 full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>,
2165 ) -> Result<(), LightningError>
2166 where
2167 U::Target: UtxoLookup,
2168 {
2169 if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
2170 return Err(LightningError {
2171 err: "Channel announcement node had a channel with itself".to_owned(),
2172 action: ErrorAction::IgnoreError,
2173 });
2174 }
2175
2176 if msg.chain_hash != self.chain_hash {
2177 return Err(LightningError {
2178 err: "Channel announcement chain hash does not match genesis hash".to_owned(),
2179 action: ErrorAction::IgnoreAndLog(Level::Debug),
2180 });
2181 }
2182
2183 {
2184 let removed_channels = self.removed_channels.lock().unwrap();
2185 let removed_nodes = self.removed_nodes.lock().unwrap();
2186 if removed_channels.contains_key(&msg.short_channel_id)
2187 || removed_nodes.contains_key(&msg.node_id_1)
2188 || removed_nodes.contains_key(&msg.node_id_2)
2189 {
2190 return Err(LightningError{
2191 err: format!("Channel with SCID {} or one of its nodes was removed from our network graph recently", &msg.short_channel_id),
2192 action: ErrorAction::IgnoreAndLog(Level::Gossip)});
2193 }
2194 }
2195
2196 let utxo_value =
2197 self.pending_checks.check_channel_announcement(utxo_lookup, msg, full_msg)?;
2198
2199 #[allow(unused_mut, unused_assignments)]
2200 let mut announcement_received_time = 0;
2201 #[cfg(feature = "std")]
2202 {
2203 announcement_received_time = SystemTime::now()
2204 .duration_since(UNIX_EPOCH)
2205 .expect("Time must be > 1970")
2206 .as_secs();
2207 }
2208
2209 let chan_info = ChannelInfo {
2210 features: msg.features.clone(),
2211 node_one: msg.node_id_1,
2212 one_to_two: None,
2213 node_two: msg.node_id_2,
2214 two_to_one: None,
2215 capacity_sats: utxo_value.map(|a| a.to_sat()),
2216 announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2217 full_msg.cloned()
2218 } else {
2219 None
2220 },
2221 announcement_received_time,
2222 node_one_counter: u32::max_value(),
2223 node_two_counter: u32::max_value(),
2224 };
2225
2226 self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?;
2227
2228 log_gossip!(
2229 self.logger,
2230 "Added channel_announcement for {}{}",
2231 msg.short_channel_id,
2232 if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }
2233 );
2234 Ok(())
2235 }
2236
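/// Marks a channel in the graph as failed permanently.
///
/// The channel and any node for which this was their last channel are removed from the graph.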
2237 pub fn channel_failed_permanent(&self, short_channel_id: u64) {
2241 #[cfg(feature = "std")]
2242 let current_time_unix = Some(
2243 SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2244 );
2245 #[cfg(not(feature = "std"))]
2246 let current_time_unix = None;
2247
2248 self.channel_failed_permanent_with_time(short_channel_id, current_time_unix)
2249 }
2250
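/// As [`Self::channel_failed_permanent`], but takes an explicit removal time so the channel
/// can be tracked in `removed_channels` (`None` when no wall-clock time is available).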
2251 fn channel_failed_permanent_with_time(
2255 &self, short_channel_id: u64, current_time_unix: Option<u64>,
2256 ) {
2257 let mut channels = self.channels.write().unwrap();
2258 if let Some(chan) = channels.remove(&short_channel_id) {
2259 let mut nodes = self.nodes.write().unwrap();
2260 self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix);
2261 self.remove_channel_in_nodes(&mut nodes, &chan, short_channel_id);
2262 }
2263 }
2264
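/// Marks a node in the graph as permanently failed, removing it and all of its channels.
///
/// The removed node and channels are tracked so that re-announcements arriving shortly after
/// removal are ignored.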
2265 pub fn node_failed_permanent(&self, node_id: &PublicKey) {
2268 #[cfg(feature = "std")]
2269 let current_time_unix = Some(
2270 SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2271 );
2272 #[cfg(not(feature = "std"))]
2273 let current_time_unix = None;
2274
2275 let node_id = NodeId::from_pubkey(node_id);
2276 let mut channels = self.channels.write().unwrap();
2277 let mut nodes = self.nodes.write().unwrap();
2278 let mut removed_channels = self.removed_channels.lock().unwrap();
2279 let mut removed_nodes = self.removed_nodes.lock().unwrap();
2280
2281 if let Some(node) = nodes.remove(&node_id) {
2282 let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
2283 for scid in node.channels.iter() {
2284 if let Some(chan_info) = channels.remove(scid) {
2285 let other_node_id = if node_id == chan_info.node_one {
2286 chan_info.node_two
2287 } else {
2288 chan_info.node_one
2289 };
2290 if let IndexedMapEntry::Occupied(mut other_node_entry) =
2291 nodes.entry(other_node_id)
2292 {
2293 other_node_entry.get_mut().channels.retain(|chan_id| *scid != *chan_id);
2294 if other_node_entry.get().channels.is_empty() {
2295 removed_node_counters.push(other_node_entry.get().node_counter);
2296 other_node_entry.remove_entry();
2297 }
2298 }
2299 removed_channels.insert(*scid, current_time_unix);
2300 } else {
2301 debug_assert!(false, "Channels in nodes must always have channel info");
2302 }
2303 }
2304 removed_node_counters.push(node.node_counter);
2305 removed_nodes.insert(node_id, current_time_unix);
2306 }
2307 }
2308
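/// As [`Self::remove_stale_channels_and_tracking_with_time`], but fetches the current time
/// from the system clock (and is thus only available with the `std` feature).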
2309 #[cfg(feature = "std")]
2310 pub fn remove_stale_channels_and_tracking(&self) {
2327 let time =
2328 SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
2329 self.remove_stale_channels_and_tracking_with_time(time);
2330 }
2331
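/// Removes directional channel updates older than [`STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS`],
/// removes channels which are then missing an update in either direction (and whose
/// announcement is equally old), and prunes tracking of removed channels and nodes once their
/// entries are older than [`REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS`].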
2332 #[cfg_attr(feature = "std", doc = "")]
2343 #[cfg_attr(
2344 feature = "std",
2345 doc = "This function takes the current unix time as an argument. For users with the `std` feature"
2346 )]
2347 #[cfg_attr(
2348 feature = "std",
2349 doc = "enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable."
2350 )]
2351 pub fn remove_stale_channels_and_tracking_with_time(&self, current_time_unix: u64) {
2352 let mut channels = self.channels.write().unwrap();
2353 if current_time_unix > u32::max_value() as u64 {
2355 return;
2356 }
if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2358 return;
2359 }
2360 let min_time_unix: u32 = (current_time_unix - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
2361 let mut scids_to_remove = new_hash_set();
2362 for (scid, info) in channels.unordered_iter_mut() {
2363 if info.one_to_two.is_some()
2364 && info.one_to_two.as_ref().unwrap().last_update < min_time_unix
2365 {
2366 log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}",
2367 scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix);
2368 info.one_to_two = None;
2369 }
2370 if info.two_to_one.is_some()
2371 && info.two_to_one.as_ref().unwrap().last_update < min_time_unix
2372 {
2373 log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}",
2374 scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix);
2375 info.two_to_one = None;
2376 }
2377 if info.one_to_two.is_none() || info.two_to_one.is_none() {
2378 let announcement_received_timestamp = info.announcement_received_time;
2382 if announcement_received_timestamp < min_time_unix as u64 {
2383 log_gossip!(self.logger, "Removing channel {} because both directional updates are missing and its announcement timestamp {} is below {}",
2384 scid, announcement_received_timestamp, min_time_unix);
2385 scids_to_remove.insert(*scid);
2386 }
2387 }
2388 }
2389 if !scids_to_remove.is_empty() {
2390 let mut nodes = self.nodes.write().unwrap();
2391 let mut removed_channels_lck = self.removed_channels.lock().unwrap();
2392
2393 let channels_removed_bulk = channels.remove_fetch_bulk(&scids_to_remove);
2394 self.removed_node_counters.lock().unwrap().reserve(channels_removed_bulk.len());
2395 let mut nodes_to_remove = hash_set_with_capacity(channels_removed_bulk.len());
2396 for (scid, info) in channels_removed_bulk {
2397 self.remove_channel_in_nodes_callback(&mut nodes, &info, scid, |e| {
2398 nodes_to_remove.insert(*e.key());
2399 });
2400 removed_channels_lck.insert(scid, Some(current_time_unix));
2401 }
2402 nodes.remove_bulk(&nodes_to_remove);
2403 }
2404
2405 let should_keep_tracking = |time: &mut Option<u64>| {
2406 if let Some(time) = time {
2407 current_time_unix.saturating_sub(*time) < REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS
2408 } else {
2409 #[cfg(not(feature = "std"))]
2413 {
2414 let mut tracked_time = Some(current_time_unix);
2415 core::mem::swap(time, &mut tracked_time);
2416 return true;
2417 }
2418 #[allow(unreachable_code)]
2419 false
2420 }
2421 };
2422
2423 self.removed_channels.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2424 self.removed_nodes.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2425 }
2426
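/// Applies a signed `channel_update` to the corresponding direction of an already-known
/// channel, verifying the message signature before updating the graph.
///
/// A minimal usage sketch (assuming `graph: &NetworkGraph<L>` and a received
/// `upd: msgs::ChannelUpdate`):
/// ```ignore
/// if let Err(e) = graph.update_channel(&upd) {
///     // Stale or duplicate updates are reported via `ErrorAction::IgnoreDuplicateGossip`.
///     println!("channel_update ignored: {}", e.err);
/// }
/// ```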
2427 pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2437 self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false)
2438 }
2439
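/// As [`Self::update_channel`], but for updates whose signature is not checked, e.g. because
/// the update came from a trusted source.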
2440 pub fn update_channel_unsigned(
2447 &self, msg: &msgs::UnsignedChannelUpdate,
2448 ) -> Result<(), LightningError> {
2449 self.update_channel_internal(msg, None, None, false)
2450 }
2451
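/// Verifies the signature and sanity of a `channel_update` against the current graph without
/// applying it.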
2452 pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2459 self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true)
2460 }
2461
2462 fn update_channel_internal(
2463 &self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>,
2464 sig: Option<&secp256k1::ecdsa::Signature>, only_verify: bool,
2465 ) -> Result<(), LightningError> {
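// Bit 1 of `channel_flags` is the "disable" bit; the channel direction is enabled when it is
// unset.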
2466 let chan_enabled = msg.channel_flags & (1 << 1) != (1 << 1);
2467
2468 if msg.chain_hash != self.chain_hash {
2469 return Err(LightningError {
2470 err: "Channel update chain hash does not match genesis hash".to_owned(),
2471 action: ErrorAction::IgnoreAndLog(Level::Debug),
2472 });
2473 }
2474
2475 #[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))]
2476 {
2477 let time = SystemTime::now()
2480 .duration_since(UNIX_EPOCH)
2481 .expect("Time must be > 1970")
2482 .as_secs();
2483 if (msg.timestamp as u64) < time - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2484 return Err(LightningError {
2485 err: "channel_update is older than two weeks old".to_owned(),
2486 action: ErrorAction::IgnoreAndLog(Level::Gossip),
2487 });
2488 }
2489 if msg.timestamp as u64 > time + 60 * 60 * 24 {
2490 return Err(LightningError {
2491 err: "channel_update has a timestamp more than a day in the future".to_owned(),
2492 action: ErrorAction::IgnoreAndLog(Level::Gossip),
2493 });
2494 }
2495 }
2496
2497 log_gossip!(
2498 self.logger,
2499 "Updating channel {} in direction {} with timestamp {}",
2500 msg.short_channel_id,
2501 msg.channel_flags & 1,
2502 msg.timestamp
2503 );
2504
2505 if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
2506 return Err(LightningError {
2507 err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(),
2508 action: ErrorAction::IgnoreError,
2509 });
2510 }
2511
2512 let check_update_latest =
2513 |target: &Option<ChannelUpdateInfo>| -> Result<(), LightningError> {
2514 if let Some(existing_chan_info) = target {
2515 if existing_chan_info.last_update > msg.timestamp {
2522 return Err(LightningError {
2523 err: "Update older than last processed update".to_owned(),
2524 action: ErrorAction::IgnoreDuplicateGossip,
2525 });
2526 } else if existing_chan_info.last_update == msg.timestamp {
2527 return Err(LightningError {
2528 err: "Update had same timestamp as last processed update".to_owned(),
2529 action: ErrorAction::IgnoreDuplicateGossip,
2530 });
2531 }
2532 }
2533 Ok(())
2534 };
2535
2536 let check_msg_sanity =
2537 |channel: &ChannelInfo| -> Result<(), LightningError> {
2538 if let Some(capacity_sats) = channel.capacity_sats {
2539 if capacity_sats > MAX_VALUE_MSAT / 1000
2542 || msg.htlc_maximum_msat > capacity_sats * 1000
2543 {
2544 return Err(LightningError{err:
2545 "htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(),
2546 action: ErrorAction::IgnoreError});
2547 }
2548 }
2549
2550 if msg.channel_flags & 1 == 1 {
2551 check_update_latest(&channel.two_to_one)
2552 } else {
2553 check_update_latest(&channel.one_to_two)
2554 }
2555 };
2556
2557 let mut node_pubkey = None;
2558 {
2559 let channels = self.channels.read().unwrap();
2560 match channels.get(&msg.short_channel_id) {
2561 None => {
2562 core::mem::drop(channels);
2563 self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?;
2564 return Err(LightningError {
2565 err: "Couldn't find channel for update".to_owned(),
2566 action: ErrorAction::IgnoreAndLog(Level::Gossip),
2567 });
2568 },
2569 Some(channel) => {
2570 check_msg_sanity(channel)?;
2571 let node_id = if msg.channel_flags & 1 == 1 {
2572 channel.node_two.as_slice()
2573 } else {
2574 channel.node_one.as_slice()
2575 };
2576 if sig.is_some() {
2577 node_pubkey =
2582 Some(PublicKey::from_slice(node_id).map_err(|_| LightningError {
2583 err: "Couldn't parse source node pubkey".to_owned(),
2584 action: ErrorAction::IgnoreAndLog(Level::Debug),
2585 })?);
2586 }
2587 },
2588 }
2589 }
2590
2591 if let Some(sig) = sig {
2592 let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]);
2593 let node_pubkey = if let Some(pubkey) = node_pubkey {
2594 pubkey
2595 } else {
2596 debug_assert!(false, "node_pubkey should have been decoded above");
2597 let err = "node_pubkey wasn't decoded but we need it to check a sig".to_owned();
2598 let action = ErrorAction::IgnoreAndLog(Level::Error);
2599 return Err(LightningError { err, action });
2600 };
2601 secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update");
2602 }
2603
2604 if only_verify {
2605 return Ok(());
2606 }
2607
2608 let mut channels = self.channels.write().unwrap();
2609 if let Some(channel) = channels.get_mut(&msg.short_channel_id) {
2610 check_msg_sanity(channel)?;
2611
2612 let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2613 full_msg.cloned()
2614 } else {
2615 None
2616 };
2617
2618 let new_channel_info = Some(ChannelUpdateInfo {
2619 enabled: chan_enabled,
2620 last_update: msg.timestamp,
2621 cltv_expiry_delta: msg.cltv_expiry_delta,
2622 htlc_minimum_msat: msg.htlc_minimum_msat,
2623 htlc_maximum_msat: msg.htlc_maximum_msat,
2624 fees: RoutingFees {
2625 base_msat: msg.fee_base_msat,
2626 proportional_millionths: msg.fee_proportional_millionths,
2627 },
2628 last_update_message,
2629 });
2630
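// Bit 0 of `channel_flags` selects the direction being updated: set means the update comes
// from `node_two` (the `two_to_one` direction), unset means it comes from `node_one`.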
2631 if msg.channel_flags & 1 == 1 {
2632 channel.two_to_one = new_channel_info;
2633 } else {
2634 channel.one_to_two = new_channel_info;
2635 }
2636 }
2637
2638 Ok(())
2639 }
2640
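/// Removes `short_channel_id` from both endpoints' channel lists, invoking `remove_node` on
/// any node entry left with no remaining channels (after recording its freed node counter).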
2641 fn remove_channel_in_nodes_callback<RM: FnMut(IndexedMapOccupiedEntry<NodeId, NodeInfo>)>(
2642 &self, nodes: &mut IndexedMap<NodeId, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64,
2643 mut remove_node: RM,
2644 ) {
2645 macro_rules! remove_from_node {
2646 ($node_id: expr) => {
2647 if let IndexedMapEntry::Occupied(mut entry) = nodes.entry($node_id) {
2648 entry.get_mut().channels.retain(|chan_id| short_channel_id != *chan_id);
2649 if entry.get().channels.is_empty() {
2650 self.removed_node_counters.lock().unwrap().push(entry.get().node_counter);
2651 remove_node(entry);
2652 }
2653 } else {
2654 panic!(
2655 "Had channel that pointed to unknown node (ie inconsistent network map)!"
2656 );
2657 }
2658 };
2659 }
2660
2661 remove_from_node!(chan.node_one);
2662 remove_from_node!(chan.node_two);
2663 }
2664
2665 fn remove_channel_in_nodes(
2666 &self, nodes: &mut IndexedMap<NodeId, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64,
2667 ) {
2668 self.remove_channel_in_nodes_callback(nodes, chan, short_channel_id, |e| {
2669 e.remove_entry();
2670 });
2671 }
2672}
2673
2674impl ReadOnlyNetworkGraph<'_> {
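/// Returns all known channels, keyed by their short channel ID.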
2675 pub fn channels(&self) -> &IndexedMap<u64, ChannelInfo> {
2679 &*self.channels
2680 }
2681
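/// Returns information on a channel with the given short channel ID, if it is known.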
2682 pub fn channel(&self, short_channel_id: u64) -> Option<&ChannelInfo> {
2684 self.channels.get(&short_channel_id)
2685 }
2686
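/// Returns the short channel IDs of all known channels (exposed for C bindings).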
2687 #[cfg(c_bindings)]
pub fn list_channels(&self) -> Vec<u64> {
2690 self.channels.unordered_keys().map(|c| *c).collect()
2691 }
2692
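/// Returns all known nodes, keyed by their node ID.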
2693 pub fn nodes(&self) -> &IndexedMap<NodeId, NodeInfo> {
2697 &*self.nodes
2698 }
2699
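/// Returns information on a node with the given ID, if it is known.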
2700 pub fn node(&self, node_id: &NodeId) -> Option<&NodeInfo> {
2702 self.nodes.get(node_id)
2703 }
2704
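/// Returns the IDs of all known nodes (exposed for C bindings).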
2705 #[cfg(c_bindings)]
pub fn list_nodes(&self) -> Vec<NodeId> {
2708 self.nodes.unordered_keys().map(|n| *n).collect()
2709 }
2710
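/// Returns the announced socket addresses for the node with the given public key, if we have
/// received and stored a `node_announcement` for it.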
2711 pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
2715 self.nodes
2716 .get(&NodeId::from_pubkey(&pubkey))
2717 .and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
2718 }
2719
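/// Returns the highest node counter currently assigned in the graph.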
2720 pub(crate) fn max_node_counter(&self) -> u32 {
2722 self.max_node_counter
2723 }
2724}
2725
2726#[cfg(test)]
2727pub(crate) mod tests {
2728 use crate::ln::chan_utils::make_funding_redeemscript;
2729 use crate::ln::channelmanager;
2730 use crate::ln::msgs::{BaseMessageHandler, MessageSendEvent, SocketAddress};
2731 use crate::ln::msgs::{
2732 ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, QueryChannelRange,
2733 QueryShortChannelIds, ReplyChannelRange, RoutingMessageHandler,
2734 UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement,
2735 MAX_VALUE_MSAT,
2736 };
2737 use crate::routing::gossip::{
2738 ChannelInfo, ChannelUpdateInfo, NetworkGraph, NetworkUpdate, NodeAlias,
2739 NodeAnnouncementInfo, NodeId, NodeInfo, P2PGossipSync, RoutingFees,
2740 MAX_EXCESS_BYTES_FOR_RELAY,
2741 };
2742 use crate::routing::utxo::{UtxoLookupError, UtxoResult};
2743 #[cfg(feature = "std")]
2744 use crate::types::features::InitFeatures;
2745 use crate::util::config::UserConfig;
2746 use crate::util::scid_utils::scid_from_parts;
2747 use crate::util::ser::{Hostname, LengthReadable, Readable, ReadableArgs, Writeable};
2748 use crate::util::test_utils;
2749
2750 use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS;
2751 use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
2752
2753 use bitcoin::amount::Amount;
2754 use bitcoin::constants::ChainHash;
2755 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
2756 use bitcoin::hashes::Hash;
2757 use bitcoin::hex::FromHex;
2758 use bitcoin::network::Network;
2759 use bitcoin::script::ScriptBuf;
2760 use bitcoin::secp256k1::{All, Secp256k1};
2761 use bitcoin::secp256k1::{PublicKey, SecretKey};
2762 use bitcoin::transaction::TxOut;
2763
2764 use crate::io;
2765 use crate::prelude::*;
2766 use crate::sync::Arc;
2767 use bitcoin::secp256k1;
2768
2769 fn create_network_graph() -> NetworkGraph<Arc<test_utils::TestLogger>> {
2770 let logger = Arc::new(test_utils::TestLogger::new());
2771 NetworkGraph::new(Network::Testnet, logger)
2772 }
2773
2774 fn create_gossip_sync(
2775 network_graph: &NetworkGraph<Arc<test_utils::TestLogger>>,
2776 ) -> (
2777 Secp256k1<All>,
2778 P2PGossipSync<
2779 &NetworkGraph<Arc<test_utils::TestLogger>>,
2780 Arc<test_utils::TestChainSource>,
2781 Arc<test_utils::TestLogger>,
2782 >,
2783 ) {
2784 let secp_ctx = Secp256k1::new();
2785 let logger = Arc::new(test_utils::TestLogger::new());
2786 let gossip_sync = P2PGossipSync::new(network_graph, None, Arc::clone(&logger));
2787 (secp_ctx, gossip_sync)
2788 }
2789
2790 #[test]
2791 fn request_full_sync_finite_times() {
2792 let network_graph = create_network_graph();
2793 let (_, gossip_sync) = create_gossip_sync(&network_graph);
2794
2795 assert!(gossip_sync.should_request_full_sync());
2796 assert!(gossip_sync.should_request_full_sync());
2797 assert!(gossip_sync.should_request_full_sync());
2798 assert!(gossip_sync.should_request_full_sync());
2799 assert!(gossip_sync.should_request_full_sync());
2800 assert!(!gossip_sync.should_request_full_sync());
2801 }
2802
2803 pub(crate) fn get_signed_node_announcement<F: Fn(&mut UnsignedNodeAnnouncement)>(
2804 f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2805 ) -> NodeAnnouncement {
2806 let node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key));
2807 let mut unsigned_announcement = UnsignedNodeAnnouncement {
2808 features: channelmanager::provided_node_features(&UserConfig::default()),
2809 timestamp: 100,
2810 node_id,
2811 rgb: [0; 3],
2812 alias: NodeAlias([0; 32]),
2813 addresses: Vec::new(),
2814 excess_address_data: Vec::new(),
2815 excess_data: Vec::new(),
2816 };
2817 f(&mut unsigned_announcement);
2818 let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2819 NodeAnnouncement {
2820 signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2821 contents: unsigned_announcement,
2822 }
2823 }
2824
2825 pub(crate) fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(
2826 f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2827 ) -> ChannelAnnouncement {
2828 let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key);
2829 let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key);
2830 let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
2831 let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
2832
2833 let mut unsigned_announcement = UnsignedChannelAnnouncement {
2834 features: channelmanager::provided_channel_features(&UserConfig::default()),
2835 chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2836 short_channel_id: 0,
2837 node_id_1: NodeId::from_pubkey(&node_id_1),
2838 node_id_2: NodeId::from_pubkey(&node_id_2),
2839 bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(
2840 &secp_ctx,
2841 node_1_btckey,
2842 )),
2843 bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(
2844 &secp_ctx,
2845 node_2_btckey,
2846 )),
2847 excess_data: Vec::new(),
2848 };
2849 f(&mut unsigned_announcement);
2850 let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2851 ChannelAnnouncement {
2852 node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_key),
2853 node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_key),
2854 bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_btckey),
2855 bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_btckey),
2856 contents: unsigned_announcement,
2857 }
2858 }
2859
2860 pub(crate) fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> ScriptBuf {
2861 let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
2862 let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
2863 make_funding_redeemscript(
2864 &PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
2865 &PublicKey::from_secret_key(secp_ctx, &node_2_btckey),
2866 )
2867 .to_p2wsh()
2868 }
2869
2870 pub(crate) fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(
2871 f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2872 ) -> ChannelUpdate {
2873 let mut unsigned_channel_update = UnsignedChannelUpdate {
2874 chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2875 short_channel_id: 0,
2876 timestamp: 100,
2877 message_flags: 1,
channel_flags: 0,
2879 cltv_expiry_delta: 144,
2880 htlc_minimum_msat: 1_000_000,
2881 htlc_maximum_msat: 1_000_000,
2882 fee_base_msat: 10_000,
2883 fee_proportional_millionths: 20,
2884 excess_data: Vec::new(),
2885 };
2886 f(&mut unsigned_channel_update);
2887 let msghash =
2888 hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
2889 ChannelUpdate {
2890 signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2891 contents: unsigned_channel_update,
2892 }
2893 }
2894
2895 #[test]
2896 fn handling_node_announcements() {
2897 let network_graph = create_network_graph();
2898 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
2899
2900 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2901 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2902 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2903 let zero_hash = Sha256dHash::hash(&[0; 32]);
2904
2905 let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
2906 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2907 Ok(_) => panic!(),
2908 Err(e) => assert_eq!("No existing channels for node_announcement", e.err),
2909 };
2910
2911 {
2912 let valid_announcement =
2914 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2915 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
2916 {
2917 Ok(res) => assert!(res),
2918 _ => panic!(),
2919 };
2920 }
2921
2922 let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
2923 match gossip_sync.handle_node_announcement(
2924 Some(node_1_pubkey),
2925 &NodeAnnouncement {
2926 signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey),
2927 contents: valid_announcement.contents.clone(),
2928 },
2929 ) {
2930 Ok(_) => panic!(),
2931 Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message"),
2932 };
2933
2934 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2935 Ok(res) => assert!(res),
2936 Err(_) => panic!(),
2937 };
2938
2939 let announcement_with_data = get_signed_node_announcement(
2940 |unsigned_announcement| {
2941 unsigned_announcement.timestamp += 1000;
2942 unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
2943 },
2944 node_1_privkey,
2945 &secp_ctx,
2946 );
2947 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement_with_data) {
2949 Ok(res) => assert!(!res),
2950 Err(_) => panic!(),
2951 };
2952
2953 let outdated_announcement = get_signed_node_announcement(
2956 |unsigned_announcement| {
2957 unsigned_announcement.timestamp += 1000 - 10;
2958 },
2959 node_1_privkey,
2960 &secp_ctx,
2961 );
2962 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &outdated_announcement) {
2963 Ok(_) => panic!(),
2964 Err(e) => assert_eq!(e.err, "Update older than last processed update"),
2965 };
2966 }
2967
2968 #[test]
2969 fn handling_channel_announcements() {
2970 let secp_ctx = Secp256k1::new();
2971 let logger = test_utils::TestLogger::new();
2972
2973 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2974 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2975 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2976
2977 let good_script = get_channel_script(&secp_ctx);
2978 let valid_announcement =
2979 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2980
2981 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
2983 let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger);
2984 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2985 Ok(res) => assert!(res),
2986 _ => panic!(),
2987 };
2988
2989 let scid = valid_announcement.contents.short_channel_id;
2990 match network_graph.read_only().channels().get(&scid) {
2991 None => panic!(),
2992 Some(_) => (),
2993 };
2994
2995 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2998 Ok(_) => panic!(),
2999 Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel"),
3000 };
3001
3002 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3004 *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
3005 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3006 gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3007
3008 let valid_announcement = get_signed_channel_announcement(
3009 |unsigned_announcement| {
3010 unsigned_announcement.short_channel_id += 1;
3011 },
3012 node_1_privkey,
3013 node_2_privkey,
3014 &secp_ctx,
3015 );
3016 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3017 Ok(_) => panic!(),
3018 Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry"),
3019 };
3020
3021 *chain_source.utxo_ret.lock().unwrap() =
3023 UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script.clone() }));
3024 let valid_announcement = get_signed_channel_announcement(
3025 |unsigned_announcement| {
3026 unsigned_announcement.short_channel_id += 2;
3027 },
3028 node_1_privkey,
3029 node_2_privkey,
3030 &secp_ctx,
3031 );
3032 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3033 Ok(res) => assert!(res),
3034 _ => panic!(),
3035 };
3036
3037 let scid = valid_announcement.contents.short_channel_id;
3038 match network_graph.read_only().channels().get(&scid) {
3039 None => panic!(),
3040 Some(_) => (),
3041 };
3042
3043 *chain_source.utxo_ret.lock().unwrap() =
3046 UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script }));
3047 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3048 Ok(_) => panic!(),
3049 Err(e) => assert_eq!(e.err, "Already have chain-validated channel"),
3050 };
3051
3052 #[cfg(feature = "std")]
3053 {
3054 use std::time::{SystemTime, UNIX_EPOCH};
3055
3056 let tracking_time = SystemTime::now()
3057 .duration_since(UNIX_EPOCH)
3058 .expect("Time must be > 1970")
3059 .as_secs();
3060 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3062 gossip_sync.network_graph().node_failed_permanent(&node_1_pubkey);
3063
3064 let valid_announcement = get_signed_channel_announcement(
3066 |unsigned_announcement| {
3067 unsigned_announcement.short_channel_id += 3;
3068 },
3069 node_1_privkey,
3070 node_2_privkey,
3071 &secp_ctx,
3072 );
3073 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3074 Ok(_) => panic!(),
3075 Err(e) => assert_eq!(e.err, "Channel with SCID 3 or one of its nodes was removed from our network graph recently")
3076 }
3077
3078 gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time(
3079 tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3080 );
3081
3082 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3084 {
3085 Ok(res) => assert!(res),
3086 _ => panic!(),
3087 }
3088 }
3089
3090 let valid_excess_data_announcement = get_signed_channel_announcement(
3091 |unsigned_announcement| {
3092 unsigned_announcement.short_channel_id += 4;
3093 unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3094 },
3095 node_1_privkey,
3096 node_2_privkey,
3097 &secp_ctx,
3098 );
3099
3100 let mut invalid_sig_announcement = valid_excess_data_announcement.clone();
3101 invalid_sig_announcement.contents.excess_data = Vec::new();
3102 match gossip_sync
3103 .handle_channel_announcement(Some(node_1_pubkey), &invalid_sig_announcement)
3104 {
3105 Ok(_) => panic!(),
3106 Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message"),
3107 };
3108
3109 match gossip_sync
3111 .handle_channel_announcement(Some(node_1_pubkey), &valid_excess_data_announcement)
3112 {
3113 Ok(res) => assert!(!res),
3114 _ => panic!(),
3115 };
3116
3117 let channel_to_itself_announcement =
3118 get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx);
3119 match gossip_sync
3120 .handle_channel_announcement(Some(node_1_pubkey), &channel_to_itself_announcement)
3121 {
3122 Ok(_) => panic!(),
3123 Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself"),
3124 };
3125
3126 let incorrect_chain_announcement = get_signed_channel_announcement(
3129 |unsigned_announcement| {
3130 unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin);
3131 },
3132 node_1_privkey,
3133 node_2_privkey,
3134 &secp_ctx,
3135 );
3136 match gossip_sync
3137 .handle_channel_announcement(Some(node_1_pubkey), &incorrect_chain_announcement)
3138 {
3139 Ok(_) => panic!(),
3140 Err(e) => {
3141 assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash")
3142 },
3143 };
3144 }
3145
3146 #[test]
3147 fn handling_channel_update() {
3148 let secp_ctx = Secp256k1::new();
3149 let logger = test_utils::TestLogger::new();
3150 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3151 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3152 let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3153
3154 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3155 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3156 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3157
3158 let amount_sats = Amount::from_sat(1000_000);
3159 let short_channel_id;
3160
3161 {
3162 let good_script = get_channel_script(&secp_ctx);
3164 *chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut {
3165 value: amount_sats,
3166 script_pubkey: good_script.clone(),
3167 }));
3168
3169 let valid_channel_announcement =
3170 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3171 short_channel_id = valid_channel_announcement.contents.short_channel_id;
3172 match gossip_sync
3173 .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3174 {
3175 Ok(_) => (),
3176 Err(_) => panic!(),
3177 };
3178 }
3179
3180 let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3181 network_graph.verify_channel_update(&valid_channel_update).unwrap();
3182 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3183 Ok(res) => assert!(res),
3184 _ => panic!(),
3185 };
3186
3187 {
3188 match network_graph.read_only().channels().get(&short_channel_id) {
3189 None => panic!(),
3190 Some(channel_info) => {
3191 assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144);
3192 assert!(channel_info.two_to_one.is_none());
3193 },
3194 };
3195 }
3196
3197 let valid_channel_update = get_signed_channel_update(
3198 |unsigned_channel_update| {
3199 unsigned_channel_update.timestamp += 100;
3200 unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3201 },
3202 node_1_privkey,
3203 &secp_ctx,
3204 );
3205 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3207 Ok(res) => assert!(!res),
3208 _ => panic!(),
3209 };
3210
3211 let valid_channel_update = get_signed_channel_update(
3212 |unsigned_channel_update| {
3213 unsigned_channel_update.timestamp += 110;
3214 unsigned_channel_update.short_channel_id += 1;
3215 },
3216 node_1_privkey,
3217 &secp_ctx,
3218 );
3219 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3220 Ok(_) => panic!(),
3221 Err(e) => assert_eq!(e.err, "Couldn't find channel for update"),
3222 };
3223
3224 let valid_channel_update = get_signed_channel_update(
3225 |unsigned_channel_update| {
3226 unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
3227 unsigned_channel_update.timestamp += 110;
3228 },
3229 node_1_privkey,
3230 &secp_ctx,
3231 );
3232 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3233 Ok(_) => panic!(),
3234 Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats"),
3235 };
3236
3237 let valid_channel_update = get_signed_channel_update(
3238 |unsigned_channel_update| {
3239 unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1;
3240 unsigned_channel_update.timestamp += 110;
3241 },
3242 node_1_privkey,
3243 &secp_ctx,
3244 );
3245 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3246 Ok(_) => panic!(),
3247 Err(e) => assert_eq!(
3248 e.err,
3249 "htlc_maximum_msat is larger than channel capacity or capacity is bogus"
3250 ),
3251 };
3252
3253 let valid_channel_update = get_signed_channel_update(
3256 |unsigned_channel_update| {
3257 unsigned_channel_update.timestamp += 100;
3258 },
3259 node_1_privkey,
3260 &secp_ctx,
3261 );
3262 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3263 Ok(_) => panic!(),
3264 Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update"),
3265 };
3266
3267 let mut invalid_sig_channel_update = get_signed_channel_update(
3268 |unsigned_channel_update| {
3269 unsigned_channel_update.timestamp += 500;
3270 },
3271 node_1_privkey,
3272 &secp_ctx,
3273 );
3274 let zero_hash = Sha256dHash::hash(&[0; 32]);
3275 let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
3276 invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey);
3277 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &invalid_sig_channel_update) {
3278 Ok(_) => panic!(),
3279 Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message"),
3280 };
3281
3282 let incorrect_chain_update = get_signed_channel_update(
3285 |unsigned_channel_update| {
3286 unsigned_channel_update.chain_hash =
3287 ChainHash::using_genesis_block(Network::Bitcoin);
3288 },
3289 node_1_privkey,
3290 &secp_ctx,
3291 );
3292
3293 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &incorrect_chain_update) {
3294 Ok(_) => panic!(),
3295 Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash"),
3296 };
3297 }
3298
3299 #[test]
3300 fn handling_network_update() {
3301 let logger = test_utils::TestLogger::new();
3302 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3303 let secp_ctx = Secp256k1::new();
3304
3305 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3306 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3307 let node_2_pk = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3308 let node_2_id = NodeId::from_pubkey(&node_2_pk);
3309
3310 {
3311 assert_eq!(network_graph.read_only().nodes().len(), 0);
3313 }
3314
3315 let scid;
3316 {
3317 let valid_channel_announcement =
3319 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3320 scid = valid_channel_announcement.contents.short_channel_id;
3321 let chain_source: Option<&test_utils::TestChainSource> = None;
3322 assert!(network_graph
3323 .update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3324 .is_ok());
3325 assert!(network_graph.read_only().channels().get(&scid).is_some());
3326
3327 let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3328
3329 assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3330 network_graph.update_channel(&valid_channel_update).unwrap();
3331 assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3332 }
3333
3334 {
3336 match network_graph.read_only().channels().get(&scid) {
3337 None => panic!(),
3338 Some(channel_info) => {
3339 assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3340 },
3341 };
3342
3343 network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3344 short_channel_id: scid,
3345 is_permanent: false,
3346 });
3347
3348 match network_graph.read_only().channels().get(&scid) {
3349 None => panic!(),
3350 Some(channel_info) => {
3351 assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3352 },
3353 };
3354 }
3355
3356 network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3358 short_channel_id: scid,
3359 is_permanent: true,
3360 });
3361
3362 assert_eq!(network_graph.read_only().channels().len(), 0);
3363 assert_eq!(network_graph.read_only().nodes().len(), 0);
3365
3366 {
3367 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3369
3370 let valid_channel_announcement =
3372 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3373 let short_channel_id = valid_channel_announcement.contents.short_channel_id;
3374 let chain_source: Option<&test_utils::TestChainSource> = None;
3375 assert!(network_graph
3376 .update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3377 .is_ok());
3378 assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3379
3380 network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3382 node_id: node_2_pk,
3383 is_permanent: false,
3384 });
3385
3386 assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3387 assert!(network_graph.read_only().nodes().get(&node_2_id).is_some());
3388
3389 network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3391 node_id: node_2_pk,
3392 is_permanent: true,
3393 });
3394
3395 assert_eq!(network_graph.read_only().nodes().len(), 0);
3396 assert_eq!(network_graph.read_only().channels().len(), 0);
3398 }
3399 }
3400
3401 #[test]
3402 fn test_channel_timeouts() {
3403 let logger = test_utils::TestLogger::new();
3405 let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3406 let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3407 let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3408 let secp_ctx = Secp256k1::new();
3409
3410 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3411 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3412 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3413
3414 let valid_channel_announcement =
3415 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3416 let scid = valid_channel_announcement.contents.short_channel_id;
3417 let chain_source: Option<&test_utils::TestChainSource> = None;
3418 assert!(network_graph
3419 .update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3420 .is_ok());
3421 assert!(network_graph.read_only().channels().get(&scid).is_some());
3422
3423 let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3425 assert!(gossip_sync
3426 .handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3427 .is_ok());
3428 assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3429
3430 let valid_channel_update_2 = get_signed_channel_update(
3431 |update| {
3432 update.channel_flags |= 1;
3433 },
3434 node_2_privkey,
3435 &secp_ctx,
3436 );
3437 gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update_2).unwrap();
3438 assert!(network_graph.read_only().channels().get(&scid).unwrap().two_to_one.is_some());
3439
3440 network_graph.remove_stale_channels_and_tracking_with_time(
3441 100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3442 );
3443 assert_eq!(network_graph.read_only().channels().len(), 1);
3444 assert_eq!(network_graph.read_only().nodes().len(), 2);
3445
3446 network_graph.remove_stale_channels_and_tracking_with_time(
3447 101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3448 );
3449 #[cfg(not(feature = "std"))]
3450 {
3451 assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3453 }
3454 network_graph.remove_stale_channels_and_tracking_with_time(
3455 101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3456 );
3457
3458 #[cfg(feature = "std")]
3459 {
3460 assert_eq!(network_graph.read_only().channels().len(), 1);
3464 assert_eq!(network_graph.read_only().nodes().len(), 2);
3465
3466 assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3470 use std::time::{SystemTime, UNIX_EPOCH};
3471 let announcement_time = SystemTime::now()
3472 .duration_since(UNIX_EPOCH)
3473 .expect("Time must be > 1970")
3474 .as_secs();
3475 let valid_channel_update = get_signed_channel_update(
3476 |unsigned_channel_update| {
3477 unsigned_channel_update.timestamp =
3478 (announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
3479 },
3480 node_1_privkey,
3481 &secp_ctx,
3482 );
3483 assert!(gossip_sync
3484 .handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3485 .is_ok());
3486 assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3487 network_graph.remove_stale_channels_and_tracking_with_time(
3488 announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3489 );
3490 assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3492 network_graph.remove_stale_channels_and_tracking_with_time(
3494 announcement_time
3495 + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS
3496 + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3497 );
3498 }
3499
3500 assert_eq!(network_graph.read_only().channels().len(), 0);
3501 assert_eq!(network_graph.read_only().nodes().len(), 0);
3502 assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3503
3504 #[cfg(feature = "std")]
3505 {
3506 use std::time::{SystemTime, UNIX_EPOCH};
3507
3508 let tracking_time = SystemTime::now()
3509 .duration_since(UNIX_EPOCH)
3510 .expect("Time must be > 1970")
3511 .as_secs();
3512
3513 network_graph.removed_channels.lock().unwrap().clear();
3515 network_graph.removed_nodes.lock().unwrap().clear();
3516
3517 assert!(network_graph
3520 .update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3521 .is_ok());
3522
3523 network_graph.channel_failed_permanent_with_time(scid, Some(tracking_time));
3526
3527 network_graph.remove_stale_channels_and_tracking_with_time(
3529 tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1,
3530 );
3531 assert_eq!(
3532 network_graph.removed_channels.lock().unwrap().len(),
3533 1,
3534 "Removed channel count ≠1 with tracking_time {}",
3535 tracking_time
3536 );
3537
3538 network_graph.remove_stale_channels_and_tracking_with_time(
3540 tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3541 );
3542 assert!(
3543 network_graph.removed_channels.lock().unwrap().is_empty(),
3544 "Unexpectedly removed channels with tracking_time {}",
3545 tracking_time
3546 );
3547 assert!(
3548 network_graph.removed_nodes.lock().unwrap().is_empty(),
3549 "Unexpectedly removed nodes with tracking_time {}",
3550 tracking_time
3551 );
3552 }
3553
3554 #[cfg(not(feature = "std"))]
3555 {
3556 let removal_time = 1664619654;
3561
3562 network_graph.removed_channels.lock().unwrap().clear();
3564 network_graph.removed_nodes.lock().unwrap().clear();
3565
3566 assert!(network_graph
3569 .update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3570 .is_ok());
3571
3572 network_graph.channel_failed_permanent(scid);
3575
3576 network_graph.remove_stale_channels_and_tracking_with_time(removal_time);
3578 assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3579
3580 network_graph.remove_stale_channels_and_tracking_with_time(
3582 removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3583 );
3584 assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3585 assert!(network_graph.removed_nodes.lock().unwrap().is_empty());
3586 }
3587 }
3588
3589 #[test]
3590 fn getting_next_channel_announcements() {
3591 let network_graph = create_network_graph();
3592 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3593 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3594 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3595 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3596
3597 let channels_with_announcements = gossip_sync.get_next_channel_announcement(0);
3599 assert!(channels_with_announcements.is_none());
3600
3601 let short_channel_id;
3602 {
3603 let valid_channel_announcement =
3605 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3606 short_channel_id = valid_channel_announcement.contents.short_channel_id;
3607 match gossip_sync
3608 .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3609 {
3610 Ok(_) => (),
3611 Err(_) => panic!(),
3612 };
3613 }
3614
3615 let channels_with_announcements =
3617 gossip_sync.get_next_channel_announcement(short_channel_id);
3618 if let Some(channel_announcements) = channels_with_announcements {
3619 let (_, ref update_1, ref update_2) = channel_announcements;
3620 assert_eq!(update_1, &None);
3621 assert_eq!(update_2, &None);
3622 } else {
3623 panic!();
3624 }
3625
3626 {
3627 let valid_channel_update = get_signed_channel_update(
3629 |unsigned_channel_update| {
3630 unsigned_channel_update.timestamp = 101;
3631 },
3632 node_1_privkey,
3633 &secp_ctx,
3634 );
3635 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3636 Ok(_) => (),
3637 Err(_) => panic!(),
3638 };
3639 }
3640
3641 let channels_with_announcements =
3643 gossip_sync.get_next_channel_announcement(short_channel_id);
3644 if let Some(channel_announcements) = channels_with_announcements {
3645 let (_, ref update_1, ref update_2) = channel_announcements;
3646 assert_ne!(update_1, &None);
3647 assert_eq!(update_2, &None);
3648 } else {
3649 panic!();
3650 }
3651
3652 {
3653 let valid_channel_update = get_signed_channel_update(
3655 |unsigned_channel_update| {
3656 unsigned_channel_update.timestamp = 102;
3657 unsigned_channel_update.excess_data =
3658 [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3659 },
3660 node_1_privkey,
3661 &secp_ctx,
3662 );
3663 match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3664 Ok(_) => (),
3665 Err(_) => panic!(),
3666 };
3667 }
3668
3669 let channels_with_announcements =
3671 gossip_sync.get_next_channel_announcement(short_channel_id);
3672 if let Some(channel_announcements) = channels_with_announcements {
3673 let (_, ref update_1, ref update_2) = channel_announcements;
3674 assert_eq!(update_1, &None);
3675 assert_eq!(update_2, &None);
3676 } else {
3677 panic!();
3678 }
3679
3680 let channels_with_announcements =
3682 gossip_sync.get_next_channel_announcement(short_channel_id + 1000);
3683 assert!(channels_with_announcements.is_none());
3684 }
3685
3686 #[test]
3687 fn getting_next_node_announcements() {
3688 let network_graph = create_network_graph();
3689 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3690 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3691 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3692 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3693 let node_id_1 = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));
3694
3695 let next_announcements = gossip_sync.get_next_node_announcement(None);
3697 assert!(next_announcements.is_none());
3698
3699 {
3700 let valid_channel_announcement =
3702 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3703 match gossip_sync
3704 .handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3705 {
3706 Ok(_) => (),
3707 Err(_) => panic!(),
3708 };
3709 }
3710
3711 let next_announcements = gossip_sync.get_next_node_announcement(None);
3713 assert!(next_announcements.is_none());
3714
3715 {
3716 let valid_announcement =
3717 get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3718 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3719 Ok(_) => (),
3720 Err(_) => panic!(),
3721 };
3722
3723 let valid_announcement =
3724 get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx);
3725 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3726 Ok(_) => (),
3727 Err(_) => panic!(),
3728 };
3729 }
3730
3731 let next_announcements = gossip_sync.get_next_node_announcement(None);
3732 assert!(next_announcements.is_some());
3733
3734 let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3736 assert!(next_announcements.is_some());
3737
3738 {
3739 let valid_announcement = get_signed_node_announcement(
3741 |unsigned_announcement| {
3742 unsigned_announcement.timestamp += 10;
3743 unsigned_announcement.excess_data =
3744 [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3745 },
3746 node_2_privkey,
3747 &secp_ctx,
3748 );
3749 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3750 Ok(res) => assert!(!res),
3751 Err(_) => panic!(),
3752 };
3753 }
3754
3755 let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3756 assert!(next_announcements.is_none());
3757 }
3758
3759 #[test]
3760 fn network_graph_serialization() {
3761 let network_graph = create_network_graph();
3762 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3763
3764 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3765 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3766 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3767
3768 let valid_announcement =
3770 get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3771 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3772 Ok(res) => assert!(res),
3773 _ => panic!(),
3774 };
3775
3776 let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3777 match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3778 Ok(_) => (),
3779 Err(_) => panic!(),
3780 };
3781
3782 let mut w = test_utils::TestVecWriter(Vec::new());
3783 assert!(!network_graph.read_only().nodes().is_empty());
3784 assert!(!network_graph.read_only().channels().is_empty());
3785 network_graph.write(&mut w).unwrap();
3786
3787 let logger = Arc::new(test_utils::TestLogger::new());
3788 assert!(
3789 <NetworkGraph<_>>::read(&mut io::Cursor::new(&w.0), logger).unwrap() == network_graph
3790 );
3791 }
3792
3793 #[test]
3794 fn network_graph_tlv_serialization() {
3795 let network_graph = create_network_graph();
3796 network_graph.set_last_rapid_gossip_sync_timestamp(42);
3797
3798 let mut w = test_utils::TestVecWriter(Vec::new());
3799 network_graph.write(&mut w).unwrap();
3800
3801 let logger = Arc::new(test_utils::TestLogger::new());
3802 let reassembled_network_graph: NetworkGraph<_> =
3803 ReadableArgs::read(&mut io::Cursor::new(&w.0), logger).unwrap();
3804 assert!(reassembled_network_graph == network_graph);
3805 assert_eq!(reassembled_network_graph.get_last_rapid_gossip_sync_timestamp().unwrap(), 42);
3806 }
3807
3808 #[test]
3809 #[cfg(feature = "std")]
3810 fn calling_sync_routing_table() {
3811 use crate::ln::msgs::Init;
3812 use std::time::{SystemTime, UNIX_EPOCH};
3813
3814 let network_graph = create_network_graph();
3815 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3816 let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
3817 let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
3818
3819 let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3820
3821 {
3823 let init_msg = Init {
3824 features: InitFeatures::empty(),
3825 networks: None,
3826 remote_network_address: None,
3827 };
3828 gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3829 let events = gossip_sync.get_and_clear_pending_msg_events();
3830 assert_eq!(events.len(), 0);
3831 }
3832
3833 {
3835 let mut features = InitFeatures::empty();
3836 features.set_gossip_queries_optional();
3837 let init_msg = Init { features, networks: None, remote_network_address: None };
3838 gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3839 let events = gossip_sync.get_and_clear_pending_msg_events();
3840 assert_eq!(events.len(), 1);
3841 match &events[0] {
3842 MessageSendEvent::SendGossipTimestampFilter { node_id, msg } => {
3843 assert_eq!(node_id, &node_id_1);
3844 assert_eq!(msg.chain_hash, chain_hash);
3845 let expected_timestamp = SystemTime::now()
3846 .duration_since(UNIX_EPOCH)
3847 .expect("Time must be > 1970")
3848 .as_secs();
3849 assert!(
3850 (msg.first_timestamp as u64) >= expected_timestamp - 60 * 60 * 24 * 7 * 2
3851 );
3852 assert!(
3853 (msg.first_timestamp as u64)
3854 < expected_timestamp - 60 * 60 * 24 * 7 * 2 + 10
3855 );
3856 assert_eq!(msg.timestamp_range, u32::max_value());
3857 },
3858 _ => panic!("Expected MessageSendEvent::SendChannelRangeQuery"),
3859 };
3860 }
3861 }
3862
3863 #[test]
3864 fn handling_query_channel_range() {
3865 let network_graph = create_network_graph();
3866 let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3867
3868 let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3869 let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3870 let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3871 let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3872 let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3873
3874 let mut scids: Vec<u64> = vec![
3875 scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(),
scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(),
];
3878
3879 for block in 100000..=108001 {
3881 scids.push(scid_from_parts(block, 0, 0).unwrap());
3882 }
3883
3884 scids.push(scid_from_parts(108001, 1, 0).unwrap());
3886
3887 for scid in scids {
3888 let valid_announcement = get_signed_channel_announcement(
3889 |unsigned_announcement| {
3890 unsigned_announcement.short_channel_id = scid;
3891 },
3892 node_1_privkey,
3893 node_2_privkey,
3894 &secp_ctx,
3895 );
3896 match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3897 {
3898 Ok(_) => (),
3899 _ => panic!(),
3900 };
3901 }
3902
3903 do_handling_query_channel_range(
3905 &gossip_sync,
3906 &node_id_2,
3907 QueryChannelRange {
3908 chain_hash: chain_hash.clone(),
3909 first_blocknum: 0,
3910 number_of_blocks: 0,
3911 },
3912 false,
3913 vec![ReplyChannelRange {
3914 chain_hash: chain_hash.clone(),
3915 first_blocknum: 0,
3916 number_of_blocks: 0,
3917 sync_complete: true,
3918 short_channel_ids: vec![],
3919 }],
3920 );
3921
3922 do_handling_query_channel_range(
3924 &gossip_sync,
3925 &node_id_2,
3926 QueryChannelRange {
3927 chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3928 first_blocknum: 0,
3929 number_of_blocks: 0xffff_ffff,
3930 },
3931 false,
3932 vec![ReplyChannelRange {
3933 chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3934 first_blocknum: 0,
3935 number_of_blocks: 0xffff_ffff,
3936 sync_complete: true,
3937 short_channel_ids: vec![],
3938 }],
3939 );
3940
3941 do_handling_query_channel_range(
3943 &gossip_sync,
3944 &node_id_2,
3945 QueryChannelRange {
3946 chain_hash: chain_hash.clone(),
3947 first_blocknum: 0x01000000,
3948 number_of_blocks: 0xffff_ffff,
3949 },
3950 false,
3951 vec![ReplyChannelRange {
3952 chain_hash: chain_hash.clone(),
3953 first_blocknum: 0x01000000,
3954 number_of_blocks: 0xffff_ffff,
3955 sync_complete: true,
3956 short_channel_ids: vec![],
3957 }],
3958 );
3959
3960 do_handling_query_channel_range(
3962 &gossip_sync,
3963 &node_id_2,
3964 QueryChannelRange {
3965 chain_hash: chain_hash.clone(),
3966 first_blocknum: 0xffffff,
3967 number_of_blocks: 1,
3968 },
3969 true,
3970 vec![ReplyChannelRange {
3971 chain_hash: chain_hash.clone(),
3972 first_blocknum: 0xffffff,
3973 number_of_blocks: 1,
3974 sync_complete: true,
3975 short_channel_ids: vec![],
3976 }],
3977 );
3978
		do_handling_query_channel_range(
			&gossip_sync,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 1000,
				number_of_blocks: 1000,
			},
			true,
			vec![ReplyChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 1000,
				number_of_blocks: 1000,
				sync_complete: true,
				short_channel_ids: vec![],
			}],
		);

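		// first_blocknum + number_of_blocks overflows the valid block range; the reply's
		// number_of_blocks is trimmed to end at the maximum block height.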
		do_handling_query_channel_range(
			&gossip_sync,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0xfe0000,
				number_of_blocks: 0xffffffff,
			},
			true,
			vec![ReplyChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 0xfe0000,
				number_of_blocks: 0xffffffff - 0xfe0000,
				sync_complete: true,
				short_channel_ids: vec![
					0xfffffe_ffffff_ffff, // == scid_from_parts(0xfffffe, 0xffffff, 0xffff)
				],
			}],
		);

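		// A single reply that is exactly full (8000 SCIDs).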
		do_handling_query_channel_range(
			&gossip_sync,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100000,
				number_of_blocks: 8000,
			},
			true,
			vec![ReplyChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100000,
				number_of_blocks: 8000,
				sync_complete: true,
				short_channel_ids: (100000..=107999)
					.map(|block| scid_from_parts(block, 0, 0).unwrap())
					.collect(),
			}],
		);

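		// Multiple replies, with the second reply starting at a new block.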
		do_handling_query_channel_range(
			&gossip_sync,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100000,
				number_of_blocks: 8001,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 100000,
					number_of_blocks: 7999,
					sync_complete: false,
					short_channel_ids: (100000..=107999)
						.map(|block| scid_from_parts(block, 0, 0).unwrap())
						.collect(),
				},
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 107999,
					number_of_blocks: 2,
					sync_complete: true,
					short_channel_ids: vec![scid_from_parts(108000, 0, 0).unwrap()],
				},
			],
		);

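		// Multiple replies, with the second reply resuming within the same block.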
		do_handling_query_channel_range(
			&gossip_sync,
			&node_id_2,
			QueryChannelRange {
				chain_hash: chain_hash.clone(),
				first_blocknum: 100002,
				number_of_blocks: 8000,
			},
			true,
			vec![
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 100002,
					number_of_blocks: 7999,
					sync_complete: false,
					short_channel_ids: (100002..=108001)
						.map(|block| scid_from_parts(block, 0, 0).unwrap())
						.collect(),
				},
				ReplyChannelRange {
					chain_hash: chain_hash.clone(),
					first_blocknum: 108001,
					number_of_blocks: 1,
					sync_complete: true,
					short_channel_ids: vec![scid_from_parts(108001, 1, 0).unwrap()],
				},
			],
		);
	}

	fn do_handling_query_channel_range(
		gossip_sync: &P2PGossipSync<
			&NetworkGraph<Arc<test_utils::TestLogger>>,
			Arc<test_utils::TestChainSource>,
			Arc<test_utils::TestLogger>,
		>,
		test_node_id: &PublicKey, msg: QueryChannelRange, expected_ok: bool,
		expected_replies: Vec<ReplyChannelRange>,
	) {
		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
		let query_end_blocknum = msg.end_blocknum();
		let result = gossip_sync.handle_query_channel_range(*test_node_id, msg);

		if expected_ok {
			assert!(result.is_ok());
		} else {
			assert!(result.is_err());
		}

		let events = gossip_sync.get_and_clear_pending_msg_events();
		assert_eq!(events.len(), expected_replies.len());

		for i in 0..events.len() {
			let expected_reply = &expected_replies[i];
			match &events[i] {
				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
					assert_eq!(node_id, test_node_id);
					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);

					// Replies must be in sequence: each reply starts at, or one block past,
					// the end of the previous reply.
					assert!(
						msg.first_blocknum == c_lightning_0_9_prev_end_blocknum
							|| msg.first_blocknum
								== c_lightning_0_9_prev_end_blocknum.saturating_add(1)
					);
					assert!(msg.first_blocknum >= max_firstblocknum);
					max_firstblocknum = msg.first_blocknum;
					c_lightning_0_9_prev_end_blocknum =
						msg.first_blocknum.saturating_add(msg.number_of_blocks);

					if i == events.len() - 1 {
						// The final reply must cover through the end of the query range.
						assert!(
							msg.first_blocknum.saturating_add(msg.number_of_blocks)
								>= query_end_blocknum
						);
					}
				},
				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
			}
		}
	}

	#[test]
	fn handling_query_short_channel_ids() {
		let network_graph = create_network_graph();
		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);

		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);

		let result = gossip_sync.handle_query_short_channel_ids(
			node_id,
			QueryShortChannelIds { chain_hash, short_channel_ids: vec![0x0003e8_000000_0000] },
		);
		assert!(result.is_err());
	}

	#[test]
	fn displays_node_alias() {
		let format_str_alias = |alias: &str| {
			let mut bytes = [0u8; 32];
			bytes[..alias.len()].copy_from_slice(alias.as_bytes());
			format!("{}", NodeAlias(bytes))
		};

		assert_eq!(format_str_alias("I\u{1F496}LDK! \u{26A1}"), "I\u{1F496}LDK! \u{26A1}");
		assert_eq!(format_str_alias("I\u{1F496}LDK!\0\u{26A1}"), "I\u{1F496}LDK!");
		assert_eq!(format_str_alias("I\u{1F496}LDK!\t\u{26A1}"), "I\u{1F496}LDK!\u{FFFD}\u{26A1}");

		let format_bytes_alias = |alias: &[u8]| {
			let mut bytes = [0u8; 32];
			bytes[..alias.len()].copy_from_slice(alias);
			format!("{}", NodeAlias(bytes))
		};

		assert_eq!(format_bytes_alias(b"\xFFI <heart> LDK!"), "\u{FFFD}I <heart> LDK!");
		assert_eq!(format_bytes_alias(b"\xFFI <heart>\0LDK!"), "\u{FFFD}I <heart>");
		assert_eq!(format_bytes_alias(b"\xFFI <heart>\tLDK!"), "\u{FFFD}I <heart>\u{FFFD}LDK!");
	}

	#[test]
	fn channel_info_is_readable() {
		let chanmon_cfgs = crate::ln::functional_test_utils::create_chanmon_cfgs(2);
		let node_cfgs = crate::ln::functional_test_utils::create_node_cfgs(2, &chanmon_cfgs);
		let node_chanmgrs = crate::ln::functional_test_utils::create_node_chanmgrs(
			2,
			&node_cfgs,
			&[None, None, None, None],
		);
		let nodes = crate::ln::functional_test_utils::create_network(2, &node_cfgs, &node_chanmgrs);
		let config = crate::ln::functional_test_utils::test_default_channel_config();

		// Round-trip a fully-populated ChannelUpdateInfo.
		let chan_update_info = ChannelUpdateInfo {
			last_update: 23,
			enabled: true,
			cltv_expiry_delta: 42,
			htlc_minimum_msat: 1234,
			htlc_maximum_msat: 5678,
			fees: RoutingFees { base_msat: 9, proportional_millionths: 10 },
			last_update_message: None,
		};

		let mut encoded_chan_update_info: Vec<u8> = Vec::new();
		assert!(chan_update_info.write(&mut encoded_chan_update_info).is_ok());

		let read_chan_update_info: ChannelUpdateInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
		assert_eq!(chan_update_info, read_chan_update_info);

		// The encoding must not change from the legacy format.
		let legacy_chan_update_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap();
		assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some);

		// These legacy ChannelUpdateInfo encodings must fail to decode.
		let legacy_chan_update_info_with_some_and_fail_update: Vec<u8> = <Vec<u8>>::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap();
		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
			crate::util::ser::Readable::read(
				&mut legacy_chan_update_info_with_some_and_fail_update.as_slice(),
			);
		assert!(read_chan_update_info_res.is_err());

		let legacy_chan_update_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
			crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
		assert!(read_chan_update_info_res.is_err());

		// Round-trip a ChannelInfo with no directional updates.
		let chan_info_none_updates = ChannelInfo {
			features: channelmanager::provided_channel_features(&config),
			node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
			one_to_two: None,
			node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
			two_to_one: None,
			capacity_sats: None,
			announcement_message: None,
			announcement_received_time: 87654,
			node_one_counter: 0,
			node_two_counter: 1,
		};

		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_none_updates.write(&mut encoded_chan_info).is_ok());

		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_none_updates, read_chan_info);

		// Round-trip a ChannelInfo with updates in both directions.
		let chan_info_some_updates = ChannelInfo {
			features: channelmanager::provided_channel_features(&config),
			node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
			one_to_two: Some(chan_update_info.clone()),
			node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
			two_to_one: Some(chan_update_info.clone()),
			capacity_sats: None,
			announcement_message: None,
			announcement_received_time: 87654,
			node_one_counter: 0,
			node_two_counter: 1,
		};

		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_some_updates.write(&mut encoded_chan_info).is_ok());

		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_some_updates, read_chan_info);

		// The encoding must not change from the legacy format.
		let legacy_chan_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
		assert_eq!(encoded_chan_info, legacy_chan_info_with_some);

		// A legacy ChannelInfo whose directional updates fail to decode is still readable;
		// the unreadable updates are read back as None.
		let legacy_chan_info_with_some_and_fail_update = <Vec<u8>>::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(
			&mut legacy_chan_info_with_some_and_fail_update.as_slice(),
		)
		.unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);

		let legacy_chan_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);
	}

	#[test]
	fn node_info_is_readable() {
		let announcement_message = <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
		let announcement_message =
			NodeAnnouncement::read_from_fixed_length_buffer(&mut announcement_message.as_slice())
				.unwrap();
		let valid_node_ann_info = NodeAnnouncementInfo::Relayed(announcement_message);

		let mut encoded_valid_node_ann_info = Vec::new();
		assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
		let read_valid_node_ann_info =
			NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
		assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
		assert_eq!(read_valid_node_ann_info.addresses().len(), 1);

		let encoded_invalid_node_ann_info = <Vec<u8>>::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
		let read_invalid_node_ann_info_res =
			NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
		assert!(read_invalid_node_ann_info_res.is_err());

		let valid_node_info = NodeInfo {
			channels: Vec::new(),
			announcement_info: Some(valid_node_ann_info),
			node_counter: 0,
		};

		let mut encoded_valid_node_info = Vec::new();
		assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
		let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap();
		assert_eq!(read_valid_node_info, valid_node_info);

		let encoded_invalid_node_info_hex = <Vec<u8>>::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
		let read_invalid_node_info =
			NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
		assert_eq!(read_invalid_node_info.announcement_info, None);
	}

	#[test]
	fn test_node_info_keeps_compatibility() {
		let old_ann_info_with_addresses = <Vec<u8>>::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
		let ann_info_with_addresses =
			NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
				.expect("to be able to read an old NodeAnnouncementInfo with addresses");
		assert!(!ann_info_with_addresses.addresses().is_empty());
	}

	#[test]
	fn test_node_id_display() {
		let node_id = NodeId([42; 33]);
		assert_eq!(
			format!("{}", &node_id),
			"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"
		);
	}

	#[test]
	fn is_tor_only_node() {
		let network_graph = create_network_graph();
		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);

		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_1_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));

		let announcement =
			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
		gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &announcement).unwrap();

		let tcp_ip_v4 = SocketAddress::TcpIpV4 { addr: [255, 254, 253, 252], port: 9735 };
		let tcp_ip_v6 = SocketAddress::TcpIpV6 {
			addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
			port: 9735,
		};
		let onion_v2 =
			SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
		let onion_v3 = SocketAddress::OnionV3 {
			ed25519_pubkey: [
				255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240,
				239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224,
			],
			checksum: 32,
			version: 16,
			port: 9735,
		};
		let hostname = SocketAddress::Hostname {
			hostname: Hostname::try_from(String::from("host")).unwrap(),
			port: 9735,
		};

		// With only a channel announcement and no node announcement, the node is not Tor-only.
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		// A plain announcement (no addresses set) is not Tor-only.
		let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		// Announcements mixing clearnet and onion addresses are not Tor-only.
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![
					tcp_ip_v4.clone(),
					tcp_ip_v6.clone(),
					onion_v2.clone(),
					onion_v3.clone(),
					hostname.clone(),
				];
				announcement.timestamp += 1000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses =
					vec![tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 2000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses =
					vec![tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 3000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		// Once only onion addresses are announced, the node is considered Tor-only.
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 4000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![onion_v2.clone()];
				announcement.timestamp += 5000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		// Announcing a clearnet address again clears the Tor-only status.
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![tcp_ip_v4.clone()];
				announcement.timestamp += 6000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
	}
}

#[cfg(ldk_bench)]
pub mod benches {
	use super::*;
	use criterion::{black_box, Criterion};
	use std::io::Read;

	/// Benchmarks deserializing the network graph file used by the routing benchmarks.
	pub fn read_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut v = Vec::new();
		d.read_to_end(&mut v).unwrap();
		bench.bench_function("read_network_graph", |b| {
			b.iter(|| {
				NetworkGraph::read(&mut crate::io::Cursor::new(black_box(&v)), &logger).unwrap()
			})
		});
	}

	/// Benchmarks re-serializing a previously-deserialized network graph.
	pub fn write_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut graph_buffer = Vec::new();
		d.read_to_end(&mut graph_buffer).unwrap();
		let net_graph = NetworkGraph::read(&mut &graph_buffer[..], &logger).unwrap();
		bench.bench_function("write_network_graph", |b| b.iter(|| black_box(&net_graph).encode()));
	}
}