lightning/routing/gossip.rs

// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers

use bitcoin::amount::Amount;
use bitcoin::constants::ChainHash;

use bitcoin::secp256k1;
use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, Verification};

use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
use bitcoin::network::Network;

use crate::ln::msgs;
use crate::ln::msgs::{
	BaseMessageHandler, ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement,
};
use crate::ln::msgs::{
	DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress,
	MAX_VALUE_MSAT,
};
use crate::ln::msgs::{
	MessageSendEvent, QueryChannelRange, QueryShortChannelIds, ReplyChannelRange,
	ReplyShortChannelIdsEnd,
};
use crate::ln::types::ChannelId;
use crate::routing::utxo::{self, UtxoLookup, UtxoResolver};
use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::types::string::PrintableString;
use crate::util::indexed_map::{
	Entry as IndexedMapEntry, IndexedMap, OccupiedEntry as IndexedMapOccupiedEntry,
};
use crate::util::logger::{Level, Logger};
use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};

use crate::io;
use crate::io_extras::{copy, sink};
use crate::prelude::*;
use crate::sync::Mutex;
use crate::sync::{LockTestExt, RwLock, RwLockReadGuard};
use core::ops::{Bound, Deref};
use core::str::FromStr;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{cmp, fmt};

pub use lightning_types::routing::RoutingFees;

#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};

/// We remove stale channel directional info two weeks after the last update, per BOLT 7's
/// suggestion.
const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;

/// We stop tracking the removal of permanently failed nodes and channels one week after removal
const REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 7;

/// The maximum number of extra bytes which we do not understand in a gossip message before we will
/// refuse to relay the message.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;

/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
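/// (Each encoded SCID occupies 8 bytes, so 8,000 SCIDs take 64,000 bytes, comfortably under the
/// 65,535-byte lightning message payload limit.)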
const MAX_SCIDS_PER_REPLY: usize = 8000;

/// A compressed pubkey which a node uses to sign announcements and decode HTLCs routed through it.
///
/// This type stores a simple byte array which is not checked for validity (i.e. that it describes
/// a point which lies on the secp256k1 curve), unlike [`PublicKey`], as validity checking would
/// otherwise represent a large portion of [`NetworkGraph`] deserialization time (and RGS
/// application).
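///
/// # Example
///
/// A minimal sketch of building a `NodeId` from hex and reading it back (the hex string below is
/// an arbitrary 33-byte value; curve validity is only checked on conversion to [`PublicKey`]):
///
/// ```ignore
/// use core::str::FromStr;
///
/// let node_id = NodeId::from_str(
/// 	"020202020202020202020202020202020202020202020202020202020202020202",
/// )
/// .unwrap();
/// assert_eq!(node_id.as_slice().len(), 33);
/// // `as_pubkey` is where an invalid point would actually be rejected.
/// let _maybe_pubkey = node_id.as_pubkey();
/// ```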
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct NodeId([u8; PUBLIC_KEY_SIZE]);

impl NodeId {
	/// Create a new NodeId from a public key
	pub fn from_pubkey(pubkey: &PublicKey) -> Self {
		NodeId(pubkey.serialize())
	}

	/// Create a new NodeId from a slice of bytes
	pub fn from_slice(bytes: &[u8]) -> Result<Self, DecodeError> {
		if bytes.len() != PUBLIC_KEY_SIZE {
			return Err(DecodeError::InvalidValue);
		}
		let mut data = [0; PUBLIC_KEY_SIZE];
		data.copy_from_slice(bytes);
		Ok(NodeId(data))
	}

	/// Get the public key slice from this NodeId
	pub fn as_slice(&self) -> &[u8] {
		&self.0
	}

	/// Get the public key as an array from this NodeId
	pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] {
		&self.0
	}

	/// Get the public key from this NodeId
	pub fn as_pubkey(&self) -> Result<PublicKey, secp256k1::Error> {
		PublicKey::from_slice(&self.0)
	}
}

impl fmt::Debug for NodeId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "NodeId({})", crate::util::logger::DebugBytes(&self.0))
	}
}
impl fmt::Display for NodeId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		crate::util::logger::DebugBytes(&self.0).fmt(f)
	}
}

impl core::hash::Hash for NodeId {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		self.0.hash(hasher);
	}
}

impl cmp::PartialOrd for NodeId {
	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for NodeId {
	fn cmp(&self, other: &Self) -> cmp::Ordering {
		self.0[..].cmp(&other.0[..])
	}
}

impl Writeable for NodeId {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		writer.write_all(&self.0)?;
		Ok(())
	}
}

impl Readable for NodeId {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let mut buf = [0; PUBLIC_KEY_SIZE];
		reader.read_exact(&mut buf)?;
		Ok(Self(buf))
	}
}

impl From<PublicKey> for NodeId {
	fn from(pubkey: PublicKey) -> Self {
		Self::from_pubkey(&pubkey)
	}
}

impl TryFrom<NodeId> for PublicKey {
	type Error = secp256k1::Error;

	fn try_from(node_id: NodeId) -> Result<Self, Self::Error> {
		node_id.as_pubkey()
	}
}

impl FromStr for NodeId {
	type Err = bitcoin::hex::parse::HexToArrayError;

	fn from_str(s: &str) -> Result<Self, Self::Err> {
		let data: [u8; PUBLIC_KEY_SIZE] = bitcoin::hex::FromHex::from_hex(s)?;
		Ok(NodeId(data))
	}
}

/// Represents the network as nodes and channels between them
pub struct NetworkGraph<L: Deref>
where
	L::Target: Logger,
{
	secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
	last_rapid_gossip_sync_timestamp: Mutex<Option<u32>>,
	chain_hash: ChainHash,
	logger: L,
	// Lock order: channels -> nodes
	channels: RwLock<IndexedMap<u64, ChannelInfo>>,
	nodes: RwLock<IndexedMap<NodeId, NodeInfo>>,
	removed_node_counters: Mutex<Vec<u32>>,
	next_node_counter: AtomicUsize,
	// Lock order: removed_channels -> removed_nodes
	//
	// NOTE: In the following `removed_*` maps, we use seconds since UNIX epoch to track time instead
	// of `std::time::Instant`s for a few reasons:
	//   * We want it to be possible to do tracking in non-`std` environments where we can compare
	//     a provided current UNIX timestamp with the time at which we started tracking.
	//   * In the future, if we decide to persist these maps, they will already be serializable.
	//   * Although we lose out on the platform's monotonic clock, the system clock in a std
	//     environment should be practical over the time period we are considering (on the order of a
	//     week).
	//
	/// Keeps track of short channel IDs for channels we have explicitly removed due to permanent
	/// failure so that we don't resync them from gossip. Each SCID is mapped to the time (in seconds)
	/// it was removed so that once some time passes, we can potentially resync it from gossip again.
	removed_channels: Mutex<HashMap<u64, Option<u64>>>,
	/// Keeps track of `NodeId`s we have explicitly removed due to permanent failure so that we don't
	/// resync them from gossip. Each `NodeId` is mapped to the time (in seconds) it was removed so
	/// that once some time passes, we can potentially resync it from gossip again.
	removed_nodes: Mutex<HashMap<NodeId, Option<u64>>>,
	/// Announcement messages which are awaiting an on-chain lookup to be processed.
	pub(super) pending_checks: utxo::PendingChecks,
}

/// A read-only view of [`NetworkGraph`].
pub struct ReadOnlyNetworkGraph<'a> {
	channels: RwLockReadGuard<'a, IndexedMap<u64, ChannelInfo>>,
	nodes: RwLockReadGuard<'a, IndexedMap<NodeId, NodeInfo>>,
	max_node_counter: u32,
}

/// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion
/// return packet by a node along the route. See [BOLT #4] for details.
///
/// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md
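///
/// # Example
///
/// An illustrative sketch of applying an update, assuming `network_graph` is an existing
/// [`NetworkGraph`]:
///
/// ```ignore
/// let update = NetworkUpdate::ChannelFailure { short_channel_id: 42, is_permanent: true };
/// // Permanent failures remove the channel from the graph via `channel_failed_permanent`.
/// network_graph.handle_network_update(&update);
/// ```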
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NetworkUpdate {
	/// An error indicating that a channel failed to route a payment, which should be applied via
	/// [`NetworkGraph::channel_failed_permanent`] if permanent.
	ChannelFailure {
		/// The short channel id of the closed channel.
		short_channel_id: u64,
		/// Whether the channel should be permanently removed or temporarily disabled until a new
		/// `channel_update` message is received.
		is_permanent: bool,
	},
	/// An error indicating that a node failed to route a payment, which should be applied via
	/// [`NetworkGraph::node_failed_permanent`] if permanent.
	NodeFailure {
		/// The node id of the failed node.
		node_id: PublicKey,
		/// Whether the node should be permanently removed from consideration or can be restored
		/// when a new `channel_update` message is received.
		is_permanent: bool,
	},
}

impl Writeable for NetworkUpdate {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		match self {
			Self::ChannelFailure { short_channel_id, is_permanent } => {
				2u8.write(writer)?;
				write_tlv_fields!(writer, {
					(0, short_channel_id, required),
					(2, is_permanent, required),
				});
			},
			Self::NodeFailure { node_id, is_permanent } => {
				4u8.write(writer)?;
				write_tlv_fields!(writer, {
					(0, node_id, required),
					(2, is_permanent, required),
				});
			},
		}
		Ok(())
	}
}

impl MaybeReadable for NetworkUpdate {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				// 0 was previously used for network updates containing a channel update, subsequently
				// removed in LDK version 0.0.124.
				let mut msg: RequiredWrapper<ChannelUpdate> = RequiredWrapper(None);
				read_tlv_fields!(reader, {
					(0, msg, required),
				});
				Ok(Some(Self::ChannelFailure {
					short_channel_id: msg.0.unwrap().contents.short_channel_id,
					is_permanent: false,
				}))
			},
			2 => {
				_init_and_read_len_prefixed_tlv_fields!(reader, {
					(0, short_channel_id, required),
					(2, is_permanent, required),
				});
				Ok(Some(Self::ChannelFailure {
					short_channel_id: short_channel_id.0.unwrap(),
					is_permanent: is_permanent.0.unwrap(),
				}))
			},
			4 => {
				_init_and_read_len_prefixed_tlv_fields!(reader, {
					(0, node_id, required),
					(2, is_permanent, required),
				});
				Ok(Some(Self::NodeFailure {
					node_id: node_id.0.unwrap(),
					is_permanent: is_permanent.0.unwrap(),
				}))
			},
			t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature),
			_ => Ok(None),
		}
	}
}

/// Receives and validates network updates from peers,
/// stores authentic and relevant data as a network graph.
/// This network graph is then used for routing payments.
/// Provides an interface to help with initial routing sync by
/// serving historical announcements.
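///
/// # Example
///
/// A construction sketch, assuming `logger` implements [`Logger`] and on-chain checks are skipped
/// by passing `None` for the [`UtxoLookup`]:
///
/// ```ignore
/// use std::sync::Arc;
///
/// let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
/// let gossip_sync =
/// 	P2PGossipSync::new(&network_graph, None::<Arc<dyn UtxoLookup + Send + Sync>>, &logger);
/// ```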
pub struct P2PGossipSync<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	network_graph: G,
	utxo_lookup: RwLock<Option<U>>,
	full_syncs_requested: AtomicUsize,
	pending_events: Mutex<Vec<MessageSendEvent>>,
	logger: L,
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Creates a new tracker of the actual state of the network of channels and nodes,
	/// assuming an existing [`NetworkGraph`].
	/// UTXO lookup is used to make sure announced channels exist on-chain, channel data is
	/// correct, and the announcement is signed with channel owners' keys.
	pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
		P2PGossipSync {
			network_graph,
			full_syncs_requested: AtomicUsize::new(0),
			utxo_lookup: RwLock::new(utxo_lookup),
			pending_events: Mutex::new(vec![]),
			logger,
		}
	}

	/// Adds a provider used to check new announcements. Does not affect
	/// existing announcements unless they are updated.
	/// Adding, updating or removing the provider replaces the current one.
	pub fn add_utxo_lookup(&self, utxo_lookup: Option<U>) {
		*self.utxo_lookup.write().unwrap() = utxo_lookup;
	}

	/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
	/// [`P2PGossipSync::new`].
	///
	/// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet
	pub fn network_graph(&self) -> &G {
		&self.network_graph
	}

	/// Returns true when a full routing table sync should be performed with a peer.
	fn should_request_full_sync(&self) -> bool {
		const FULL_SYNCS_TO_REQUEST: usize = 5;
		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
			true
		} else {
			false
		}
	}

	/// Used to broadcast-forward gossip messages which were validated asynchronously.
	///
	/// Note that this will ignore events other than `Broadcast*` or messages with too much excess
	/// data.
	pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
		match &mut ev {
			MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
					return;
				}
				if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
					> MAX_EXCESS_BYTES_FOR_RELAY
				{
					*update_msg = None;
				}
			},
			MessageSendEvent::BroadcastChannelUpdate { msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
					return;
				}
			},
			MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
					|| msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
					|| msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
						> MAX_EXCESS_BYTES_FOR_RELAY
				{
					return;
				}
			},
			_ => return,
		}
		self.pending_events.lock().unwrap().push(ev);
	}
}

impl<L: Deref> NetworkGraph<L>
where
	L::Target: Logger,
{
	/// Handles any network updates originating from [`Event`]s.
	///
	/// [`Event`]: crate::events::Event
	pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
		match *network_update {
			NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
				if is_permanent {
					log_debug!(
						self.logger,
						"Removing channel graph entry for {} due to a payment failure.",
						short_channel_id
					);
					self.channel_failed_permanent(short_channel_id);
				}
			},
			NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
				if is_permanent {
					log_debug!(
						self.logger,
						"Removed node graph entry for {} due to a payment failure.",
						log_pubkey!(node_id)
					);
					self.node_failed_permanent(node_id);
				};
			},
		}
	}

	/// Gets the chain hash for this network graph.
	pub fn get_chain_hash(&self) -> ChainHash {
		self.chain_hash
	}
}

macro_rules! secp_verify_sig {
	( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => {
		match $secp_ctx.verify_ecdsa($msg, $sig, $pubkey) {
			Ok(_) => {},
			Err(_) => {
				return Err(LightningError {
					err: format!("Invalid signature on {} message", $msg_type),
					action: ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id: ChannelId::new_zero(),
							data: format!("Invalid signature on {} message", $msg_type),
						},
						log_level: Level::Trace,
					},
				});
			},
		}
	};
}

macro_rules! get_pubkey_from_node_id {
	( $node_id: expr, $msg_type: expr ) => {
		PublicKey::from_slice($node_id.as_slice()).map_err(|_| LightningError {
			err: format!("Invalid public key on {} message", $msg_type),
			action: ErrorAction::SendWarningMessage {
				msg: msgs::WarningMessage {
					channel_id: ChannelId::new_zero(),
					data: format!("Invalid public key on {} message", $msg_type),
				},
				log_level: Level::Trace,
			},
		})?
	};
}

fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash {
	let mut engine = Sha256dHash::engine();
	msg.write(&mut engine).expect("In-memory structs should not fail to serialize");
	Sha256dHash::from_engine(engine)
}

/// Verifies the signature of a [`NodeAnnouncement`].
///
/// Returns an error if it is invalid.
pub fn verify_node_announcement<C: Verification>(
	msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>,
) -> Result<(), LightningError> {
	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
	secp_verify_sig!(
		secp_ctx,
		&msg_hash,
		&msg.signature,
		&get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"),
		"node_announcement"
	);

	Ok(())
}

/// Verifies all signatures included in a [`ChannelAnnouncement`].
///
/// Returns an error if one of the signatures is invalid.
pub fn verify_channel_announcement<C: Verification>(
	msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>,
) -> Result<(), LightningError> {
	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
	let node_a = get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &node_a, "channel_announcement");
	let node_b = get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &node_b, "channel_announcement");
	let btc_a = get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &btc_a, "channel_announcement");
	let btc_b = get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &btc_b, "channel_announcement");

	Ok(())
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler
	for P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	fn handle_node_announcement(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::NodeAnnouncement,
	) -> Result<bool, LightningError> {
		self.network_graph.update_node_from_announcement(msg)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
			&& msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
			&& msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
				<= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_announcement(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement,
	) -> Result<bool, LightningError> {
		self.network_graph
			.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_update(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelUpdate,
	) -> Result<bool, LightningError> {
		self.network_graph.update_channel(msg)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn get_next_channel_announcement(
		&self, starting_point: u64,
	) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
		let mut channels = self.network_graph.channels.write().unwrap();
		for (_, ref chan) in channels.range(starting_point..) {
			if chan.announcement_message.is_some() {
				let chan_announcement = chan.announcement_message.clone().unwrap();
				let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
				let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
				if let Some(one_to_two) = chan.one_to_two.as_ref() {
					one_to_two_announcement.clone_from(&one_to_two.last_update_message);
				}
				if let Some(two_to_one) = chan.two_to_one.as_ref() {
					two_to_one_announcement.clone_from(&two_to_one.last_update_message);
				}
				return Some((chan_announcement, one_to_two_announcement, two_to_one_announcement));
			} else {
				// TODO: We may end up sending un-announced channel_updates if we are sending
				// initial sync data while receiving announce/updates for this channel.
			}
		}
		None
	}

	fn get_next_node_announcement(
		&self, starting_point: Option<&NodeId>,
	) -> Option<NodeAnnouncement> {
		let mut nodes = self.network_graph.nodes.write().unwrap();
		let iter = if let Some(node_id) = starting_point {
			nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
		} else {
			nodes.range(..)
		};
		for (_, ref node) in iter {
			if let Some(node_info) = node.announcement_info.as_ref() {
				if let NodeAnnouncementInfo::Relayed(announcement) = node_info {
					return Some(announcement.clone());
				}
			}
		}
		None
	}

	fn handle_reply_channel_range(
		&self, _their_node_id: PublicKey, _msg: ReplyChannelRange,
	) -> Result<(), LightningError> {
		// We don't make queries, so should never receive replies. If, in the future, the set
		// reconciliation extensions to gossip queries become broadly supported, we should revert
		// this code to its state pre-0.0.106.
		Ok(())
	}

	fn handle_reply_short_channel_ids_end(
		&self, _their_node_id: PublicKey, _msg: ReplyShortChannelIdsEnd,
	) -> Result<(), LightningError> {
		// We don't make queries, so should never receive replies. If, in the future, the set
		// reconciliation extensions to gossip queries become broadly supported, we should revert
		// this code to its state pre-0.0.106.
		Ok(())
	}

	/// Processes a query from a peer by finding announced/public channels whose funding UTXOs
	/// are in the specified block range. Due to message size limits, large range
	/// queries may result in several reply messages. This implementation enqueues
	/// all reply messages into pending events. Each message will allocate just under 65KiB. A full
	/// sync of the public routing table with 128k channels will generate 16 messages and allocate
	/// ~1MB. Logic can be changed to reduce allocation if/when a full sync of the routing table
	/// impacts memory-constrained systems.
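	///
	/// A back-of-the-envelope sketch of where those numbers come from (illustrative only):
	///
	/// ```ignore
	/// let channels: usize = 128_000;
	/// let replies = channels.div_ceil(8_000); // MAX_SCIDS_PER_REPLY SCIDs per message
	/// assert_eq!(replies, 16);
	/// // Each reply pre-allocates room for 8,000 8-byte SCIDs, i.e. ~64KiB per message.
	/// ```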
	fn handle_query_channel_range(
		&self, their_node_id: PublicKey, msg: QueryChannelRange,
	) -> Result<(), LightningError> {
		log_debug!(
			self.logger,
			"Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}",
			log_pubkey!(their_node_id),
			msg.first_blocknum,
			msg.number_of_blocks
		);

		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);

		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
		// If so, we manually cap the ending block to avoid this overflow.
		let exclusive_end_scid =
			scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);

		// Per spec, we must reply to a query. Send an empty message when things are invalid.
		if msg.chain_hash != self.network_graph.chain_hash
			|| inclusive_start_scid.is_err()
			|| exclusive_end_scid.is_err()
			|| msg.number_of_blocks == 0
		{
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum: msg.first_blocknum,
					number_of_blocks: msg.number_of_blocks,
					sync_complete: true,
					short_channel_ids: vec![],
				},
			});
			return Err(LightningError {
				err: String::from("query_channel_range could not be processed"),
				action: ErrorAction::IgnoreError,
			});
		}

		// Creates channel batches. We are not checking if the channel is routable
		// (has at least one update). A peer may still want to know the channel
		// exists even if it's not yet routable.
		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
		let mut channels = self.network_graph.channels.write().unwrap();
		for (_, ref chan) in
			channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap())
		{
			if let Some(chan_announcement) = &chan.announcement_message {
				// Construct a new batch if last one is full
				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
					batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
				}

				let batch = batches.last_mut().unwrap();
				batch.push(chan_announcement.contents.short_channel_id);
			}
		}
		drop(channels);

		let mut pending_events = self.pending_events.lock().unwrap();
		let batch_count = batches.len();
		let mut prev_batch_endblock = msg.first_blocknum;
		for (batch_index, batch) in batches.into_iter().enumerate() {
			// Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
			// and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
			//
			// Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
			// reply is >= the previous reply's `first_blocknum` and either exactly the previous
			// reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
			// significant diversion from the requirements set by the spec, and, in case of blocks
			// with no channel opens (e.g. empty blocks), requires that we use the previous value
			// and *not* derive the first_blocknum from the actual first block of the reply.
			let first_blocknum = prev_batch_endblock;

			// Each message carries the number of blocks (from the `first_blocknum`) its contents
			// fit in. Though there is no requirement that we use exactly the number of blocks its
			// contents are from, except for the bogus requirements c-lightning enforces, above.
			//
			// Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
			// >= the query's end block. Thus, for the last reply, we calculate the difference
			// between the query's end block and the start of the reply.
			//
			// Overflow safe since end_blocknum=msg.first_blocknum+msg.number_of_blocks and
			// first_blocknum will be either msg.first_blocknum or a higher block height.
			let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
				(true, msg.end_blocknum() - first_blocknum)
			}
			// Prior replies should use the number of blocks that fit into the reply. Overflow
			// safe since first_blocknum is always <= last SCID's block.
			else {
				(false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
			};

			prev_batch_endblock = first_blocknum + number_of_blocks;

			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum,
					number_of_blocks,
					sync_complete,
					short_channel_ids: batch,
				},
			});
		}

		Ok(())
	}

	fn handle_query_short_channel_ids(
		&self, _their_node_id: PublicKey, _msg: QueryShortChannelIds,
	) -> Result<(), LightningError> {
		// TODO
		Err(LightningError {
			err: String::from("Not implemented"),
			action: ErrorAction::IgnoreError,
		})
	}

	fn processing_queue_high(&self) -> bool {
		self.network_graph.pending_checks.too_many_checks_pending()
	}
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> BaseMessageHandler
	for P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Initiates a stateless sync of routing gossip information with a peer
	/// using [`gossip_queries`]. The default strategy used by this implementation
	/// is to sync the full block range with several peers.
	///
	/// We should expect one or more [`reply_channel_range`] messages in response
	/// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message
	/// to request gossip messages for each channel. The sync is considered complete
	/// when the final [`reply_scids_end`] message is received, though we are not
	/// tracking this directly.
	///
	/// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages
	/// [`reply_channel_range`]: msgs::ReplyChannelRange
	/// [`query_channel_range`]: msgs::QueryChannelRange
	/// [`query_scid`]: msgs::QueryShortChannelIds
	/// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd
	fn peer_connected(
		&self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool,
	) -> Result<(), ()> {
		// We will only perform a sync with peers that support gossip_queries.
		if !init_msg.features.supports_gossip_queries() {
			// Don't disconnect peers for not supporting gossip queries. We may wish to have
			// channels with peers even without being able to exchange gossip.
			return Ok(());
		}

		// The lightning network's gossip sync system is completely broken in numerous ways.
		//
		// Given no broadly-available set-reconciliation protocol, the only reasonable approach is
		// to do a full sync from the first few peers we connect to, and then receive gossip
		// updates from all our peers normally.
		//
		// Originally, we could simply tell a peer to dump us the entire gossip table on startup,
		// wasting lots of bandwidth but ensuring we have the full network graph. After the initial
		// dump peers would always send gossip and we'd stay up-to-date with whatever our peer has
		// seen.
		//
		// In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you
		// to ask for the SCIDs of all channels in your peer's routing graph, and then only request
		// channel data which you are missing. Except there was no way at all to identify which
		// `channel_update`s you were missing, so you still had to request everything, just in a
		// very complicated way with some queries instead of just getting the dump.
		//
		// Later, an option was added to fetch the latest timestamps of the `channel_update`s to
		// make efficient sync possible, however it has yet to be implemented in lnd, which makes
		// relying on it useless.
		//
		// After gossip queries were introduced, support for receiving a full gossip table dump on
		// connection was removed from several nodes, making it impossible to get a full sync
		// without using the "gossip queries" messages.
		//
		// Once you opt into "gossip queries", the only way to receive any gossip updates that a
		// peer sees after you connect is to send a `gossip_timestamp_filter` message. This
		// message, as the name implies, tells the peer not to forward any gossip messages with a
		// timestamp older than a given value (not the time the peer received the filter, but the
		// timestamp in the update message, which is often hours behind when the peer received the
		// message).
		//
		// Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for
		// your peer to send you the full routing graph (subject to the filter). Thus, in order to
		// tell a peer to send you any updates as it sees them, you have to also ask for the full
		// routing graph to be synced. If you set a timestamp filter near the current time, peers
		// will simply not forward any new updates they see to you which were generated some time
		// ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks
		// ago), you will always get the full routing graph from all your peers.
		//
		// Most lightning nodes today opt to simply turn off receiving gossip data which only
		// propagated some time after it was generated, and, worse, often disable gossiping with
		// several peers after their first connection. The second behavior can cause gossip to not
		// propagate fully if there are cuts in the gossiping subgraph.
		//
		// In an attempt to cut a middle ground between always fetching the full graph from all of
		// our peers and never receiving gossip from peers at all, we send all of our peers a
		// `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago.
		//
		// For non-`std` builds, we bury our head in the sand and do a full sync on each connection.
		#[allow(unused_mut, unused_assignments)]
		let mut gossip_start_time = 0;
		#[allow(unused)]
		let should_sync = self.should_request_full_sync();
		#[cfg(feature = "std")]
		{
			gossip_start_time = SystemTime::now()
				.duration_since(UNIX_EPOCH)
				.expect("Time must be > 1970")
				.as_secs();
			if should_sync {
				gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
			} else {
				gossip_start_time -= 60 * 60; // an hour ago
			}
		}

		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(MessageSendEvent::SendGossipTimestampFilter {
			node_id: their_node_id.clone(),
			msg: GossipTimestampFilter {
				chain_hash: self.network_graph.chain_hash,
				first_timestamp: gossip_start_time as u32, // 2106 issue!
				timestamp_range: u32::max_value(),
			},
		});
		Ok(())
	}

	fn peer_disconnected(&self, _their_node_id: PublicKey) {}

	fn provided_node_features(&self) -> NodeFeatures {
		let mut features = NodeFeatures::empty();
		features.set_gossip_queries_optional();
		features
	}

	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
		let mut features = InitFeatures::empty();
		features.set_gossip_queries_optional();
		features
	}

	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		let mut ret = Vec::new();
		let mut pending_events = self.pending_events.lock().unwrap();
		core::mem::swap(&mut ret, &mut pending_events);
		ret
	}
}

// Fetching values from this struct is very performance sensitive during routefinding. Thus, we
// want to ensure that all of the fields we care about (all of them except `last_update_message`)
// sit on the same cache line.
//
// We do this by using `repr(C)`, which forces the struct to be laid out in memory the way we write
// it (ensuring `last_update_message` hangs off the end and no fields are reordered after it), and
// `align(32)`, ensuring the struct starts either at the start, or in the middle, of an x86-64
// 64-byte cache line. This ensures the beginning fields (which are 31 bytes) all sit in the same
// cache line.
#[repr(C, align(32))]
#[derive(Clone, Debug, PartialEq, Eq)]
/// Details about one direction of a channel as received within a [`ChannelUpdate`].
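///
/// # Example
///
/// All fields are public, so a value can be assembled directly; the numbers below are arbitrary
/// illustrative values:
///
/// ```ignore
/// let dir_info = ChannelUpdateInfo {
/// 	htlc_minimum_msat: 1_000,
/// 	htlc_maximum_msat: 100_000_000,
/// 	fees: RoutingFees { base_msat: 1_000, proportional_millionths: 100 },
/// 	last_update: 1_700_000_000,
/// 	cltv_expiry_delta: 144,
/// 	enabled: true,
/// 	last_update_message: None,
/// };
/// ```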
pub struct ChannelUpdateInfo {
	/// The minimum value, which must be relayed to the next hop via the channel
	pub htlc_minimum_msat: u64,
	/// The maximum value which may be relayed to the next hop via the channel.
	pub htlc_maximum_msat: u64,
	/// Fees charged when the channel is used for routing
	pub fees: RoutingFees,
	/// When the last update to the channel direction was issued.
	/// Value is opaque, as set in the announcement.
	pub last_update: u32,
	/// The difference in CLTV values that you must have when routing through this channel.
	pub cltv_expiry_delta: u16,
	/// Whether the channel can be currently used for payments (in this one direction).
	pub enabled: bool,
	/// Most recent update for the channel received from the network
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if it contains excess data, to prevent DoS.
	pub last_update_message: Option<ChannelUpdate>,
}

impl fmt::Display for ChannelUpdateInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(
			f,
			"last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}",
			self.last_update,
			self.enabled,
			self.cltv_expiry_delta,
			self.htlc_minimum_msat,
			self.fees
		)?;
		Ok(())
	}
}

impl Writeable for ChannelUpdateInfo {
	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.last_update, required),
			(2, self.enabled, required),
			(4, self.cltv_expiry_delta, required),
			(6, self.htlc_minimum_msat, required),
			// Writing htlc_maximum_msat as an Option<u64> is required to maintain backwards
			// compatibility with LDK versions prior to v0.0.110.
			(8, Some(self.htlc_maximum_msat), required),
			(10, self.fees, required),
			(12, self.last_update_message, required),
		});
		Ok(())
	}
}

impl Readable for ChannelUpdateInfo {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_tlv_field_var!(last_update, required);
		_init_tlv_field_var!(enabled, required);
		_init_tlv_field_var!(cltv_expiry_delta, required);
		_init_tlv_field_var!(htlc_minimum_msat, required);
		_init_tlv_field_var!(htlc_maximum_msat, option);
		_init_tlv_field_var!(fees, required);
		_init_tlv_field_var!(last_update_message, required);

		read_tlv_fields!(reader, {
			(0, last_update, required),
			(2, enabled, required),
			(4, cltv_expiry_delta, required),
			(6, htlc_minimum_msat, required),
			(8, htlc_maximum_msat, required),
			(10, fees, required),
			(12, last_update_message, required)
		});

		if let Some(htlc_maximum_msat) = htlc_maximum_msat {
			Ok(ChannelUpdateInfo {
				last_update: _init_tlv_based_struct_field!(last_update, required),
				enabled: _init_tlv_based_struct_field!(enabled, required),
				cltv_expiry_delta: _init_tlv_based_struct_field!(cltv_expiry_delta, required),
				htlc_minimum_msat: _init_tlv_based_struct_field!(htlc_minimum_msat, required),
				htlc_maximum_msat,
				fees: _init_tlv_based_struct_field!(fees, required),
				last_update_message: _init_tlv_based_struct_field!(last_update_message, required),
			})
		} else {
			Err(DecodeError::InvalidValue)
		}
	}
}

// Fetching values from this struct is very performance sensitive during routefinding. Thus, we
// want to ensure that all of the fields we care about (all of them except `last_update_message`
// and `announcement_received_time`) sit on the same cache line.
//
// Sadly, this is not possible, however we can still do okay - all of the fields before
// `one_to_two` and `two_to_one` are just under 128 bytes long, so we can ensure they sit on
// adjacent cache lines (which are often fetched together in x86-64 processors).
//
// This leaves only the two directional channel info structs on separate cache lines.
//
// We accomplish this using `repr(C)`, which forces the struct to be laid out in memory the way we
// write it (ensuring the fields we care about are at the start of the struct) and `align(128)`,
// ensuring the struct starts at the beginning of two adjacent 64b x86-64 cache lines.
#[repr(align(128), C)]
#[derive(Clone, Debug, Eq)]
/// Details about a channel (both directions).
/// Received within a channel announcement.
pub struct ChannelInfo {
	/// Protocol features of a channel communicated during its announcement
	pub features: ChannelFeatures,

	/// Source node of the first direction of a channel
	pub node_one: NodeId,

	/// Source node of the second direction of a channel
	pub node_two: NodeId,

	/// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_one`].
	pub(crate) node_one_counter: u32,
	/// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_two`].
	pub(crate) node_two_counter: u32,

	/// The channel capacity as seen on-chain, if chain lookup is available.
	pub capacity_sats: Option<u64>,

	/// Details about the first direction of a channel
	pub one_to_two: Option<ChannelUpdateInfo>,
	/// Details about the second direction of a channel
	pub two_to_one: Option<ChannelUpdateInfo>,

	/// An initial announcement of the channel
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if it contains excess data, to prevent DoS.
	pub announcement_message: Option<ChannelAnnouncement>,
	/// The timestamp when we received the announcement, if we are running with feature = "std"
	/// (which we can probably assume we are - non-`std` environments probably won't have a full
	/// network graph in memory!).
	announcement_received_time: u64,
}

impl PartialEq for ChannelInfo {
	fn eq(&self, o: &ChannelInfo) -> bool {
		self.features == o.features
			&& self.node_one == o.node_one
			&& self.one_to_two == o.one_to_two
			&& self.node_two == o.node_two
			&& self.two_to_one == o.two_to_one
			&& self.capacity_sats == o.capacity_sats
			&& self.announcement_message == o.announcement_message
			&& self.announcement_received_time == o.announcement_received_time
	}
}

impl ChannelInfo {
	/// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a
	/// returned `source`, or `None` if `target` is not one of the channel's counterparties.
	pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo<'_>, &NodeId)> {
		if self.one_to_two.is_none() || self.two_to_one.is_none() {
			return None;
		}
		let (direction, source, outbound) = {
			if target == &self.node_one {
				(self.two_to_one.as_ref(), &self.node_two, false)
			} else if target == &self.node_two {
				(self.one_to_two.as_ref(), &self.node_one, true)
			} else {
				return None;
			}
		};
		let dir = direction.expect("We checked that both directions are available at the start");
		Some((DirectedChannelInfo::new(self, dir, outbound), source))
	}

	/// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
	/// returned `target`, or `None` if `source` is not one of the channel's counterparties.
	pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo<'_>, &NodeId)> {
		if self.one_to_two.is_none() || self.two_to_one.is_none() {
			return None;
		}
		let (direction, target, outbound) = {
			if source == &self.node_one {
				(self.one_to_two.as_ref(), &self.node_two, true)
			} else if source == &self.node_two {
				(self.two_to_one.as_ref(), &self.node_one, false)
			} else {
				return None;
			}
		};
		let dir = direction.expect("We checked that both directions are available at the start");
		Some((DirectedChannelInfo::new(self, dir, outbound), target))
	}

	/// Returns a [`ChannelUpdateInfo`] based on the direction implied by `channel_flags`.
	pub fn get_directional_info(&self, channel_flags: u8) -> Option<&ChannelUpdateInfo> {
		let direction = channel_flags & 1u8;
		if direction == 0 {
			self.one_to_two.as_ref()
		} else {
			self.two_to_one.as_ref()
		}
	}
}

impl fmt::Display for ChannelInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(
			f,
			"features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
			log_bytes!(self.features.encode()),
			&self.node_one,
			self.one_to_two,
			&self.node_two,
			self.two_to_one
		)?;
		Ok(())
	}
}

impl Writeable for ChannelInfo {
	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.features, required),
			(1, self.announcement_received_time, (default_value, 0)),
			(2, self.node_one, required),
			(4, self.one_to_two, required),
			(6, self.node_two, required),
			(8, self.two_to_one, required),
			(10, self.capacity_sats, required),
			(12, self.announcement_message, required),
		});
		Ok(())
	}
}

// A wrapper allowing for the optional deserialization of ChannelUpdateInfo. Utilizing this is
// necessary to maintain backwards compatibility with previous serializations of `ChannelUpdateInfo`
// that may have no `htlc_maximum_msat` field set. In case the field is absent, we simply ignore
// the error and continue reading the `ChannelInfo`. Hopefully, we'll then eventually receive newer
// channel updates via the gossip network.
struct ChannelUpdateInfoDeserWrapper(Option<ChannelUpdateInfo>);

impl MaybeReadable for ChannelUpdateInfoDeserWrapper {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		match crate::util::ser::Readable::read(reader) {
			Ok(channel_update_option) => Ok(Some(Self(channel_update_option))),
			Err(DecodeError::ShortRead) => Ok(None),
			Err(DecodeError::InvalidValue) => Ok(None),
			Err(err) => Err(err),
		}
	}
}

impl Readable for ChannelInfo {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_tlv_field_var!(features, required);
		_init_tlv_field_var!(announcement_received_time, (default_value, 0));
		_init_tlv_field_var!(node_one, required);
		let mut one_to_two_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
		_init_tlv_field_var!(node_two, required);
		let mut two_to_one_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
		_init_tlv_field_var!(capacity_sats, required);
		_init_tlv_field_var!(announcement_message, required);
		read_tlv_fields!(reader, {
			(0, features, required),
			(1, announcement_received_time, (default_value, 0)),
			(2, node_one, required),
			(4, one_to_two_wrap, upgradable_option),
			(6, node_two, required),
			(8, two_to_one_wrap, upgradable_option),
			(10, capacity_sats, required),
			(12, announcement_message, required),
		});

		Ok(ChannelInfo {
			features: _init_tlv_based_struct_field!(features, required),
			node_one: _init_tlv_based_struct_field!(node_one, required),
			one_to_two: one_to_two_wrap.map(|w| w.0).unwrap_or(None),
			node_two: _init_tlv_based_struct_field!(node_two, required),
			two_to_one: two_to_one_wrap.map(|w| w.0).unwrap_or(None),
			capacity_sats: _init_tlv_based_struct_field!(capacity_sats, required),
			announcement_message: _init_tlv_based_struct_field!(announcement_message, required),
			announcement_received_time: _init_tlv_based_struct_field!(
				announcement_received_time,
				(default_value, 0)
			),
			node_one_counter: u32::max_value(),
			node_two_counter: u32::max_value(),
		})
	}
}

/// A wrapper around [`ChannelInfo`] representing information about the channel as directed from a
/// source node to a target node.
#[derive(Clone)]
pub struct DirectedChannelInfo<'a> {
	channel: &'a ChannelInfo,
	direction: &'a ChannelUpdateInfo,
	source_counter: u32,
	target_counter: u32,
	/// The direction this channel is in - if set, it indicates that we're traversing the channel
	/// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`].
	from_node_one: bool,
}

impl<'a> DirectedChannelInfo<'a> {
	#[inline]
	fn new(
		channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool,
	) -> Self {
		let (source_counter, target_counter) = if from_node_one {
			(channel.node_one_counter, channel.node_two_counter)
		} else {
			(channel.node_two_counter, channel.node_one_counter)
		};
		Self { channel, direction, from_node_one, source_counter, target_counter }
	}

	/// Returns information for the channel.
	#[inline]
	pub fn channel(&self) -> &'a ChannelInfo {
		self.channel
	}

	/// Returns the [`EffectiveCapacity`] of the channel in the direction.
	///
	/// This is the total capacity from the funding transaction, if known; otherwise, it is the
	/// `htlc_maximum_msat` for the direction as advertised by the gossip network.
	#[inline]
	pub fn effective_capacity(&self) -> EffectiveCapacity {
		let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
		let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);

		match capacity_msat {
			Some(capacity_msat) => {
				htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
				EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
			},
			None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
		}
	}

	/// Returns information for the direction.
	#[inline]
	pub(super) fn direction(&self) -> &'a ChannelUpdateInfo {
		self.direction
	}

	/// Returns the `node_id` of the source hop.
	///
	/// Refers to the `node_id` forwarding the payment to the next hop.
	#[inline]
	pub fn source(&self) -> &'a NodeId {
		if self.from_node_one {
			&self.channel.node_one
		} else {
			&self.channel.node_two
		}
	}

	/// Returns the `node_id` of the target hop.
	///
	/// Refers to the `node_id` receiving the payment from the previous hop.
	#[inline]
	pub fn target(&self) -> &'a NodeId {
		if self.from_node_one {
			&self.channel.node_two
		} else {
			&self.channel.node_one
		}
	}

	/// Returns the source node's counter
	#[inline(always)]
	pub(super) fn source_counter(&self) -> u32 {
		self.source_counter
	}

	/// Returns the target node's counter
	#[inline(always)]
	pub(super) fn target_counter(&self) -> u32 {
		self.target_counter
	}
}

impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		f.debug_struct("DirectedChannelInfo").field("channel", &self.channel).finish()
	}
}

/// The effective capacity of a channel for routing purposes.
///
/// While this may be smaller than the actual channel capacity, amounts greater than
/// [`Self::as_msat`] should not be routed through the channel.
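///
/// # Example
///
/// A small sketch of how [`Self::as_msat`] reads one of the variants (values are arbitrary):
///
/// ```ignore
/// let cap = EffectiveCapacity::Total { capacity_msat: 1_000_000, htlc_maximum_msat: 500_000 };
/// // `as_msat` reports the funding-derived capacity, not the per-HTLC ceiling.
/// assert_eq!(cap.as_msat(), 1_000_000);
/// ```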
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EffectiveCapacity {
	/// The available liquidity in the channel known from being a channel counterparty, and thus a
	/// direct hop.
	ExactLiquidity {
		/// Either the inbound or outbound liquidity depending on the direction, denominated in
		/// millisatoshi.
		liquidity_msat: u64,
	},
	/// The maximum HTLC amount in one direction as advertised on the gossip network.
	AdvertisedMaxHTLC {
		/// The maximum HTLC amount denominated in millisatoshi.
		amount_msat: u64,
	},
	/// The total capacity of the channel as determined by the funding transaction.
	Total {
		/// The funding amount denominated in millisatoshi.
		capacity_msat: u64,
		/// The maximum HTLC amount denominated in millisatoshi.
		htlc_maximum_msat: u64,
	},
	/// A capacity sufficient to route any payment, typically used for private channels provided by
	/// an invoice.
	Infinite,
	/// The maximum HTLC amount as provided by an invoice route hint.
	HintMaxHTLC {
		/// The maximum HTLC amount denominated in millisatoshi.
		amount_msat: u64,
	},
	/// A capacity that is unknown, either because the chain state is unavailable to determine the
	/// total capacity or because the `htlc_maximum_msat` was not advertised on the gossip
	/// network.
	Unknown,
}

/// The presumed channel capacity denominated in millisatoshi for [`EffectiveCapacity::Unknown`] to
/// use when making routing decisions.
pub const UNKNOWN_CHANNEL_CAPACITY_MSAT: u64 = 250_000 * 1000;

impl EffectiveCapacity {
	/// Returns the effective capacity denominated in millisatoshi.
	pub fn as_msat(&self) -> u64 {
		match self {
			EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat,
			EffectiveCapacity::AdvertisedMaxHTLC { amount_msat } => *amount_msat,
			EffectiveCapacity::Total { capacity_msat, .. } => *capacity_msat,
			EffectiveCapacity::HintMaxHTLC { amount_msat } => *amount_msat,
			EffectiveCapacity::Infinite => u64::max_value(),
			EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT,
		}
	}
}

impl_writeable_tlv_based!(RoutingFees, {
	(0, base_msat, required),
	(2, proportional_millionths, required)
});

#[derive(Clone, Debug, PartialEq, Eq)]
/// Non-relayable information received in the latest node_announcement from this node.
pub struct NodeAnnouncementDetails {
	/// Protocol features the node announced support for
	pub features: NodeFeatures,

	/// When the last known update to the node state was issued.
	/// Value is opaque, as set in the announcement.
	pub last_update: u32,

	/// Color assigned to the node
	pub rgb: [u8; 3],

	/// Moniker assigned to the node.
	/// May be invalid or malicious (e.g. control chars) and
	/// should not be exposed to the user.
	pub alias: NodeAlias,

	/// Internet-level addresses via which one can connect to the node
	pub addresses: Vec<SocketAddress>,
}

#[derive(Clone, Debug, PartialEq, Eq)]
/// Information received in the latest node_announcement from this node.
pub enum NodeAnnouncementInfo {
	/// An initial announcement of the node
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if it contains excess data, to prevent DoS.
	Relayed(NodeAnnouncement),

	/// Non-relayable information received in the latest node_announcement from this node.
	Local(NodeAnnouncementDetails),
}

impl NodeAnnouncementInfo {
	/// Protocol features the node announced support for
	pub fn features(&self) -> &NodeFeatures {
		match self {
			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.features,
			NodeAnnouncementInfo::Local(local) => &local.features,
		}
	}

	/// When the last known update to the node state was issued.
	///
	/// Value may or may not be a timestamp, depending on the policy of the origin node.
	pub fn last_update(&self) -> u32 {
		match self {
			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.timestamp,
			NodeAnnouncementInfo::Local(local) => local.last_update,
		}
	}

	/// Color assigned to the node
	pub fn rgb(&self) -> [u8; 3] {
		match self {
			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.rgb,
			NodeAnnouncementInfo::Local(local) => local.rgb,
		}
	}

	/// Moniker assigned to the node.
	///
1418	/// May be invalid or malicious (e.g., control characters), so it should not be exposed to the user.
1419	pub fn alias(&self) -> &NodeAlias {
1420		match self {
1421			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.alias,
1422			NodeAnnouncementInfo::Local(local) => &local.alias,
1423		}
1424	}
1425
1426	/// Internet-level addresses via which one can connect to the node
1427	pub fn addresses(&self) -> &[SocketAddress] {
1428		match self {
1429			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.addresses,
1430			NodeAnnouncementInfo::Local(local) => &local.addresses,
1431		}
1432	}
1433
1434	/// An initial announcement of the node.
1435	///
1436	/// Not stored if it contains excess data, to prevent DoS.
1437	pub fn announcement_message(&self) -> Option<&NodeAnnouncement> {
1438		match self {
1439			NodeAnnouncementInfo::Relayed(announcement) => Some(announcement),
1440			NodeAnnouncementInfo::Local(_) => None,
1441		}
1442	}
1443}
1444
1445impl Writeable for NodeAnnouncementInfo {
1446	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1447		let features = self.features();
1448		let last_update = self.last_update();
1449		let rgb = self.rgb();
1450		let alias = self.alias();
1451		let addresses = self.addresses();
1452		let announcement_message = self.announcement_message();
1453
1454		write_tlv_fields!(writer, {
1455			(0, features, required),
1456			(2, last_update, required),
1457			(4, rgb, required),
1458			(6, alias, required),
1459			(8, announcement_message, option),
1460			(10, *addresses, required_vec), // Versions 0.0.115 through 0.0.123 only serialized an empty vec
1461		});
1462		Ok(())
1463	}
1464}
1465
1466impl Readable for NodeAnnouncementInfo {
1467	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1468		_init_and_read_len_prefixed_tlv_fields!(reader, {
1469			(0, features, required),
1470			(2, last_update, required),
1471			(4, rgb, required),
1472			(6, alias, required),
1473			(8, announcement_message, option),
1474			(10, addresses, required_vec),
1475		});
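		// If the full signed announcement was serialized (TLV type 8), reconstruct the `Relayed`
		// variant from it directly; otherwise only the individual fields were stored, so fall
		// back to the non-relayable `Local` form.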
1476		if let Some(announcement) = announcement_message {
1477			Ok(Self::Relayed(announcement))
1478		} else {
1479			Ok(Self::Local(NodeAnnouncementDetails {
1480				features: features.0.unwrap(),
1481				last_update: last_update.0.unwrap(),
1482				rgb: rgb.0.unwrap(),
1483				alias: alias.0.unwrap(),
1484				addresses,
1485			}))
1486		}
1487	}
1488}
1489
1490/// A user-defined name for a node, which may be used when displaying the node in a graph.
1491///
1492/// Since node aliases are provided by third parties, they are a potential avenue for injection
1493/// attacks. Care must be taken when processing.
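///
/// For example (a minimal sketch), trailing NUL padding is dropped when displaying an alias:
///
/// ```
/// use lightning::routing::gossip::NodeAlias;
///
/// let mut bytes = [0u8; 32];
/// bytes[..8].copy_from_slice(b"LDK node");
/// assert_eq!(NodeAlias(bytes).to_string(), "LDK node");
/// ```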
1494#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
1495pub struct NodeAlias(pub [u8; 32]);
1496
1497impl fmt::Display for NodeAlias {
1498	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1499		let first_null = self.0.iter().position(|b| *b == 0).unwrap_or(self.0.len());
1500		let bytes = self.0.split_at(first_null).0;
1501		match core::str::from_utf8(bytes) {
1502			Ok(alias) => PrintableString(alias).fmt(f)?,
1503			Err(_) => {
1504				use core::fmt::Write;
1505				for c in bytes.iter().map(|b| *b as char) {
1506					// Display printable ASCII characters
1507					let control_symbol = core::char::REPLACEMENT_CHARACTER;
1508					let c = if c >= '\x20' && c <= '\x7e' { c } else { control_symbol };
1509					f.write_char(c)?;
1510				}
1511			},
1512		};
1513		Ok(())
1514	}
1515}
1516
1517impl Writeable for NodeAlias {
1518	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
1519		self.0.write(w)
1520	}
1521}
1522
1523impl Readable for NodeAlias {
1524	fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
1525		Ok(NodeAlias(Readable::read(r)?))
1526	}
1527}
1528
1529#[derive(Clone, Debug, Eq)]
1530/// Details about a node in the network, known from the network announcement.
1531pub struct NodeInfo {
1532	/// All valid channels a node has announced
1533	pub channels: Vec<u64>,
1534	/// More information about a node from node_announcement.
1535	/// Optional because we store a Node entry after learning about it from
1536	/// a channel announcement, but before receiving a node announcement.
1537	pub announcement_info: Option<NodeAnnouncementInfo>,
1538	/// In memory, each node is assigned a unique ID. They are eagerly reused, ensuring they remain
1539	/// relatively dense.
1540	///
1541	/// These IDs allow the router to avoid a `HashMap` lookup by simply using this value as an
1542	/// index in a `Vec`, skipping a big step in some of the hottest code when routing.
1543	pub(crate) node_counter: u32,
1544}
1545
1546impl PartialEq for NodeInfo {
1547	fn eq(&self, o: &NodeInfo) -> bool {
1548		self.channels == o.channels && self.announcement_info == o.announcement_info
1549	}
1550}
1551
1552impl NodeInfo {
1553	/// Returns whether the node has only announced Tor addresses.
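	///
	/// Returns `false` if no announcement is known for the node or if no addresses were announced.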
1554	pub fn is_tor_only(&self) -> bool {
1555		self.announcement_info
1556			.as_ref()
1557			.map(|info| info.addresses())
1558			.and_then(|addresses| (!addresses.is_empty()).then(|| addresses))
1559			.map(|addresses| addresses.iter().all(|address| address.is_tor()))
1560			.unwrap_or(false)
1561	}
1562}
1563
1564impl fmt::Display for NodeInfo {
1565	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1566		write!(
1567			f,
1568			" channels: {:?}, announcement_info: {:?}",
1569			&self.channels[..],
1570			self.announcement_info
1571		)?;
1572		Ok(())
1573	}
1574}
1575
1576impl Writeable for NodeInfo {
1577	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1578		write_tlv_fields!(writer, {
1579			// Note that older versions of LDK wrote the lowest inbound fees here at type 0
1580			(2, self.announcement_info, option),
1581			(4, self.channels, required_vec),
1582		});
1583		Ok(())
1584	}
1585}
1586
1587// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
1588// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an
1589// invalid hostname set. We ignore and eat all errors until we are either able to read a
1590// `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
1591struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
1592
1593impl MaybeReadable for NodeAnnouncementInfoDeserWrapper {
1594	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1595		match crate::util::ser::Readable::read(reader) {
1596			Ok(node_announcement_info) => return Ok(Some(Self(node_announcement_info))),
1597			Err(_) => {
1598				copy(reader, &mut sink()).unwrap();
1599				return Ok(None);
1600			},
1601		};
1602	}
1603}
1604
1605impl Readable for NodeInfo {
1606	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1607		// Historically, we tracked the lowest inbound fees for any node in order to use it as an
1608		// A* heuristic when routing. Sadly, these days many, many nodes have at least one channel
1609		// with zero inbound fees, causing that heuristic to provide little gain. Worse, because it
1610		// requires additional complexity and lookups during routing, it ends up being a
1611		// performance loss. Thus, we simply ignore the old field here and no longer track it.
1612		_init_and_read_len_prefixed_tlv_fields!(reader, {
1613			(0, _lowest_inbound_channel_fees, option),
1614			(2, announcement_info_wrap, upgradable_option),
1615			(4, channels, required_vec),
1616		});
1617		let _: Option<RoutingFees> = _lowest_inbound_channel_fees;
1618		let announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> =
1619			announcement_info_wrap;
1620
1621		Ok(NodeInfo {
1622			announcement_info: announcement_info_wrap.map(|w| w.0),
1623			channels,
1624			node_counter: u32::max_value(),
1625		})
1626	}
1627}
1628
1629const SERIALIZATION_VERSION: u8 = 1;
1630const MIN_SERIALIZATION_VERSION: u8 = 1;
1631
1632impl<L: Deref> Writeable for NetworkGraph<L>
1633where
1634	L::Target: Logger,
1635{
1636	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1637		self.test_node_counter_consistency();
1638
1639		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
1640
1641		self.chain_hash.write(writer)?;
1642		let channels = self.channels.read().unwrap();
1643		(channels.len() as u64).write(writer)?;
1644		for (ref chan_id, ref chan_info) in channels.unordered_iter() {
1645			(*chan_id).write(writer)?;
1646			chan_info.write(writer)?;
1647		}
1648		let nodes = self.nodes.read().unwrap();
1649		(nodes.len() as u64).write(writer)?;
1650		for (ref node_id, ref node_info) in nodes.unordered_iter() {
1651			node_id.write(writer)?;
1652			node_info.write(writer)?;
1653		}
1654
1655		let last_rapid_gossip_sync_timestamp = self.get_last_rapid_gossip_sync_timestamp();
1656		write_tlv_fields!(writer, {
1657			(1, last_rapid_gossip_sync_timestamp, option),
1658		});
1659		Ok(())
1660	}
1661}
1662
1663impl<L: Deref> ReadableArgs<L> for NetworkGraph<L>
1664where
1665	L::Target: Logger,
1666{
1667	fn read<R: io::Read>(reader: &mut R, logger: L) -> Result<NetworkGraph<L>, DecodeError> {
1668		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
1669
1670		let chain_hash: ChainHash = Readable::read(reader)?;
1671		let channels_count: u64 = Readable::read(reader)?;
1672		let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE);
1673		for _ in 0..channels_count {
1674			let chan_id: u64 = Readable::read(reader)?;
1675			let chan_info: ChannelInfo = Readable::read(reader)?;
1676			channels.insert(chan_id, chan_info);
1677		}
1678		let nodes_count: u64 = Readable::read(reader)?;
1679		// There shouldn't be anywhere near `u32::MAX` nodes, and we need some headroom to insert
1680		// new nodes during sync, so reject any graphs claiming more than `u32::MAX / 2` nodes.
1681		if nodes_count > u32::max_value() as u64 / 2 {
1682			return Err(DecodeError::InvalidValue);
1683		}
1684		let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE);
1685		for i in 0..nodes_count {
1686			let node_id = Readable::read(reader)?;
1687			let mut node_info: NodeInfo = Readable::read(reader)?;
1688			node_info.node_counter = i as u32;
1689			nodes.insert(node_id, node_info);
1690		}
1691
1692		for (_, chan) in channels.unordered_iter_mut() {
1693			chan.node_one_counter =
1694				nodes.get(&chan.node_one).ok_or(DecodeError::InvalidValue)?.node_counter;
1695			chan.node_two_counter =
1696				nodes.get(&chan.node_two).ok_or(DecodeError::InvalidValue)?.node_counter;
1697		}
1698
1699		let mut last_rapid_gossip_sync_timestamp: Option<u32> = None;
1700		read_tlv_fields!(reader, {
1701			(1, last_rapid_gossip_sync_timestamp, option),
1702		});
1703
1704		Ok(NetworkGraph {
1705			secp_ctx: Secp256k1::verification_only(),
1706			chain_hash,
1707			logger,
1708			channels: RwLock::new(channels),
1709			nodes: RwLock::new(nodes),
1710			removed_node_counters: Mutex::new(Vec::new()),
1711			next_node_counter: AtomicUsize::new(nodes_count as usize),
1712			last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
1713			removed_nodes: Mutex::new(new_hash_map()),
1714			removed_channels: Mutex::new(new_hash_map()),
1715			pending_checks: utxo::PendingChecks::new(),
1716		})
1717	}
1718}
1719
1720impl<L: Deref> fmt::Display for NetworkGraph<L>
1721where
1722	L::Target: Logger,
1723{
1724	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1725		writeln!(f, "Network map\n[Channels]")?;
1726		for (key, val) in self.channels.read().unwrap().unordered_iter() {
1727			writeln!(f, " {}: {}", key, val)?;
1728		}
1729		writeln!(f, "[Nodes]")?;
1730		for (&node_id, val) in self.nodes.read().unwrap().unordered_iter() {
1731			writeln!(f, " {}: {}", &node_id, val)?;
1732		}
1733		Ok(())
1734	}
1735}
1736
1737impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
1738impl<L: Deref> PartialEq for NetworkGraph<L>
1739where
1740	L::Target: Logger,
1741{
1742	fn eq(&self, other: &Self) -> bool {
1743		// For a total lockorder, sort by position in memory and take the inner locks in that order.
1744		// (Assumes that we can't move within memory while a lock is held).
1745		let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
1746		let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
1747		let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
1748		let (channels_a, channels_b) = (
1749			a.0.unsafe_well_ordered_double_lock_self(),
1750			b.0.unsafe_well_ordered_double_lock_self(),
1751		);
1752		let (nodes_a, nodes_b) = (
1753			a.1.unsafe_well_ordered_double_lock_self(),
1754			b.1.unsafe_well_ordered_double_lock_self(),
1755		);
1756		self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
1757	}
1758}
1759
1760// In January 2025 there were about 49K channels.
1761// We over-allocate by a bit because 20% more is better than the doubling we get if we're
1762// slightly too low.
1763const CHAN_COUNT_ESTIMATE: usize = 60_000;
1764// In January 2025 there were about 15K nodes.
1765// We over-allocate by a bit because 33% more is better than the doubling we get if we're
1766// slightly too low.
1767const NODE_COUNT_ESTIMATE: usize = 20_000;
1768
1769impl<L: Deref> NetworkGraph<L>
1770where
1771	L::Target: Logger,
1772{
1773	/// Creates a new, empty, network graph.
1774	pub fn new(network: Network, logger: L) -> NetworkGraph<L> {
1775		Self {
1776			secp_ctx: Secp256k1::verification_only(),
1777			chain_hash: ChainHash::using_genesis_block(network),
1778			logger,
1779			channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)),
1780			nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)),
1781			next_node_counter: AtomicUsize::new(0),
1782			removed_node_counters: Mutex::new(Vec::new()),
1783			last_rapid_gossip_sync_timestamp: Mutex::new(None),
1784			removed_channels: Mutex::new(new_hash_map()),
1785			removed_nodes: Mutex::new(new_hash_map()),
1786			pending_checks: utxo::PendingChecks::new(),
1787		}
1788	}
1789
1790	fn test_node_counter_consistency(&self) {
1791		#[cfg(any(test, fuzzing))]
1792		{
1793			let channels = self.channels.read().unwrap();
1794			let nodes = self.nodes.read().unwrap();
1795			let removed_node_counters = self.removed_node_counters.lock().unwrap();
1796			let next_counter = self.next_node_counter.load(Ordering::Acquire);
1797			assert!(next_counter < (u32::max_value() as usize) / 2);
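			// Build a bitmap over all counters below `next_counter`: every counter must appear
			// exactly once, either on a node in the graph or in the removed-counters free list.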
1798			let mut used_node_counters = vec![0u8; next_counter / 8 + 1];
1799
1800			for counter in removed_node_counters.iter() {
1801				let pos = (*counter as usize) / 8;
1802				let bit = 1 << (counter % 8);
1803				assert_eq!(used_node_counters[pos] & bit, 0);
1804				used_node_counters[pos] |= bit;
1805			}
1806			for (_, node) in nodes.unordered_iter() {
1807				assert!((node.node_counter as usize) < next_counter);
1808				let pos = (node.node_counter as usize) / 8;
1809				let bit = 1 << (node.node_counter % 8);
1810				assert_eq!(used_node_counters[pos] & bit, 0);
1811				used_node_counters[pos] |= bit;
1812			}
1813
1814			for (idx, used_bitset) in used_node_counters.iter().enumerate() {
1815				if idx != next_counter / 8 {
1816					assert_eq!(*used_bitset, 0xff);
1817				} else {
1818					assert_eq!(*used_bitset, (1u8 << (next_counter % 8)) - 1);
1819				}
1820			}
1821
1822			for (_, chan) in channels.unordered_iter() {
1823				assert_eq!(chan.node_one_counter, nodes.get(&chan.node_one).unwrap().node_counter);
1824				assert_eq!(chan.node_two_counter, nodes.get(&chan.node_two).unwrap().node_counter);
1825			}
1826		}
1827	}
1828
1829	/// Returns a read-only view of the network graph.
1830	pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> {
1831		self.test_node_counter_consistency();
1832		let channels = self.channels.read().unwrap();
1833		let nodes = self.nodes.read().unwrap();
1834		ReadOnlyNetworkGraph {
1835			channels,
1836			nodes,
1837			max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32)
1838				.saturating_sub(1),
1839		}
1840	}
1841
1842	/// The unix timestamp provided by the most recent rapid gossip sync.
1843	/// It will be set by the rapid sync process after every sync completion.
1844	pub fn get_last_rapid_gossip_sync_timestamp(&self) -> Option<u32> {
1845		self.last_rapid_gossip_sync_timestamp.lock().unwrap().clone()
1846	}
1847
1848	/// Update the unix timestamp provided by the most recent rapid gossip sync.
1849	/// This should be done automatically by the rapid sync process after every sync completion.
1850	pub fn set_last_rapid_gossip_sync_timestamp(&self, last_rapid_gossip_sync_timestamp: u32) {
1851		self.last_rapid_gossip_sync_timestamp
1852			.lock()
1853			.unwrap()
1854			.replace(last_rapid_gossip_sync_timestamp);
1855	}
1856
1857	/// Clears the `NodeAnnouncementInfo` field for all nodes in the `NetworkGraph` for testing
1858	/// purposes.
1859	#[cfg(test)]
1860	pub fn clear_nodes_announcement_info(&self) {
1861		for node in self.nodes.write().unwrap().unordered_iter_mut() {
1862			node.1.announcement_info = None;
1863		}
1864	}
1865
1866	/// For an already known node (from channel announcements), update its stored properties from a
1867	/// given node announcement.
1868	///
1869	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1870	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1871	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1872	pub fn update_node_from_announcement(
1873		&self, msg: &msgs::NodeAnnouncement,
1874	) -> Result<(), LightningError> {
1875		// First check if we have the announcement already to avoid the CPU cost of validating a
1876		// redundant announcement.
1877		if let Some(node) = self.nodes.read().unwrap().get(&msg.contents.node_id) {
1878			if let Some(node_info) = node.announcement_info.as_ref() {
1879				if node_info.last_update() == msg.contents.timestamp {
1880					return Err(LightningError {
1881						err: "Update had the same timestamp as last processed update".to_owned(),
1882						action: ErrorAction::IgnoreDuplicateGossip,
1883					});
1884				}
1885			}
1886		}
1887		verify_node_announcement(msg, &self.secp_ctx)?;
1888		self.update_node_from_announcement_intern(&msg.contents, Some(&msg))
1889	}
1890
1891	/// For an already known node (from channel announcements), update its stored properties from a
1892	/// given node announcement without verifying the associated signatures. Because we aren't
1893	/// given the associated signatures here we cannot relay the node announcement to any of our
1894	/// peers.
1895	pub fn update_node_from_unsigned_announcement(
1896		&self, msg: &msgs::UnsignedNodeAnnouncement,
1897	) -> Result<(), LightningError> {
1898		self.update_node_from_announcement_intern(msg, None)
1899	}
1900
1901	fn update_node_from_announcement_intern(
1902		&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>,
1903	) -> Result<(), LightningError> {
1904		let mut nodes = self.nodes.write().unwrap();
1905		match nodes.get_mut(&msg.node_id) {
1906			None => {
1907				core::mem::drop(nodes);
1908				self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?;
1909				Err(LightningError {
1910					err: "No existing channels for node_announcement".to_owned(),
1911					action: ErrorAction::IgnoreError,
1912				})
1913			},
1914			Some(node) => {
1915				if let Some(node_info) = node.announcement_info.as_ref() {
1916					// The timestamp field is somewhat of a misnomer - the BOLTs use it to order
1917					// updates to ensure you always have the latest one, only vaguely suggesting
1918					// that it be at least the current time.
1919					if node_info.last_update() > msg.timestamp {
1920						return Err(LightningError {
1921							err: "Update older than last processed update".to_owned(),
1922							action: ErrorAction::IgnoreDuplicateGossip,
1923						});
1924					} else if node_info.last_update() == msg.timestamp {
1925						return Err(LightningError {
1926							err: "Update had the same timestamp as last processed update"
1927								.to_owned(),
1928							action: ErrorAction::IgnoreDuplicateGossip,
1929						});
1930					}
1931				}
1932
1933				let should_relay = msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1934					&& msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1935					&& msg.excess_data.len() + msg.excess_address_data.len()
1936						<= MAX_EXCESS_BYTES_FOR_RELAY;
1937
1938				node.announcement_info =
1939					if let (Some(signed_announcement), true) = (full_msg, should_relay) {
1940						Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone()))
1941					} else {
1942						Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
1943							features: msg.features.clone(),
1944							last_update: msg.timestamp,
1945							rgb: msg.rgb,
1946							alias: msg.alias,
1947							addresses: msg.addresses.clone(),
1948						}))
1949					};
1950
1951				Ok(())
1952			},
1953		}
1954	}
1955
1956	/// Store or update channel info from a channel announcement.
1957	///
1958	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1959	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1960	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1961	///
1962	/// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
1963	/// the corresponding UTXO exists on chain and is correctly-formatted.
1964	pub fn update_channel_from_announcement<U: Deref>(
1965		&self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option<U>,
1966	) -> Result<(), LightningError>
1967	where
1968		U::Target: UtxoLookup,
1969	{
1970		self.pre_channel_announcement_validation_check(&msg.contents, utxo_lookup)?;
1971		verify_channel_announcement(msg, &self.secp_ctx)?;
1972		self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup)
1973	}
1974
1975	/// Store or update channel info from a channel announcement.
1976	///
1977	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1978	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1979	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1980	///
1981	/// This will skip the verification of whether the channel is actually on-chain.
1982	pub fn update_channel_from_announcement_no_lookup(
1983		&self, msg: &ChannelAnnouncement,
1984	) -> Result<(), LightningError> {
1985		self.update_channel_from_announcement::<&UtxoResolver>(msg, &None)
1986	}
1987
1988	/// Store or update channel info from a channel announcement without verifying the associated
1989	/// signatures. Because we aren't given the associated signatures here we cannot relay the
1990	/// channel announcement to any of our peers.
1991	///
1992	/// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
1993	/// the corresponding UTXO exists on chain and is correctly-formatted.
1994	pub fn update_channel_from_unsigned_announcement<U: Deref>(
1995		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
1996	) -> Result<(), LightningError>
1997	where
1998		U::Target: UtxoLookup,
1999	{
2000		self.pre_channel_announcement_validation_check(&msg, utxo_lookup)?;
2001		self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup)
2002	}
2003
2004	/// Update channel from partial announcement data received via rapid gossip sync
2005	///
2006	/// `timestamp: u64`: Timestamp emulating the backdated original announcement receipt (by the
2007	/// rapid gossip sync server)
2008	///
2009	/// All other parameters as used in [`msgs::UnsignedChannelAnnouncement`] fields.
2010	pub fn add_channel_from_partial_announcement(
2011		&self, short_channel_id: u64, capacity_sats: Option<u64>, timestamp: u64,
2012		features: ChannelFeatures, node_id_1: NodeId, node_id_2: NodeId,
2013	) -> Result<(), LightningError> {
2014		if node_id_1 == node_id_2 {
2015			return Err(LightningError {
2016				err: "Channel announcement node had a channel with itself".to_owned(),
2017				action: ErrorAction::IgnoreError,
2018			});
2019		};
2020
2021		let channel_info = ChannelInfo {
2022			features,
2023			node_one: node_id_1,
2024			one_to_two: None,
2025			node_two: node_id_2,
2026			two_to_one: None,
2027			capacity_sats,
2028			announcement_message: None,
2029			announcement_received_time: timestamp,
2030			node_one_counter: u32::max_value(),
2031			node_two_counter: u32::max_value(),
2032		};
2033
2034		self.add_channel_between_nodes(short_channel_id, channel_info, None)
2035	}
2036
2037	fn add_channel_between_nodes(
2038		&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<Amount>,
2039	) -> Result<(), LightningError> {
2040		let mut channels = self.channels.write().unwrap();
2041		let mut nodes = self.nodes.write().unwrap();
2042
2043		let node_id_a = channel_info.node_one.clone();
2044		let node_id_b = channel_info.node_two.clone();
2045
2046		log_gossip!(
2047			self.logger,
2048			"Adding channel {} between nodes {} and {}",
2049			short_channel_id,
2050			node_id_a,
2051			node_id_b
2052		);
2053
2054		let channel_info = match channels.entry(short_channel_id) {
2055			IndexedMapEntry::Occupied(mut entry) => {
2056				//TODO: because asking the blockchain if short_channel_id is valid is only optional
2057				//in the blockchain API, we need to handle it smartly here, though it's unclear
2058				//exactly how...
2059				if utxo_value.is_some() {
2060					// Either our UTXO provider is busted, there was a reorg, or the UTXO provider
2061					// only sometimes returns results. In any case remove the previous entry. Note
2062					// that the spec expects us to "blacklist" the node_ids involved, but we can't
2063					// do that because
2064					// a) we don't *require* a UTXO provider that always returns results.
2065					// b) we don't track UTXOs of channels we know about and remove them if they
2066					//    get reorg'd out.
2067					// c) it's unclear how to do so without exposing ourselves to massive DoS risk.
2068					self.remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id);
2069					*entry.get_mut() = channel_info;
2070					entry.into_mut()
2071				} else {
2072					return Err(LightningError {
2073						err: "Already have knowledge of channel".to_owned(),
2074						action: ErrorAction::IgnoreDuplicateGossip,
2075					});
2076				}
2077			},
2078			IndexedMapEntry::Vacant(entry) => entry.insert(channel_info),
2079		};
2080
2081		let mut node_counter_id = [
2082			(&mut channel_info.node_one_counter, node_id_a),
2083			(&mut channel_info.node_two_counter, node_id_b),
2084		];
2085		for (chan_info_node_counter, current_node_id) in node_counter_id.iter_mut() {
2086			match nodes.entry(current_node_id.clone()) {
2087				IndexedMapEntry::Occupied(node_entry) => {
2088					let node = node_entry.into_mut();
2089					node.channels.push(short_channel_id);
2090					**chan_info_node_counter = node.node_counter;
2091				},
2092				IndexedMapEntry::Vacant(node_entry) => {
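					// Assign a counter to the new node, preferring to reuse one freed by a
					// removed node so that counters stay relatively dense.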
2093					let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
2094					**chan_info_node_counter = removed_node_counters.pop().unwrap_or_else(|| {
2095						self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32
2096					});
2097					node_entry.insert(NodeInfo {
2098						channels: vec![short_channel_id],
2099						announcement_info: None,
2100						node_counter: **chan_info_node_counter,
2101					});
2102				},
2103			};
2104		}
2105
2106		core::mem::drop(nodes);
2107		core::mem::drop(channels);
2108		self.test_node_counter_consistency();
2109
2110		Ok(())
2111	}
2112
2113	/// If we already have all the information for a channel that we're going to get, there's no
2114	/// reason to process it redundantly.
2115	///
2116	/// In those cases, this will return an `Err` that we can return immediately. Otherwise it will
2117	/// return an `Ok(())`.
2118	fn pre_channel_announcement_validation_check<U: Deref>(
2119		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
2120	) -> Result<(), LightningError>
2121	where
2122		U::Target: UtxoLookup,
2123	{
2124		let channels = self.channels.read().unwrap();
2125
2126		if let Some(chan) = channels.get(&msg.short_channel_id) {
2127			if chan.capacity_sats.is_some() {
2128				// If we'd previously looked up the channel on-chain and checked the script
2129				// against what appears on-chain, ignore the duplicate announcement.
2130				//
2131				// Because a reorg could replace one channel with another at the same SCID, if
2132				// the channel appears to be different, we re-validate. This doesn't expose us
2133				// to any more DoS risk than not, as a peer can always flood us with
2134				// randomly-generated SCID values anyway.
2135				//
2136				// We use the Node IDs rather than the bitcoin_keys to check for "equivalence"
2137				// as we didn't (necessarily) store the bitcoin keys, and we only really care
2138				// if the peers on the channel changed anyway.
2139				if msg.node_id_1 == chan.node_one && msg.node_id_2 == chan.node_two {
2140					return Err(LightningError {
2141						err: "Already have chain-validated channel".to_owned(),
2142						action: ErrorAction::IgnoreDuplicateGossip,
2143					});
2144				}
2145			} else if utxo_lookup.is_none() {
2146				// Similarly, if we can't check the chain right now anyway, ignore the
2147				// duplicate announcement without bothering to take the channels write lock.
2148				return Err(LightningError {
2149					err: "Already have non-chain-validated channel".to_owned(),
2150					action: ErrorAction::IgnoreDuplicateGossip,
2151				});
2152			}
2153		}
2154
2155		Ok(())
2156	}
2157
2158	/// Update channel information from a received announcement.
2159	///
2160	/// Generally [`Self::pre_channel_announcement_validation_check`] should have been called
2161	/// first.
2162	fn update_channel_from_unsigned_announcement_intern<U: Deref>(
2163		&self, msg: &msgs::UnsignedChannelAnnouncement,
2164		full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>,
2165	) -> Result<(), LightningError>
2166	where
2167		U::Target: UtxoLookup,
2168	{
2169		if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
2170			return Err(LightningError {
2171				err: "Channel announcement node had a channel with itself".to_owned(),
2172				action: ErrorAction::IgnoreError,
2173			});
2174		}
2175
2176		if msg.chain_hash != self.chain_hash {
2177			return Err(LightningError {
2178				err: "Channel announcement chain hash does not match genesis hash".to_owned(),
2179				action: ErrorAction::IgnoreAndLog(Level::Debug),
2180			});
2181		}
2182
2183		{
2184			let removed_channels = self.removed_channels.lock().unwrap();
2185			let removed_nodes = self.removed_nodes.lock().unwrap();
2186			if removed_channels.contains_key(&msg.short_channel_id)
2187				|| removed_nodes.contains_key(&msg.node_id_1)
2188				|| removed_nodes.contains_key(&msg.node_id_2)
2189			{
2190				return Err(LightningError{
2191					err: format!("Channel with SCID {} or one of its nodes was removed from our network graph recently", &msg.short_channel_id),
2192					action: ErrorAction::IgnoreAndLog(Level::Gossip)});
2193			}
2194		}
2195
2196		let utxo_value =
2197			self.pending_checks.check_channel_announcement(utxo_lookup, msg, full_msg)?;
2198
2199		#[allow(unused_mut, unused_assignments)]
2200		let mut announcement_received_time = 0;
2201		#[cfg(feature = "std")]
2202		{
2203			announcement_received_time = SystemTime::now()
2204				.duration_since(UNIX_EPOCH)
2205				.expect("Time must be > 1970")
2206				.as_secs();
2207		}
2208
2209		let chan_info = ChannelInfo {
2210			features: msg.features.clone(),
2211			node_one: msg.node_id_1,
2212			one_to_two: None,
2213			node_two: msg.node_id_2,
2214			two_to_one: None,
2215			capacity_sats: utxo_value.map(|a| a.to_sat()),
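			// Only retain the full message for later relay if its excess data is small enough
			// (bounded by `MAX_EXCESS_BYTES_FOR_RELAY`).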
2216			announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2217				full_msg.cloned()
2218			} else {
2219				None
2220			},
2221			announcement_received_time,
2222			node_one_counter: u32::max_value(),
2223			node_two_counter: u32::max_value(),
2224		};
2225
2226		self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?;
2227
2228		log_gossip!(
2229			self.logger,
2230			"Added channel_announcement for {}{}",
2231			msg.short_channel_id,
2232			if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }
2233		);
2234		Ok(())
2235	}
2236
2237	/// Marks a channel in the graph as failed permanently.
2238	///
2239	/// The channel and any node for which this was their last channel are removed from the graph.
2240	pub fn channel_failed_permanent(&self, short_channel_id: u64) {
2241		#[cfg(feature = "std")]
2242		let current_time_unix = Some(
2243			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2244		);
2245		#[cfg(not(feature = "std"))]
2246		let current_time_unix = None;
2247
2248		self.channel_failed_permanent_with_time(short_channel_id, current_time_unix)
2249	}
2250
2251	/// Marks a channel in the graph as failed permanently.
2252	///
2253	/// The channel and any node for which this was their last channel are removed from the graph.
2254	fn channel_failed_permanent_with_time(
2255		&self, short_channel_id: u64, current_time_unix: Option<u64>,
2256	) {
2257		let mut channels = self.channels.write().unwrap();
2258		if let Some(chan) = channels.remove(&short_channel_id) {
2259			let mut nodes = self.nodes.write().unwrap();
2260			self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix);
2261			self.remove_channel_in_nodes(&mut nodes, &chan, short_channel_id);
2262		}
2263	}
2264
2265	/// Marks a node in the graph as permanently failed, effectively removing it and its channels
2266	/// from local storage.
2267	pub fn node_failed_permanent(&self, node_id: &PublicKey) {
2268		#[cfg(feature = "std")]
2269		let current_time_unix = Some(
2270			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2271		);
2272		#[cfg(not(feature = "std"))]
2273		let current_time_unix = None;
2274
2275		let node_id = NodeId::from_pubkey(node_id);
2276		let mut channels = self.channels.write().unwrap();
2277		let mut nodes = self.nodes.write().unwrap();
2278		let mut removed_channels = self.removed_channels.lock().unwrap();
2279		let mut removed_nodes = self.removed_nodes.lock().unwrap();
2280
2281		if let Some(node) = nodes.remove(&node_id) {
2282			let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
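			// Remove every channel of the failed node, pruning any counterparty that is left
			// with no channels at all.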
2283			for scid in node.channels.iter() {
2284				if let Some(chan_info) = channels.remove(scid) {
2285					let other_node_id = if node_id == chan_info.node_one {
2286						chan_info.node_two
2287					} else {
2288						chan_info.node_one
2289					};
2290					if let IndexedMapEntry::Occupied(mut other_node_entry) =
2291						nodes.entry(other_node_id)
2292					{
2293						other_node_entry.get_mut().channels.retain(|chan_id| *scid != *chan_id);
2294						if other_node_entry.get().channels.is_empty() {
2295							removed_node_counters.push(other_node_entry.get().node_counter);
2296							other_node_entry.remove_entry();
2297						}
2298					}
2299					removed_channels.insert(*scid, current_time_unix);
2300				} else {
2301					debug_assert!(false, "Channels in nodes must always have channel info");
2302				}
2303			}
2304			removed_node_counters.push(node.node_counter);
2305			removed_nodes.insert(node_id, current_time_unix);
2306		}
2307	}
2308
2309	#[cfg(feature = "std")]
2310	/// Removes information about channels that we haven't heard any updates about in some time.
2311	/// This can be used regularly to prune the network graph of channels that likely no longer
2312	/// exist.
2313	///
2314	/// While there is no formal requirement that nodes regularly re-broadcast their channel
2315	/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
2316	/// pruning occur for updates which are at least two weeks old, which we implement here.
2317	///
2318	/// Note that for users of the `lightning-background-processor` crate this method may be
2319	/// automatically called regularly for you.
2320	///
2321	/// This method will also cause us to stop tracking removed nodes and channels if they have been
2322	/// in the map for a while so that these can be resynced from gossip in the future.
2323	///
2324	/// This method is only available with the `std` feature. See
2325	/// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] for non-`std` use.
2326	pub fn remove_stale_channels_and_tracking(&self) {
2327		let time =
2328			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
2329		self.remove_stale_channels_and_tracking_with_time(time);
2330	}
2331
2332	/// Removes information about channels that we haven't heard any updates about in some time.
2333	/// This can be used regularly to prune the network graph of channels that likely no longer
2334	/// exist.
2335	///
2336	/// While there is no formal requirement that nodes regularly re-broadcast their channel
2337	/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
2338	/// pruning occur for updates which are at least two weeks old, which we implement here.
2339	///
2340	/// This method will also cause us to stop tracking removed nodes and channels if they have been
2341	/// in the map for a while so that these can be resynced from gossip in the future.
2342	#[cfg_attr(feature = "std", doc = "")]
2343	#[cfg_attr(
2344		feature = "std",
2345		doc = "This function takes the current unix time as an argument. For users with the `std` feature"
2346	)]
2347	#[cfg_attr(
2348		feature = "std",
2349		doc = "enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable."
2350	)]
2351	pub fn remove_stale_channels_and_tracking_with_time(&self, current_time_unix: u64) {
2352		let mut channels = self.channels.write().unwrap();
2353		// Time out if we haven't received an update in at least 14 days.
2354		if current_time_unix > u32::max_value() as u64 {
2355			return;
2356		} // Unix time overflows `u32` in 2106; this early-return must be reworked by then.
2357		if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2358			return;
2359		}
2360		let min_time_unix: u32 = (current_time_unix - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
2361		let mut scids_to_remove = new_hash_set();
2362		for (scid, info) in channels.unordered_iter_mut() {
2363			if info.one_to_two.is_some()
2364				&& info.one_to_two.as_ref().unwrap().last_update < min_time_unix
2365			{
2366				log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}",
2367					scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix);
2368				info.one_to_two = None;
2369			}
2370			if info.two_to_one.is_some()
2371				&& info.two_to_one.as_ref().unwrap().last_update < min_time_unix
2372			{
2373				log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}",
2374					scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix);
2375				info.two_to_one = None;
2376			}
2377			if info.one_to_two.is_none() || info.two_to_one.is_none() {
2378				// We check the announcement_received_time here to ensure we don't drop
2379				// announcements that we just received and are just waiting for our peer to send a
2380				// channel_update for.
2381				let announcement_received_timestamp = info.announcement_received_time;
2382				if announcement_received_timestamp < min_time_unix as u64 {
2383				log_gossip!(self.logger, "Removing channel {} because both directional updates are missing and its announcement timestamp {} is below {}",
2384						scid, announcement_received_timestamp, min_time_unix);
2385					scids_to_remove.insert(*scid);
2386				}
2387			}
2388		}
2389		if !scids_to_remove.is_empty() {
2390			let mut nodes = self.nodes.write().unwrap();
2391			let mut removed_channels_lck = self.removed_channels.lock().unwrap();
2392
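			// Remove the stale channels in bulk, then drop any nodes whose last channel
			// disappeared (collecting them first so they can be removed in a single pass).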
2393			let channels_removed_bulk = channels.remove_fetch_bulk(&scids_to_remove);
2394			self.removed_node_counters.lock().unwrap().reserve(channels_removed_bulk.len());
2395			let mut nodes_to_remove = hash_set_with_capacity(channels_removed_bulk.len());
2396			for (scid, info) in channels_removed_bulk {
2397				self.remove_channel_in_nodes_callback(&mut nodes, &info, scid, |e| {
2398					nodes_to_remove.insert(*e.key());
2399				});
2400				removed_channels_lck.insert(scid, Some(current_time_unix));
2401			}
2402			nodes.remove_bulk(&nodes_to_remove);
2403		}
2404
2405		let should_keep_tracking = |time: &mut Option<u64>| {
2406			if let Some(time) = time {
2407				current_time_unix.saturating_sub(*time) < REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS
2408			} else {
2409				// NOTE: In the case of non-`std`, we won't have access to the current UNIX time at the time of removal,
2410				// so we'll just set the removal time here to the current UNIX time on the very next invocation
2411				// of this function.
2412				#[cfg(not(feature = "std"))]
2413				{
2414					let mut tracked_time = Some(current_time_unix);
2415					core::mem::swap(time, &mut tracked_time);
2416					return true;
2417				}
2418				#[allow(unreachable_code)]
2419				false
2420			}
2421		};
2422
2423		self.removed_channels.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2424		self.removed_nodes.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2425	}
2426
2427	/// For an already known (from announcement) channel, update info about one of the directions
2428	/// of the channel.
2429	///
2430	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
2431	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
2432	/// routing messages from a source using a protocol other than the lightning P2P protocol.
2433	///
2434	/// If built with `std`, any updates with a timestamp more than two weeks in the past or
2435	/// materially in the future will be rejected.
2436	pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2437		self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false)
2438	}
2439
2440	/// For an already known (from announcement) channel, update info about one of the directions
2441	/// of the channel without verifying the associated signatures. Because we aren't given the
2442	/// associated signatures here we cannot relay the channel update to any of our peers.
2443	///
2444	/// If built with `std`, any updates with a timestamp more than two weeks in the past or
2445	/// materially in the future will be rejected.
2446	pub fn update_channel_unsigned(
2447		&self, msg: &msgs::UnsignedChannelUpdate,
2448	) -> Result<(), LightningError> {
2449		self.update_channel_internal(msg, None, None, false)
2450	}
2451
2452	/// For an already known (from announcement) channel, verify the given [`ChannelUpdate`].
2453	///
2454	/// This checks whether the update would currently be applied by [`Self::update_channel`].
2455	///
2456	/// If built with `std`, any updates with a timestamp more than two weeks in the past or
2457	/// materially in the future will be rejected.
2458	pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2459		self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true)
2460	}
2461
2462	fn update_channel_internal(
2463		&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>,
2464		sig: Option<&secp256k1::ecdsa::Signature>, only_verify: bool,
2465	) -> Result<(), LightningError> {
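		// Bit 1 of `channel_flags` (value 2) is set when the channel is disabled, so the
		// channel is enabled iff that bit is unset.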
2466		let chan_enabled = msg.channel_flags & (1 << 1) != (1 << 1);
2467
2468		if msg.chain_hash != self.chain_hash {
2469			return Err(LightningError {
2470				err: "Channel update chain hash does not match genesis hash".to_owned(),
2471				action: ErrorAction::IgnoreAndLog(Level::Debug),
2472			});
2473		}
2474
2475		#[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))]
2476		{
2477			// Note that many tests rely on being able to set arbitrarily old timestamps, thus we
2478			// disable this check during tests!
2479			let time = SystemTime::now()
2480				.duration_since(UNIX_EPOCH)
2481				.expect("Time must be > 1970")
2482				.as_secs();
2483			if (msg.timestamp as u64) < time - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2484				return Err(LightningError {
2485					err: "channel_update is more than two weeks old".to_owned(),
2486					action: ErrorAction::IgnoreAndLog(Level::Gossip),
2487				});
2488			}
2489			if msg.timestamp as u64 > time + 60 * 60 * 24 {
2490				return Err(LightningError {
2491					err: "channel_update has a timestamp more than a day in the future".to_owned(),
2492					action: ErrorAction::IgnoreAndLog(Level::Gossip),
2493				});
2494			}
2495		}
2496
2497		log_gossip!(
2498			self.logger,
2499			"Updating channel {} in direction {} with timestamp {}",
2500			msg.short_channel_id,
2501			msg.channel_flags & 1,
2502			msg.timestamp
2503		);
2504
2505		if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
2506			return Err(LightningError {
2507				err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(),
2508				action: ErrorAction::IgnoreError,
2509			});
2510		}
2511
2512		let check_update_latest =
2513			|target: &Option<ChannelUpdateInfo>| -> Result<(), LightningError> {
2514				if let Some(existing_chan_info) = target {
2515					// The timestamp field is somewhat of a misnomer - the BOLTs use it to
2516					// order updates to ensure you always have the latest one, only
2517				// suggesting that it be at least the current time. For
2518					// channel_updates specifically, the BOLTs discuss the possibility of
2519					// pruning based on the timestamp field being more than two weeks old,
2520					// but only in the non-normative section.
2521					if existing_chan_info.last_update > msg.timestamp {
2522						return Err(LightningError {
2523							err: "Update older than last processed update".to_owned(),
2524							action: ErrorAction::IgnoreDuplicateGossip,
2525						});
2526					} else if existing_chan_info.last_update == msg.timestamp {
2527						return Err(LightningError {
2528							err: "Update had same timestamp as last processed update".to_owned(),
2529							action: ErrorAction::IgnoreDuplicateGossip,
2530						});
2531					}
2532				}
2533				Ok(())
2534			};
2535
2536		let check_msg_sanity =
2537			|channel: &ChannelInfo| -> Result<(), LightningError> {
2538				if let Some(capacity_sats) = channel.capacity_sats {
2539					// The channel capacity may be unknown (the field is None if it wasn't
2540					// available at announcement time). Don't query the UTXO set here to reduce DoS risks.
2541					if capacity_sats > MAX_VALUE_MSAT / 1000
2542						|| msg.htlc_maximum_msat > capacity_sats * 1000
2543					{
2544						return Err(LightningError{err:
2545						"htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(),
2546						action: ErrorAction::IgnoreError});
2547					}
2548				}
2549
2550				if msg.channel_flags & 1 == 1 {
2551					check_update_latest(&channel.two_to_one)
2552				} else {
2553					check_update_latest(&channel.one_to_two)
2554				}
2555			};
2556
2557		let mut node_pubkey = None;
2558		{
2559			let channels = self.channels.read().unwrap();
2560			match channels.get(&msg.short_channel_id) {
2561				None => {
2562					core::mem::drop(channels);
2563					self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?;
2564					return Err(LightningError {
2565						err: "Couldn't find channel for update".to_owned(),
2566						action: ErrorAction::IgnoreAndLog(Level::Gossip),
2567					});
2568				},
2569				Some(channel) => {
2570					check_msg_sanity(channel)?;
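					// Bit 0 of `channel_flags` encodes the direction: when set, the update was
					// issued (and signed) by `node_two` and describes the `two_to_one` direction.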
2571					let node_id = if msg.channel_flags & 1 == 1 {
2572						channel.node_two.as_slice()
2573					} else {
2574						channel.node_one.as_slice()
2575					};
2576					if sig.is_some() {
2577						// PublicKey parsing isn't entirely trivial as it requires that we check
2578						// that the provided point is on the curve. Thus, if we don't have a
2579						// signature to verify, we want to skip the parsing step entirely.
2580						// This represents a substantial speedup in applying RGS snapshots.
2581						node_pubkey =
2582							Some(PublicKey::from_slice(node_id).map_err(|_| LightningError {
2583								err: "Couldn't parse source node pubkey".to_owned(),
2584								action: ErrorAction::IgnoreAndLog(Level::Debug),
2585							})?);
2586					}
2587				},
2588			}
2589		}
2590
2591		if let Some(sig) = sig {
2592			let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]);
2593			let node_pubkey = if let Some(pubkey) = node_pubkey {
2594				pubkey
2595			} else {
2596				debug_assert!(false, "node_pubkey should have been decoded above");
2597				let err = "node_pubkey wasn't decoded but we need it to check a sig".to_owned();
2598				let action = ErrorAction::IgnoreAndLog(Level::Error);
2599				return Err(LightningError { err, action });
2600			};
2601			secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update");
2602		}
2603
2604		if only_verify {
2605			return Ok(());
2606		}
2607
2608		let mut channels = self.channels.write().unwrap();
2609		if let Some(channel) = channels.get_mut(&msg.short_channel_id) {
2610			check_msg_sanity(channel)?;
2611
2612			let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2613				full_msg.cloned()
2614			} else {
2615				None
2616			};
2617
2618			let new_channel_info = Some(ChannelUpdateInfo {
2619				enabled: chan_enabled,
2620				last_update: msg.timestamp,
2621				cltv_expiry_delta: msg.cltv_expiry_delta,
2622				htlc_minimum_msat: msg.htlc_minimum_msat,
2623				htlc_maximum_msat: msg.htlc_maximum_msat,
2624				fees: RoutingFees {
2625					base_msat: msg.fee_base_msat,
2626					proportional_millionths: msg.fee_proportional_millionths,
2627				},
2628				last_update_message,
2629			});
2630
2631			if msg.channel_flags & 1 == 1 {
2632				channel.two_to_one = new_channel_info;
2633			} else {
2634				channel.one_to_two = new_channel_info;
2635			}
2636		}
2637
2638		Ok(())
2639	}
2640
2641	fn remove_channel_in_nodes_callback<RM: FnMut(IndexedMapOccupiedEntry<NodeId, NodeInfo>)>(
2642		&self, nodes: &mut IndexedMap<NodeId, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64,
2643		mut remove_node: RM,
2644	) {
2645		macro_rules! remove_from_node {
2646			($node_id: expr) => {
2647				if let IndexedMapEntry::Occupied(mut entry) = nodes.entry($node_id) {
2648					entry.get_mut().channels.retain(|chan_id| short_channel_id != *chan_id);
2649					if entry.get().channels.is_empty() {
2650						self.removed_node_counters.lock().unwrap().push(entry.get().node_counter);
2651						remove_node(entry);
2652					}
2653				} else {
2654					panic!(
2655						"Had channel that pointed to unknown node (ie inconsistent network map)!"
2656					);
2657				}
2658			};
2659		}
2660
2661		remove_from_node!(chan.node_one);
2662		remove_from_node!(chan.node_two);
2663	}
2664
2665	fn remove_channel_in_nodes(
2666		&self, nodes: &mut IndexedMap<NodeId, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64,
2667	) {
2668		self.remove_channel_in_nodes_callback(nodes, chan, short_channel_id, |e| {
2669			e.remove_entry();
2670		});
2671	}
2672}
2673
2674impl ReadOnlyNetworkGraph<'_> {
2675	/// Returns all known valid channels' short ids along with announced channel info.
2676	///
2677	/// This is not exported to bindings users because we don't want to return lifetime'd references
2678	pub fn channels(&self) -> &IndexedMap<u64, ChannelInfo> {
2679		&*self.channels
2680	}
2681
2682	/// Returns information on a channel with the given id.
2683	pub fn channel(&self, short_channel_id: u64) -> Option<&ChannelInfo> {
2684		self.channels.get(&short_channel_id)
2685	}
2686
2687	#[cfg(c_bindings)] // Non-bindings users should use `channels`
2688	/// Returns the list of channels in the graph
2689	pub fn list_channels(&self) -> Vec<u64> {
2690		self.channels.unordered_keys().map(|c| *c).collect()
2691	}
2692
2693	/// Returns all known nodes' public keys along with announced node info.
2694	///
2695	/// This is not exported to bindings users because we don't want to return lifetime'd references
2696	pub fn nodes(&self) -> &IndexedMap<NodeId, NodeInfo> {
2697		&*self.nodes
2698	}
2699
2700	/// Returns information on a node with the given id.
2701	pub fn node(&self, node_id: &NodeId) -> Option<&NodeInfo> {
2702		self.nodes.get(node_id)
2703	}
2704
2705	#[cfg(c_bindings)] // Non-bindings users should use `nodes`
2706	/// Returns the list of nodes in the graph
2707	pub fn list_nodes(&self) -> Vec<NodeId> {
2708		self.nodes.unordered_keys().map(|n| *n).collect()
2709	}
2710
2711	/// Get network addresses by node id.
2712	/// Returns None if the requested node is completely unknown,
2713	/// or if a node announcement for the node was never received.
2714	pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
2715		self.nodes
2716			.get(&NodeId::from_pubkey(&pubkey))
2717			.and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
2718	}
2719
2720	/// Gets the maximum possible node_counter for a node in this graph
2721	pub(crate) fn max_node_counter(&self) -> u32 {
2722		self.max_node_counter
2723	}
2724}
2725
2726#[cfg(test)]
2727pub(crate) mod tests {
2728	use crate::ln::chan_utils::make_funding_redeemscript;
2729	use crate::ln::channelmanager;
2730	use crate::ln::msgs::{BaseMessageHandler, MessageSendEvent, SocketAddress};
2731	use crate::ln::msgs::{
2732		ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, QueryChannelRange,
2733		QueryShortChannelIds, ReplyChannelRange, RoutingMessageHandler,
2734		UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement,
2735		MAX_VALUE_MSAT,
2736	};
2737	use crate::routing::gossip::{
2738		ChannelInfo, ChannelUpdateInfo, NetworkGraph, NetworkUpdate, NodeAlias,
2739		NodeAnnouncementInfo, NodeId, NodeInfo, P2PGossipSync, RoutingFees,
2740		MAX_EXCESS_BYTES_FOR_RELAY,
2741	};
2742	use crate::routing::utxo::{UtxoLookupError, UtxoResult};
2743	#[cfg(feature = "std")]
2744	use crate::types::features::InitFeatures;
2745	use crate::util::config::UserConfig;
2746	use crate::util::scid_utils::scid_from_parts;
2747	use crate::util::ser::{Hostname, LengthReadable, Readable, ReadableArgs, Writeable};
2748	use crate::util::test_utils;
2749
2750	use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS;
2751	use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
2752
2753	use bitcoin::amount::Amount;
2754	use bitcoin::constants::ChainHash;
2755	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
2756	use bitcoin::hashes::Hash;
2757	use bitcoin::hex::FromHex;
2758	use bitcoin::network::Network;
2759	use bitcoin::script::ScriptBuf;
2760	use bitcoin::secp256k1::{All, Secp256k1};
2761	use bitcoin::secp256k1::{PublicKey, SecretKey};
2762	use bitcoin::transaction::TxOut;
2763
2764	use crate::io;
2765	use crate::prelude::*;
2766	use crate::sync::Arc;
2767	use bitcoin::secp256k1;
2768
2769	fn create_network_graph() -> NetworkGraph<Arc<test_utils::TestLogger>> {
2770		let logger = Arc::new(test_utils::TestLogger::new());
2771		NetworkGraph::new(Network::Testnet, logger)
2772	}
2773
2774	fn create_gossip_sync(
2775		network_graph: &NetworkGraph<Arc<test_utils::TestLogger>>,
2776	) -> (
2777		Secp256k1<All>,
2778		P2PGossipSync<
2779			&NetworkGraph<Arc<test_utils::TestLogger>>,
2780			Arc<test_utils::TestChainSource>,
2781			Arc<test_utils::TestLogger>,
2782		>,
2783	) {
2784		let secp_ctx = Secp256k1::new();
2785		let logger = Arc::new(test_utils::TestLogger::new());
2786		let gossip_sync = P2PGossipSync::new(network_graph, None, Arc::clone(&logger));
2787		(secp_ctx, gossip_sync)
2788	}
2789
2790	#[test]
2791	fn request_full_sync_finite_times() {
2792		let network_graph = create_network_graph();
2793		let (_, gossip_sync) = create_gossip_sync(&network_graph);
2794
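		// Only the first five peers prompt a full sync request; later peers do not.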
2795		assert!(gossip_sync.should_request_full_sync());
2796		assert!(gossip_sync.should_request_full_sync());
2797		assert!(gossip_sync.should_request_full_sync());
2798		assert!(gossip_sync.should_request_full_sync());
2799		assert!(gossip_sync.should_request_full_sync());
2800		assert!(!gossip_sync.should_request_full_sync());
2801	}
2802
2803	pub(crate) fn get_signed_node_announcement<F: Fn(&mut UnsignedNodeAnnouncement)>(
2804		f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2805	) -> NodeAnnouncement {
2806		let node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key));
2807		let mut unsigned_announcement = UnsignedNodeAnnouncement {
2808			features: channelmanager::provided_node_features(&UserConfig::default()),
2809			timestamp: 100,
2810			node_id,
2811			rgb: [0; 3],
2812			alias: NodeAlias([0; 32]),
2813			addresses: Vec::new(),
2814			excess_address_data: Vec::new(),
2815			excess_data: Vec::new(),
2816		};
2817		f(&mut unsigned_announcement);
2818		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2819		NodeAnnouncement {
2820			signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2821			contents: unsigned_announcement,
2822		}
2823	}
2824
2825	pub(crate) fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(
2826		f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2827	) -> ChannelAnnouncement {
2828		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key);
2829		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key);
2830		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
2831		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
2832
2833		let mut unsigned_announcement = UnsignedChannelAnnouncement {
2834			features: channelmanager::provided_channel_features(&UserConfig::default()),
2835			chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2836			short_channel_id: 0,
2837			node_id_1: NodeId::from_pubkey(&node_id_1),
2838			node_id_2: NodeId::from_pubkey(&node_id_2),
2839			bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(
2840				&secp_ctx,
2841				node_1_btckey,
2842			)),
2843			bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(
2844				&secp_ctx,
2845				node_2_btckey,
2846			)),
2847			excess_data: Vec::new(),
2848		};
2849		f(&mut unsigned_announcement);
2850		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2851		ChannelAnnouncement {
2852			node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_key),
2853			node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_key),
2854			bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_btckey),
2855			bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_btckey),
2856			contents: unsigned_announcement,
2857		}
2858	}
2859
2860	pub(crate) fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> ScriptBuf {
2861		let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
2862		let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
2863		make_funding_redeemscript(
2864			&PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
2865			&PublicKey::from_secret_key(secp_ctx, &node_2_btckey),
2866		)
2867		.to_p2wsh()
2868	}
2869
2870	pub(crate) fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(
2871		f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2872	) -> ChannelUpdate {
2873		let mut unsigned_channel_update = UnsignedChannelUpdate {
2874			chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2875			short_channel_id: 0,
2876			timestamp: 100,
2877			message_flags: 1, // Only must_be_one
2878			channel_flags: 0,
2879			cltv_expiry_delta: 144,
2880			htlc_minimum_msat: 1_000_000,
2881			htlc_maximum_msat: 1_000_000,
2882			fee_base_msat: 10_000,
2883			fee_proportional_millionths: 20,
2884			excess_data: Vec::new(),
2885		};
2886		f(&mut unsigned_channel_update);
2887		let msghash =
2888			hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
2889		ChannelUpdate {
2890			signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2891			contents: unsigned_channel_update,
2892		}
2893	}
2894
2895	#[test]
2896	fn handling_node_announcements() {
2897		let network_graph = create_network_graph();
2898		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
2899
2900		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2901		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2902		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2903		let zero_hash = Sha256dHash::hash(&[0; 32]);
2904
2905		let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
2906		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2907			Ok(_) => panic!(),
2908			Err(e) => assert_eq!("No existing channels for node_announcement", e.err),
2909		};
2910
2911		{
2912			// Announce a channel to add a corresponding node.
2913			let valid_announcement =
2914				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2915			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
2916			{
2917				Ok(res) => assert!(res),
2918				_ => panic!(),
2919			};
2920		}
2921
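		// Sign an unrelated digest with the correct key: the contents are otherwise valid, but
		// the signature check must fail.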
2922		let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
2923		match gossip_sync.handle_node_announcement(
2924			Some(node_1_pubkey),
2925			&NodeAnnouncement {
2926				signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey),
2927				contents: valid_announcement.contents.clone(),
2928			},
2929		) {
2930			Ok(_) => panic!(),
2931			Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message"),
2932		};
2933
2934		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2935			Ok(res) => assert!(res),
2936			Err(_) => panic!(),
2937		};
2938
2939		let announcement_with_data = get_signed_node_announcement(
2940			|unsigned_announcement| {
2941				unsigned_announcement.timestamp += 1000;
2942				unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
2943			},
2944			node_1_privkey,
2945			&secp_ctx,
2946		);
2947		// Returns false because the announcement contains excess data.
2948		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement_with_data) {
2949			Ok(res) => assert!(!res),
2950			Err(_) => panic!(),
2951		};
2952
2953		// Even though the previous announcement was not relayed further, we still accepted it,
2954		// so we now won't accept announcements older than the previous one.
2955		let outdated_announcement = get_signed_node_announcement(
2956			|unsigned_announcement| {
2957				unsigned_announcement.timestamp += 1000 - 10;
2958			},
2959			node_1_privkey,
2960			&secp_ctx,
2961		);
2962		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &outdated_announcement) {
2963			Ok(_) => panic!(),
2964			Err(e) => assert_eq!(e.err, "Update older than last processed update"),
2965		};
2966	}
2967
2968	#[test]
2969	fn handling_channel_announcements() {
2970		let secp_ctx = Secp256k1::new();
2971		let logger = test_utils::TestLogger::new();
2972
2973		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2974		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2975		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2976
2977		let good_script = get_channel_script(&secp_ctx);
2978		let valid_announcement =
2979			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2980
2981		// Test the case where UTXO lookups are not supported.
2982		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
2983		let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger);
2984		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2985			Ok(res) => assert!(res),
2986			_ => panic!(),
2987		};
2988
2989		let scid = valid_announcement.contents.short_channel_id;
2990		match network_graph.read_only().channels().get(&scid) {
2991			None => panic!(),
2992			Some(_) => (),
2993		};
2994
2995		// If we receive an announcement for the same channel (with UTXO lookups disabled),
2996		// we drop the new one on the floor, since we can't see any changes.
2997		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2998			Ok(_) => panic!(),
2999			Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel"),
3000		};
3001
3002		// Test the case where the associated transaction is not on-chain (or not confirmed).
3003		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3004		*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
3005		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3006		gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3007
3008		let valid_announcement = get_signed_channel_announcement(
3009			|unsigned_announcement| {
3010				unsigned_announcement.short_channel_id += 1;
3011			},
3012			node_1_privkey,
3013			node_2_privkey,
3014			&secp_ctx,
3015		);
3016		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3017			Ok(_) => panic!(),
3018			Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry"),
3019		};
3020
3021		// Now test the case where the transaction is found in the UTXO set and the script is correct.
3022		*chain_source.utxo_ret.lock().unwrap() =
3023			UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script.clone() }));
3024		let valid_announcement = get_signed_channel_announcement(
3025			|unsigned_announcement| {
3026				unsigned_announcement.short_channel_id += 2;
3027			},
3028			node_1_privkey,
3029			node_2_privkey,
3030			&secp_ctx,
3031		);
3032		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3033			Ok(res) => assert!(res),
3034			_ => panic!(),
3035		};
3036
3037		let scid = valid_announcement.contents.short_channel_id;
3038		match network_graph.read_only().channels().get(&scid) {
3039			None => panic!(),
3040			Some(_) => (),
3041		};
3042
3043		// If we receive an announcement for the same channel, once we've validated it against
3044		// the chain, we simply ignore all new (duplicate) announcements.
3045		*chain_source.utxo_ret.lock().unwrap() =
3046			UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script }));
3047		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3048			Ok(_) => panic!(),
3049			Err(e) => assert_eq!(e.err, "Already have chain-validated channel"),
3050		};
3051
3052		#[cfg(feature = "std")]
3053		{
3054			use std::time::{SystemTime, UNIX_EPOCH};
3055
3056			let tracking_time = SystemTime::now()
3057				.duration_since(UNIX_EPOCH)
3058				.expect("Time must be > 1970")
3059				.as_secs();
3060			// Mark a node as permanently failed so it's tracked as removed.
3061			let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3062			gossip_sync.network_graph().node_failed_permanent(&node_1_pubkey);
3063
3064			// Return an error and ignore a valid channel announcement if one of its nodes has been tracked as removed.
3065			let valid_announcement = get_signed_channel_announcement(
3066				|unsigned_announcement| {
3067					unsigned_announcement.short_channel_id += 3;
3068				},
3069				node_1_privkey,
3070				node_2_privkey,
3071				&secp_ctx,
3072			);
3073			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3074				Ok(_) => panic!(),
3075				Err(e) => assert_eq!(e.err, "Channel with SCID 3 or one of its nodes was removed from our network graph recently")
3076			}
3077
3078			gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time(
3079				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3080			);
3081
3082			// The above channel announcement should be handled as per normal now.
3083			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3084			{
3085				Ok(res) => assert!(res),
3086				_ => panic!(),
3087			}
3088		}
3089
3090		let valid_excess_data_announcement = get_signed_channel_announcement(
3091			|unsigned_announcement| {
3092				unsigned_announcement.short_channel_id += 4;
3093				unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3094			},
3095			node_1_privkey,
3096			node_2_privkey,
3097			&secp_ctx,
3098		);
3099
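		// Clearing the excess data changes the signed contents, so the announcement's existing
		// signatures no longer match.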
3100		let mut invalid_sig_announcement = valid_excess_data_announcement.clone();
3101		invalid_sig_announcement.contents.excess_data = Vec::new();
3102		match gossip_sync
3103			.handle_channel_announcement(Some(node_1_pubkey), &invalid_sig_announcement)
3104		{
3105			Ok(_) => panic!(),
3106			Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message"),
3107		};
3108
3109		// Don't relay valid channels with excess data
3110		match gossip_sync
3111			.handle_channel_announcement(Some(node_1_pubkey), &valid_excess_data_announcement)
3112		{
3113			Ok(res) => assert!(!res),
3114			_ => panic!(),
3115		};
3116
3117		let channel_to_itself_announcement =
3118			get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx);
3119		match gossip_sync
3120			.handle_channel_announcement(Some(node_1_pubkey), &channel_to_itself_announcement)
3121		{
3122			Ok(_) => panic!(),
3123			Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself"),
3124		};
3125
3126		// Test that channel announcements with the wrong chain hash are ignored (network graph is testnet,
3127		// announcement is mainnet).
3128		let incorrect_chain_announcement = get_signed_channel_announcement(
3129			|unsigned_announcement| {
3130				unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin);
3131			},
3132			node_1_privkey,
3133			node_2_privkey,
3134			&secp_ctx,
3135		);
3136		match gossip_sync
3137			.handle_channel_announcement(Some(node_1_pubkey), &incorrect_chain_announcement)
3138		{
3139			Ok(_) => panic!(),
3140			Err(e) => {
3141				assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash")
3142			},
3143		};
3144	}
3145
3146	#[test]
3147	fn handling_channel_update() {
3148		let secp_ctx = Secp256k1::new();
3149		let logger = test_utils::TestLogger::new();
3150		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3151		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3152		let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3153
3154		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3155		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3156		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3157
3158		let amount_sats = Amount::from_sat(1_000_000);
3159		let short_channel_id;
3160
3161		{
3162			// Announce a channel we will update
3163			let good_script = get_channel_script(&secp_ctx);
3164			*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut {
3165				value: amount_sats,
3166				script_pubkey: good_script.clone(),
3167			}));
3168
3169			let valid_channel_announcement =
3170				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3171			short_channel_id = valid_channel_announcement.contents.short_channel_id;
3172			match gossip_sync
3173				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3174			{
3175				Ok(_) => (),
3176				Err(_) => panic!(),
3177			};
3178		}
3179
3180		let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3181		network_graph.verify_channel_update(&valid_channel_update).unwrap();
3182		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3183			Ok(res) => assert!(res),
3184			_ => panic!(),
3185		};
3186
3187		{
3188			match network_graph.read_only().channels().get(&short_channel_id) {
3189				None => panic!(),
3190				Some(channel_info) => {
3191					assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144);
3192					assert!(channel_info.two_to_one.is_none());
3193				},
3194			};
3195		}
3196
3197		let valid_channel_update = get_signed_channel_update(
3198			|unsigned_channel_update| {
3199				unsigned_channel_update.timestamp += 100;
3200				unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3201			},
3202			node_1_privkey,
3203			&secp_ctx,
3204		);
3205		// Returns false because the update contains excess data.
3206		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3207			Ok(res) => assert!(!res),
3208			_ => panic!(),
3209		};
3210
3211		let valid_channel_update = get_signed_channel_update(
3212			|unsigned_channel_update| {
3213				unsigned_channel_update.timestamp += 110;
3214				unsigned_channel_update.short_channel_id += 1;
3215			},
3216			node_1_privkey,
3217			&secp_ctx,
3218		);
3219		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3220			Ok(_) => panic!(),
3221			Err(e) => assert_eq!(e.err, "Couldn't find channel for update"),
3222		};
3223
3224		let valid_channel_update = get_signed_channel_update(
3225			|unsigned_channel_update| {
3226				unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
3227				unsigned_channel_update.timestamp += 110;
3228			},
3229			node_1_privkey,
3230			&secp_ctx,
3231		);
3232		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3233			Ok(_) => panic!(),
3234			Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats"),
3235		};
3236
3237		let valid_channel_update = get_signed_channel_update(
3238			|unsigned_channel_update| {
3239				unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1;
3240				unsigned_channel_update.timestamp += 110;
3241			},
3242			node_1_privkey,
3243			&secp_ctx,
3244		);
3245		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3246			Ok(_) => panic!(),
3247			Err(e) => assert_eq!(
3248				e.err,
3249				"htlc_maximum_msat is larger than channel capacity or capacity is bogus"
3250			),
3251		};
3252
3253		// Even though the previous update was not relayed further, we still accepted it,
3254		// so we now won't accept an update at or before the previous one's timestamp.
3255		let valid_channel_update = get_signed_channel_update(
3256			|unsigned_channel_update| {
3257				unsigned_channel_update.timestamp += 100;
3258			},
3259			node_1_privkey,
3260			&secp_ctx,
3261		);
3262		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3263			Ok(_) => panic!(),
3264			Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update"),
3265		};
3266
3267		let mut invalid_sig_channel_update = get_signed_channel_update(
3268			|unsigned_channel_update| {
3269				unsigned_channel_update.timestamp += 500;
3270			},
3271			node_1_privkey,
3272			&secp_ctx,
3273		);
3274		let zero_hash = Sha256dHash::hash(&[0; 32]);
3275		let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
3276		invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey);
3277		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &invalid_sig_channel_update) {
3278			Ok(_) => panic!(),
3279			Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message"),
3280		};
3281
3282		// Test that channel updates with the wrong chain hash are ignored (network graph is
3283		// testnet, channel update is mainnet).
3284		let incorrect_chain_update = get_signed_channel_update(
3285			|unsigned_channel_update| {
3286				unsigned_channel_update.chain_hash =
3287					ChainHash::using_genesis_block(Network::Bitcoin);
3288			},
3289			node_1_privkey,
3290			&secp_ctx,
3291		);
3292
3293		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &incorrect_chain_update) {
3294			Ok(_) => panic!(),
3295			Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash"),
3296		};
3297	}
3298
3299	#[test]
3300	fn handling_network_update() {
3301		let logger = test_utils::TestLogger::new();
3302		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3303		let secp_ctx = Secp256k1::new();
3304
3305		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3306		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3307		let node_2_pk = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3308		let node_2_id = NodeId::from_pubkey(&node_2_pk);
3309
3310		{
3311			// There are no nodes in the table at the beginning.
3312			assert_eq!(network_graph.read_only().nodes().len(), 0);
3313		}
3314
3315		let scid;
3316		{
3317			// Check that we can manually apply a channel update.
3318			let valid_channel_announcement =
3319				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3320			scid = valid_channel_announcement.contents.short_channel_id;
3321			let chain_source: Option<&test_utils::TestChainSource> = None;
3322			assert!(network_graph
3323				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3324				.is_ok());
3325			assert!(network_graph.read_only().channels().get(&scid).is_some());
3326
3327			let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3328
3329			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3330			network_graph.update_channel(&valid_channel_update).unwrap();
3331			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3332		}
3333
3334		// Non-permanent failure doesn't touch the channel at all
3335		{
3336			match network_graph.read_only().channels().get(&scid) {
3337				None => panic!(),
3338				Some(channel_info) => {
3339					assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3340				},
3341			};
3342
3343			network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3344				short_channel_id: scid,
3345				is_permanent: false,
3346			});
3347
3348			match network_graph.read_only().channels().get(&scid) {
3349				None => panic!(),
3350				Some(channel_info) => {
3351					assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3352				},
3353			};
3354		}
3355
3356		// Permanent closing deletes a channel
3357		network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3358			short_channel_id: scid,
3359			is_permanent: true,
3360		});
3361
3362		assert_eq!(network_graph.read_only().channels().len(), 0);
3363		// Nodes are also deleted because there are no associated channels anymore
3364		assert_eq!(network_graph.read_only().nodes().len(), 0);
3365
3366		{
3367			// Get a new network graph, since we don't want this test to track removed nodes when "std" is enabled.
3368			let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3369
3370			// Announce a channel to test permanent node failure
3371			let valid_channel_announcement =
3372				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3373			let short_channel_id = valid_channel_announcement.contents.short_channel_id;
3374			let chain_source: Option<&test_utils::TestChainSource> = None;
3375			assert!(network_graph
3376				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3377				.is_ok());
3378			assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3379
3380			// Non-permanent node failure does not delete any nodes or channels
3381			network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3382				node_id: node_2_pk,
3383				is_permanent: false,
3384			});
3385
3386			assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3387			assert!(network_graph.read_only().nodes().get(&node_2_id).is_some());
3388
3389			// Permanent node failure deletes node and its channels
3390			network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3391				node_id: node_2_pk,
3392				is_permanent: true,
3393			});
3394
3395			assert_eq!(network_graph.read_only().nodes().len(), 0);
3396			// Channels are also deleted because the associated node has been deleted
3397			assert_eq!(network_graph.read_only().channels().len(), 0);
3398		}
3399	}
3400
3401	#[test]
3402	fn test_channel_timeouts() {
3403		// Test the removal of channels with `remove_stale_channels_and_tracking`.
3404		let logger = test_utils::TestLogger::new();
3405		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3406		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3407		let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3408		let secp_ctx = Secp256k1::new();
3409
3410		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3411		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3412		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3413
3414		let valid_channel_announcement =
3415			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3416		let scid = valid_channel_announcement.contents.short_channel_id;
3417		let chain_source: Option<&test_utils::TestChainSource> = None;
3418		assert!(network_graph
3419			.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3420			.is_ok());
3421		assert!(network_graph.read_only().channels().get(&scid).is_some());
3422
3423		// Submit two channel updates, one for each channel direction (the channel_flags direction bit).
3424		let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3425		assert!(gossip_sync
3426			.handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3427			.is_ok());
3428		assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3429
3430		let valid_channel_update_2 = get_signed_channel_update(
3431			|update| {
3432				update.channel_flags |= 1;
3433			},
3434			node_2_privkey,
3435			&secp_ctx,
3436		);
3437		gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update_2).unwrap();
3438		assert!(network_graph.read_only().channels().get(&scid).unwrap().two_to_one.is_some());
3439
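		// Both directional updates carry timestamp 100, so at exactly
		// 100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS nothing is pruned yet; one second later the
		// stale directional info is removed.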
3440		network_graph.remove_stale_channels_and_tracking_with_time(
3441			100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3442		);
3443		assert_eq!(network_graph.read_only().channels().len(), 1);
3444		assert_eq!(network_graph.read_only().nodes().len(), 2);
3445
3446		network_graph.remove_stale_channels_and_tracking_with_time(
3447			101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3448		);
3449		#[cfg(not(feature = "std"))]
3450		{
3451			// Make sure removed channels are tracked.
3452			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3453		}
3454		network_graph.remove_stale_channels_and_tracking_with_time(
3455			101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3456		);
3457
3458		#[cfg(feature = "std")]
3459		{
3460			// In std mode, a further check is performed before fully removing the channel -
3461			// the channel_announcement must have been received at least two weeks ago. We
3462			// fudge that here by indicating the time has jumped two weeks.
3463			assert_eq!(network_graph.read_only().channels().len(), 1);
3464			assert_eq!(network_graph.read_only().nodes().len(), 2);
3465
3466			// Note that the directional channel information will have been removed already.
3467			// We want to check that this will work even if *one* of the channel updates is recent,
3468			// so we should add it with a recent timestamp.
3469			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3470			use std::time::{SystemTime, UNIX_EPOCH};
3471			let announcement_time = SystemTime::now()
3472				.duration_since(UNIX_EPOCH)
3473				.expect("Time must be > 1970")
3474				.as_secs();
3475			let valid_channel_update = get_signed_channel_update(
3476				|unsigned_channel_update| {
3477					unsigned_channel_update.timestamp =
3478						(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
3479				},
3480				node_1_privkey,
3481				&secp_ctx,
3482			);
3483			assert!(gossip_sync
3484				.handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3485				.is_ok());
3486			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3487			network_graph.remove_stale_channels_and_tracking_with_time(
3488				announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3489			);
3490			// Make sure removed channels are tracked.
3491			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3492			// Provide a later time so that sufficient time has passed
3493			network_graph.remove_stale_channels_and_tracking_with_time(
3494				announcement_time
3495					+ 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS
3496					+ REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3497			);
3498		}
3499
3500		assert_eq!(network_graph.read_only().channels().len(), 0);
3501		assert_eq!(network_graph.read_only().nodes().len(), 0);
3502		assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3503
3504		#[cfg(feature = "std")]
3505		{
3506			use std::time::{SystemTime, UNIX_EPOCH};
3507
3508			let tracking_time = SystemTime::now()
3509				.duration_since(UNIX_EPOCH)
3510				.expect("Time must be > 1970")
3511				.as_secs();
3512
3513			// Clear tracked nodes and channels for clean slate
3514			network_graph.removed_channels.lock().unwrap().clear();
3515			network_graph.removed_nodes.lock().unwrap().clear();
3516
3517			// Add a channel and nodes from a channel announcement, so our network graph will
3518			// now consist only of two nodes and one channel between them.
3519			assert!(network_graph
3520				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3521				.is_ok());
3522
3523			// Mark the channel as permanently failed. This will also remove the two nodes
3524			// and all of the entries will be tracked as removed.
3525			network_graph.channel_failed_permanent_with_time(scid, Some(tracking_time));
3526
3527			// Should not remove from tracking if insufficient time has passed
3528			network_graph.remove_stale_channels_and_tracking_with_time(
3529				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1,
3530			);
3531			assert_eq!(
3532				network_graph.removed_channels.lock().unwrap().len(),
3533				1,
3534				"Removed channel count ≠ 1 with tracking_time {}",
3535				tracking_time
3536			);
3537
3538			// Provide a later time so that sufficient time has passed
3539			network_graph.remove_stale_channels_and_tracking_with_time(
3540				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3541			);
3542			assert!(
3543				network_graph.removed_channels.lock().unwrap().is_empty(),
3544				"Unexpectedly removed channels with tracking_time {}",
3545				tracking_time
3546			);
3547			assert!(
3548				network_graph.removed_nodes.lock().unwrap().is_empty(),
3549				"Unexpectedly removed nodes with tracking_time {}",
3550				tracking_time
3551			);
3552		}
3553
3554		#[cfg(not(feature = "std"))]
3555		{
3556			// When we don't have access to the system clock, the time we started tracking removal will only
3557			// be that provided by the first call to `remove_stale_channels_and_tracking_with_time`. Hence,
3558			// only if sufficient time has passed after that first call will the next call remove the entry
3559			// from tracking.
3560			let removal_time = 1664619654;
3561
3562			// Clear removed nodes and channels for clean slate
3563			network_graph.removed_channels.lock().unwrap().clear();
3564			network_graph.removed_nodes.lock().unwrap().clear();
3565
3566			// Add a channel and nodes from a channel announcement, so our network graph will
3567			// now consist only of two nodes and one channel between them.
3568			assert!(network_graph
3569				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3570				.is_ok());
3571
3572			// Mark the channel as permanently failed. This will also remove the two nodes
3573			// and all of the entries will be tracked as removed.
3574			network_graph.channel_failed_permanent(scid);
3575
3576			// The first time we call the following, the channel will have a removal time assigned.
3577			network_graph.remove_stale_channels_and_tracking_with_time(removal_time);
3578			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3579
3580			// Provide a later time so that sufficient time has passed
3581			network_graph.remove_stale_channels_and_tracking_with_time(
3582				removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3583			);
3584			assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3585			assert!(network_graph.removed_nodes.lock().unwrap().is_empty());
3586		}
3587	}
3588
3589	#[test]
3590	fn getting_next_channel_announcements() {
3591		let network_graph = create_network_graph();
3592		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3593		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3594		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3595		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3596
3597		// No channels have been announced yet.
3598		let channels_with_announcements = gossip_sync.get_next_channel_announcement(0);
3599		assert!(channels_with_announcements.is_none());
3600
3601		let short_channel_id;
3602		{
3603			// Announce a channel we will update
3604			let valid_channel_announcement =
3605				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3606			short_channel_id = valid_channel_announcement.contents.short_channel_id;
3607			match gossip_sync
3608				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3609			{
3610				Ok(_) => (),
3611				Err(_) => panic!(),
3612			};
3613		}
3614
3615		// Contains initial channel announcement now.
3616		let channels_with_announcements =
3617			gossip_sync.get_next_channel_announcement(short_channel_id);
3618		if let Some(channel_announcements) = channels_with_announcements {
3619			let (_, ref update_1, ref update_2) = channel_announcements;
3620			assert_eq!(update_1, &None);
3621			assert_eq!(update_2, &None);
3622		} else {
3623			panic!();
3624		}
3625
3626		{
3627			// Valid channel update
3628			let valid_channel_update = get_signed_channel_update(
3629				|unsigned_channel_update| {
3630					unsigned_channel_update.timestamp = 101;
3631				},
3632				node_1_privkey,
3633				&secp_ctx,
3634			);
3635			match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3636				Ok(_) => (),
3637				Err(_) => panic!(),
3638			};
3639		}
3640
3641		// Now contains an initial announcement and an update.
3642		let channels_with_announcements =
3643			gossip_sync.get_next_channel_announcement(short_channel_id);
3644		if let Some(channel_announcements) = channels_with_announcements {
3645			let (_, ref update_1, ref update_2) = channel_announcements;
3646			assert_ne!(update_1, &None);
3647			assert_eq!(update_2, &None);
3648		} else {
3649			panic!();
3650		}
3651
3652		{
3653			// Channel update with excess data.
3654			let valid_channel_update = get_signed_channel_update(
3655				|unsigned_channel_update| {
3656					unsigned_channel_update.timestamp = 102;
3657					unsigned_channel_update.excess_data =
3658						[1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3659				},
3660				node_1_privkey,
3661				&secp_ctx,
3662			);
3663			match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3664				Ok(_) => (),
3665				Err(_) => panic!(),
3666			};
3667		}
3668
3669		// Test that announcements with excess data won't be returned
3670		let channels_with_announcements =
3671			gossip_sync.get_next_channel_announcement(short_channel_id);
3672		if let Some(channel_announcements) = channels_with_announcements {
3673			let (_, ref update_1, ref update_2) = channel_announcements;
3674			assert_eq!(update_1, &None);
3675			assert_eq!(update_2, &None);
3676		} else {
3677			panic!();
3678		}
3679
3680		// A further starting point has no channels after it.
3681		let channels_with_announcements =
3682			gossip_sync.get_next_channel_announcement(short_channel_id + 1000);
3683		assert!(channels_with_announcements.is_none());
3684	}
3685
3686	#[test]
3687	fn getting_next_node_announcements() {
3688		let network_graph = create_network_graph();
3689		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3690		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3691		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3692		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3693		let node_id_1 = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));
3694
3695		// No nodes yet.
3696		let next_announcements = gossip_sync.get_next_node_announcement(None);
3697		assert!(next_announcements.is_none());
3698
3699		{
3700			// Announce a channel to add 2 nodes
3701			let valid_channel_announcement =
3702				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3703			match gossip_sync
3704				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3705			{
3706				Ok(_) => (),
3707				Err(_) => panic!(),
3708			};
3709		}
3710
3711		// Nodes were never announced
3712		let next_announcements = gossip_sync.get_next_node_announcement(None);
3713		assert!(next_announcements.is_none());
3714
3715		{
3716			let valid_announcement =
3717				get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3718			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3719				Ok(_) => (),
3720				Err(_) => panic!(),
3721			};
3722
3723			let valid_announcement =
3724				get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx);
3725			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3726				Ok(_) => (),
3727				Err(_) => panic!(),
3728			};
3729		}
3730
3731		let next_announcements = gossip_sync.get_next_node_announcement(None);
3732		assert!(next_announcements.is_some());
3733
3734		// Skip the first node.
3735		let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3736		assert!(next_announcements.is_some());
3737
3738		{
3739			// A later announcement which should not be relayed (excess data) prevents us from sharing the node.
3740			let valid_announcement = get_signed_node_announcement(
3741				|unsigned_announcement| {
3742					unsigned_announcement.timestamp += 10;
3743					unsigned_announcement.excess_data =
3744						[1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3745				},
3746				node_2_privkey,
3747				&secp_ctx,
3748			);
3749			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3750				Ok(res) => assert!(!res),
3751				Err(_) => panic!(),
3752			};
3753		}
3754
3755		let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3756		assert!(next_announcements.is_none());
3757	}
3758
3759	#[test]
3760	fn network_graph_serialization() {
3761		let network_graph = create_network_graph();
3762		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3763
3764		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3765		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3766		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3767
3768		// Announce a channel to add a corresponding node.
3769		let valid_announcement =
3770			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3771		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3772			Ok(res) => assert!(res),
3773			_ => panic!(),
3774		};
3775
3776		let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3777		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3778			Ok(_) => (),
3779			Err(_) => panic!(),
3780		};
3781
3782		let mut w = test_utils::TestVecWriter(Vec::new());
3783		assert!(!network_graph.read_only().nodes().is_empty());
3784		assert!(!network_graph.read_only().channels().is_empty());
3785		network_graph.write(&mut w).unwrap();
3786
3787		let logger = Arc::new(test_utils::TestLogger::new());
3788		assert!(
3789			<NetworkGraph<_>>::read(&mut io::Cursor::new(&w.0), logger).unwrap() == network_graph
3790		);
3791	}
3792
3793	#[test]
3794	fn network_graph_tlv_serialization() {
3795		let network_graph = create_network_graph();
3796		network_graph.set_last_rapid_gossip_sync_timestamp(42);
3797
3798		let mut w = test_utils::TestVecWriter(Vec::new());
3799		network_graph.write(&mut w).unwrap();
3800
3801		let logger = Arc::new(test_utils::TestLogger::new());
3802		let reassembled_network_graph: NetworkGraph<_> =
3803			ReadableArgs::read(&mut io::Cursor::new(&w.0), logger).unwrap();
3804		assert!(reassembled_network_graph == network_graph);
3805		assert_eq!(reassembled_network_graph.get_last_rapid_gossip_sync_timestamp().unwrap(), 42);
3806	}
3807
3808	#[test]
3809	#[cfg(feature = "std")]
3810	fn calling_sync_routing_table() {
3811		use crate::ln::msgs::Init;
3812		use std::time::{SystemTime, UNIX_EPOCH};
3813
3814		let network_graph = create_network_graph();
3815		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3816		let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
3817		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
3818
3819		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3820
3821		// It should do nothing if the gossip_queries feature is not enabled.
3822		{
3823			let init_msg = Init {
3824				features: InitFeatures::empty(),
3825				networks: None,
3826				remote_network_address: None,
3827			};
3828			gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3829			let events = gossip_sync.get_and_clear_pending_msg_events();
3830			assert_eq!(events.len(), 0);
3831		}
3832
3833		// It should send a gossip_timestamp_filter with the correct information
3834		{
3835			let mut features = InitFeatures::empty();
3836			features.set_gossip_queries_optional();
3837			let init_msg = Init { features, networks: None, remote_network_address: None };
3838			gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3839			let events = gossip_sync.get_and_clear_pending_msg_events();
3840			assert_eq!(events.len(), 1);
3841			match &events[0] {
3842				MessageSendEvent::SendGossipTimestampFilter { node_id, msg } => {
3843					assert_eq!(node_id, &node_id_1);
3844					assert_eq!(msg.chain_hash, chain_hash);
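					// The filter's first_timestamp should sit roughly two weeks in the past,
					// with a little slack for test execution time.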
3845					let expected_timestamp = SystemTime::now()
3846						.duration_since(UNIX_EPOCH)
3847						.expect("Time must be > 1970")
3848						.as_secs();
3849					assert!(
3850						(msg.first_timestamp as u64) >= expected_timestamp - 60 * 60 * 24 * 7 * 2
3851					);
3852					assert!(
3853						(msg.first_timestamp as u64)
3854							< expected_timestamp - 60 * 60 * 24 * 7 * 2 + 10
3855					);
3856					assert_eq!(msg.timestamp_range, u32::max_value());
3857				},
3858				_ => panic!("Expected MessageSendEvent::SendGossipTimestampFilter"),
3859			};
3860		}
3861	}
3862
3863	#[test]
3864	fn handling_query_channel_range() {
3865		let network_graph = create_network_graph();
3866		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3867
3868		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3869		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3870		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3871		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3872		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3873
3874		let mut scids: Vec<u64> = vec![
3875			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
3876			scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
3877		];
3878
3879		// Used for testing a multipart reply across blocks.
3880		for block in 100000..=108001 {
3881			scids.push(scid_from_parts(block, 0, 0).unwrap());
3882		}
3883
3884		// Used for testing resumption on the same block.
3885		scids.push(scid_from_parts(108001, 1, 0).unwrap());
3886
3887		for scid in scids {
3888			let valid_announcement = get_signed_channel_announcement(
3889				|unsigned_announcement| {
3890					unsigned_announcement.short_channel_id = scid;
3891				},
3892				node_1_privkey,
3893				node_2_privkey,
3894				&secp_ctx,
3895			);
3896			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3897			{
3898				Ok(_) => (),
3899				_ => panic!(),
3900			};
3901		}
3902
3903		// Error when number_of_blocks=0
3904		do_handling_query_channel_range(
3905			&gossip_sync,
3906			&node_id_2,
3907			QueryChannelRange {
3908				chain_hash: chain_hash.clone(),
3909				first_blocknum: 0,
3910				number_of_blocks: 0,
3911			},
3912			false,
3913			vec![ReplyChannelRange {
3914				chain_hash: chain_hash.clone(),
3915				first_blocknum: 0,
3916				number_of_blocks: 0,
3917				sync_complete: true,
3918				short_channel_ids: vec![],
3919			}],
3920		);
3921
3922		// Error when wrong chain
3923		do_handling_query_channel_range(
3924			&gossip_sync,
3925			&node_id_2,
3926			QueryChannelRange {
3927				chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3928				first_blocknum: 0,
3929				number_of_blocks: 0xffff_ffff,
3930			},
3931			false,
3932			vec![ReplyChannelRange {
3933				chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3934				first_blocknum: 0,
3935				number_of_blocks: 0xffff_ffff,
3936				sync_complete: true,
3937				short_channel_ids: vec![],
3938			}],
3939		);
3940
3941		// Error when first_blocknum > 0xffffff
3942		do_handling_query_channel_range(
3943			&gossip_sync,
3944			&node_id_2,
3945			QueryChannelRange {
3946				chain_hash: chain_hash.clone(),
3947				first_blocknum: 0x01000000,
3948				number_of_blocks: 0xffff_ffff,
3949			},
3950			false,
3951			vec![ReplyChannelRange {
3952				chain_hash: chain_hash.clone(),
3953				first_blocknum: 0x01000000,
3954				number_of_blocks: 0xffff_ffff,
3955				sync_complete: true,
3956				short_channel_ids: vec![],
3957			}],
3958		);
3959
3960		// Empty reply when max valid SCID block num
3961		do_handling_query_channel_range(
3962			&gossip_sync,
3963			&node_id_2,
3964			QueryChannelRange {
3965				chain_hash: chain_hash.clone(),
3966				first_blocknum: 0xffffff,
3967				number_of_blocks: 1,
3968			},
3969			true,
3970			vec![ReplyChannelRange {
3971				chain_hash: chain_hash.clone(),
3972				first_blocknum: 0xffffff,
3973				number_of_blocks: 1,
3974				sync_complete: true,
3975				short_channel_ids: vec![],
3976			}],
3977		);
3978
3979		// No results in valid query range
3980		do_handling_query_channel_range(
3981			&gossip_sync,
3982			&node_id_2,
3983			QueryChannelRange {
3984				chain_hash: chain_hash.clone(),
3985				first_blocknum: 1000,
3986				number_of_blocks: 1000,
3987			},
3988			true,
3989			vec![ReplyChannelRange {
3990				chain_hash: chain_hash.clone(),
3991				first_blocknum: 1000,
3992				number_of_blocks: 1000,
3993				sync_complete: true,
3994				short_channel_ids: vec![],
3995			}],
3996		);
3997
3998		// Overflow first_blocknum + number_of_blocks
3999		do_handling_query_channel_range(
4000			&gossip_sync,
4001			&node_id_2,
4002			QueryChannelRange {
4003				chain_hash: chain_hash.clone(),
4004				first_blocknum: 0xfe0000,
4005				number_of_blocks: 0xffffffff,
4006			},
4007			true,
4008			vec![ReplyChannelRange {
4009				chain_hash: chain_hash.clone(),
4010				first_blocknum: 0xfe0000,
4011				number_of_blocks: 0xffffffff - 0xfe0000,
4012				sync_complete: true,
4013				short_channel_ids: vec![
4014					0xfffffe_ffffff_ffff, // max
4015				],
4016			}],
4017		);
4018
4019		// Single block exactly full
4020		do_handling_query_channel_range(
4021			&gossip_sync,
4022			&node_id_2,
4023			QueryChannelRange {
4024				chain_hash: chain_hash.clone(),
4025				first_blocknum: 100000,
4026				number_of_blocks: 8000,
4027			},
4028			true,
4029			vec![ReplyChannelRange {
4030				chain_hash: chain_hash.clone(),
4031				first_blocknum: 100000,
4032				number_of_blocks: 8000,
4033				sync_complete: true,
4034				short_channel_ids: (100000..=107999)
4035					.map(|block| scid_from_parts(block, 0, 0).unwrap())
4036					.collect(),
4037			}],
4038		);
4039
4040		// Multiple split on new block
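		// Replies are capped at MAX_SCIDS_PER_REPLY (8000) SCIDs, so the 8001 matching SCIDs
		// are split across two replies.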
4041		do_handling_query_channel_range(
4042			&gossip_sync,
4043			&node_id_2,
4044			QueryChannelRange {
4045				chain_hash: chain_hash.clone(),
4046				first_blocknum: 100000,
4047				number_of_blocks: 8001,
4048			},
4049			true,
4050			vec![
4051				ReplyChannelRange {
4052					chain_hash: chain_hash.clone(),
4053					first_blocknum: 100000,
4054					number_of_blocks: 7999,
4055					sync_complete: false,
4056					short_channel_ids: (100000..=107999)
4057						.map(|block| scid_from_parts(block, 0, 0).unwrap())
4058						.collect(),
4059				},
4060				ReplyChannelRange {
4061					chain_hash: chain_hash.clone(),
4062					first_blocknum: 107999,
4063					number_of_blocks: 2,
4064					sync_complete: true,
4065					short_channel_ids: vec![scid_from_parts(108000, 0, 0).unwrap()],
4066				},
4067			],
4068		);
4069
4070		// Multiple split on same block
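		// Here the 8000-SCID cap lands mid-block, so the second reply resumes within block
		// 108001.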
4071		do_handling_query_channel_range(
4072			&gossip_sync,
4073			&node_id_2,
4074			QueryChannelRange {
4075				chain_hash: chain_hash.clone(),
4076				first_blocknum: 100002,
4077				number_of_blocks: 8000,
4078			},
4079			true,
4080			vec![
4081				ReplyChannelRange {
4082					chain_hash: chain_hash.clone(),
4083					first_blocknum: 100002,
4084					number_of_blocks: 7999,
4085					sync_complete: false,
4086					short_channel_ids: (100002..=108001)
4087						.map(|block| scid_from_parts(block, 0, 0).unwrap())
4088						.collect(),
4089				},
4090				ReplyChannelRange {
4091					chain_hash: chain_hash.clone(),
4092					first_blocknum: 108001,
4093					number_of_blocks: 1,
4094					sync_complete: true,
4095					short_channel_ids: vec![scid_from_parts(108001, 1, 0).unwrap()],
4096				},
4097			],
4098		);
4099	}
4100
4101	fn do_handling_query_channel_range(
4102		gossip_sync: &P2PGossipSync<
4103			&NetworkGraph<Arc<test_utils::TestLogger>>,
4104			Arc<test_utils::TestChainSource>,
4105			Arc<test_utils::TestLogger>,
4106		>,
4107		test_node_id: &PublicKey, msg: QueryChannelRange, expected_ok: bool,
4108		expected_replies: Vec<ReplyChannelRange>,
4109	) {
4110		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
4111		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
4112		let query_end_blocknum = msg.end_blocknum();
4113		let result = gossip_sync.handle_query_channel_range(*test_node_id, msg);
4114
4115		if expected_ok {
4116			assert!(result.is_ok());
4117		} else {
4118			assert!(result.is_err());
4119		}
4120
4121		let events = gossip_sync.get_and_clear_pending_msg_events();
4122		assert_eq!(events.len(), expected_replies.len());
4123
4124		for i in 0..events.len() {
4125			let expected_reply = &expected_replies[i];
4126			match &events[i] {
4127				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
4128					assert_eq!(node_id, test_node_id);
4129					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
4130					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
4131					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
4132					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
4133					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
4134
4135					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
4136					assert!(
4137						msg.first_blocknum == c_lightning_0_9_prev_end_blocknum
4138							|| msg.first_blocknum
4139								== c_lightning_0_9_prev_end_blocknum.saturating_add(1)
4140					);
4141					assert!(msg.first_blocknum >= max_firstblocknum);
4142					max_firstblocknum = msg.first_blocknum;
4143					c_lightning_0_9_prev_end_blocknum =
4144						msg.first_blocknum.saturating_add(msg.number_of_blocks);
4145
4146					// Check that the last reply covers blocks at least up to the query's end_blocknum.
4147					if i == events.len() - 1 {
4148						assert!(
4149							msg.first_blocknum.saturating_add(msg.number_of_blocks)
4150								>= query_end_blocknum
4151						);
4152					}
4153				},
4154				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
4155			}
4156		}
4157	}
4158
4159	#[test]
4160	fn handling_query_short_channel_ids() {
4161		let network_graph = create_network_graph();
4162		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
4163		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
4164		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
4165
4166		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
4167
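		// Queries for specific short channel ids are not answered, so handling one returns an
		// error.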
4168		let result = gossip_sync.handle_query_short_channel_ids(
4169			node_id,
4170			QueryShortChannelIds { chain_hash, short_channel_ids: vec![0x0003e8_000000_0000] },
4171		);
4172		assert!(result.is_err());
4173	}
4174
4175	#[test]
4176	fn displays_node_alias() {
4177		let format_str_alias = |alias: &str| {
4178			let mut bytes = [0u8; 32];
4179			bytes[..alias.len()].copy_from_slice(alias.as_bytes());
4180			format!("{}", NodeAlias(bytes))
4181		};
4182
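		// Display output stops at the first null byte; control characters and invalid UTF-8 are
		// rendered as U+FFFD.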
4183		assert_eq!(format_str_alias("I\u{1F496}LDK! \u{26A1}"), "I\u{1F496}LDK! \u{26A1}");
4184		assert_eq!(format_str_alias("I\u{1F496}LDK!\0\u{26A1}"), "I\u{1F496}LDK!");
4185		assert_eq!(format_str_alias("I\u{1F496}LDK!\t\u{26A1}"), "I\u{1F496}LDK!\u{FFFD}\u{26A1}");
4186
4187		let format_bytes_alias = |alias: &[u8]| {
4188			let mut bytes = [0u8; 32];
4189			bytes[..alias.len()].copy_from_slice(alias);
4190			format!("{}", NodeAlias(bytes))
4191		};
4192
4193		assert_eq!(format_bytes_alias(b"\xFFI <heart> LDK!"), "\u{FFFD}I <heart> LDK!");
4194		assert_eq!(format_bytes_alias(b"\xFFI <heart>\0LDK!"), "\u{FFFD}I <heart>");
4195		assert_eq!(format_bytes_alias(b"\xFFI <heart>\tLDK!"), "\u{FFFD}I <heart>\u{FFFD}LDK!");
4196	}
4197
4198	#[test]
4199	fn channel_info_is_readable() {
4200		let chanmon_cfgs = crate::ln::functional_test_utils::create_chanmon_cfgs(2);
4201		let node_cfgs = crate::ln::functional_test_utils::create_node_cfgs(2, &chanmon_cfgs);
4202		let node_chanmgrs = crate::ln::functional_test_utils::create_node_chanmgrs(
4203			2,
4204			&node_cfgs,
4205			&[None, None, None, None],
4206		);
		let nodes = crate::ln::functional_test_utils::create_network(2, &node_cfgs, &node_chanmgrs);
		let config = crate::ln::functional_test_utils::test_default_channel_config();

		// 1. Test encoding/decoding of ChannelUpdateInfo
		let chan_update_info = ChannelUpdateInfo {
			last_update: 23,
			enabled: true,
			cltv_expiry_delta: 42,
			htlc_minimum_msat: 1234,
			htlc_maximum_msat: 5678,
			fees: RoutingFees { base_msat: 9, proportional_millionths: 10 },
			last_update_message: None,
		};

		let mut encoded_chan_update_info: Vec<u8> = Vec::new();
		assert!(chan_update_info.write(&mut encoded_chan_update_info).is_ok());

		// First make sure we can read ChannelUpdateInfos we just wrote
		let read_chan_update_info: ChannelUpdateInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
		assert_eq!(chan_update_info, read_chan_update_info);

		// Check the serialization hasn't changed.
		let legacy_chan_update_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap();
		assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some);

		// Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself
		// or the ChannelUpdate enclosed with `last_update_message`.
		let legacy_chan_update_info_with_some_and_fail_update: Vec<u8> = <Vec<u8>>::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap();
		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
			crate::util::ser::Readable::read(
				&mut legacy_chan_update_info_with_some_and_fail_update.as_slice(),
			);
		assert!(read_chan_update_info_res.is_err());

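		// A legacy encoding in which `htlc_maximum_msat` was serialized as `None`; decoding must
		// fail now that the field is required.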
		let legacy_chan_update_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
			crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
		assert!(read_chan_update_info_res.is_err());

		// 2. Test encoding/decoding of ChannelInfo
		// Check we can encode/decode ChannelInfo without ChannelUpdateInfo fields present.
		let chan_info_none_updates = ChannelInfo {
			features: channelmanager::provided_channel_features(&config),
			node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
			one_to_two: None,
			node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
			two_to_one: None,
			capacity_sats: None,
			announcement_message: None,
			announcement_received_time: 87654,
			node_one_counter: 0,
			node_two_counter: 1,
		};

		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_none_updates.write(&mut encoded_chan_info).is_ok());

		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_none_updates, read_chan_info);

		// Check we can encode/decode ChannelInfo with ChannelUpdateInfo fields present.
		let chan_info_some_updates = ChannelInfo {
			features: channelmanager::provided_channel_features(&config),
			node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
			one_to_two: Some(chan_update_info.clone()),
			node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
			two_to_one: Some(chan_update_info.clone()),
			capacity_sats: None,
			announcement_message: None,
			announcement_received_time: 87654,
			node_one_counter: 0,
			node_two_counter: 1,
		};

		let mut encoded_chan_info: Vec<u8> = Vec::new();
		assert!(chan_info_some_updates.write(&mut encoded_chan_info).is_ok());

		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
		assert_eq!(chan_info_some_updates, read_chan_info);

		// Check the serialization hasn't changed.
		let legacy_chan_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
		assert_eq!(encoded_chan_info, legacy_chan_info_with_some);

		// Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` /
		// `last_update_message` fields fail to decode due to missing htlc_maximum_msat.
		let legacy_chan_info_with_some_and_fail_update = <Vec<u8>>::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
		let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(
			&mut legacy_chan_info_with_some_and_fail_update.as_slice(),
		)
		.unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);

		let legacy_chan_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
		let read_chan_info: ChannelInfo =
			crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
		assert_eq!(read_chan_info.announcement_received_time, 87654);
		assert_eq!(read_chan_info.one_to_two, None);
		assert_eq!(read_chan_info.two_to_one, None);
	}

	#[test]
	fn node_info_is_readable() {
		// 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
		let announcement_message = <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
		let announcement_message =
			NodeAnnouncement::read_from_fixed_length_buffer(&mut announcement_message.as_slice())
				.unwrap();
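		// The `Relayed` variant retains the full signed announcement so that it can later be
		// forwarded to peers.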
		let valid_node_ann_info = NodeAnnouncementInfo::Relayed(announcement_message);

		let mut encoded_valid_node_ann_info = Vec::new();
		assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
		let read_valid_node_ann_info =
			NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
		assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
		assert_eq!(read_valid_node_ann_info.addresses().len(), 1);

		let encoded_invalid_node_ann_info = <Vec<u8>>::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
		let read_invalid_node_ann_info_res =
			NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
		assert!(read_invalid_node_ann_info_res.is_err());

		// 2. Check we can still read a NodeInfo, setting its NodeAnnouncementInfo to None when the
		// latter is invalid
		let valid_node_info = NodeInfo {
			channels: Vec::new(),
			announcement_info: Some(valid_node_ann_info),
			node_counter: 0,
		};

		let mut encoded_valid_node_info = Vec::new();
		assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
		let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap();
		assert_eq!(read_valid_node_info, valid_node_info);

		let encoded_invalid_node_info_hex = <Vec<u8>>::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
		let read_invalid_node_info =
			NodeInfo::read(&mut encoded_invalid_node_info_hex.as_slice()).unwrap();
		assert_eq!(read_invalid_node_info.announcement_info, None);
	}

	#[test]
	fn test_node_info_keeps_compatibility() {
		let old_ann_info_with_addresses = <Vec<u8>>::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
		let ann_info_with_addresses =
			NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
				.expect("to be able to read an old NodeAnnouncementInfo with addresses");
		// This serialized info has no announcement_message, but its addresses should still be read
		assert!(!ann_info_with_addresses.addresses().is_empty());
	}

	#[test]
	fn test_node_id_display() {
		let node_id = NodeId([42; 33]);
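		// `Display` for `NodeId` hex-encodes the full 33-byte compressed public key
		// (66 hex characters).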
		assert_eq!(
			format!("{}", &node_id),
			"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"
		);
	}

	#[test]
	fn is_tor_only_node() {
		let network_graph = create_network_graph();
		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);

		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
		let node_1_id = NodeId::from_pubkey(&node_1_pubkey);

		let announcement =
			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
		gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &announcement).unwrap();

		let tcp_ip_v4 = SocketAddress::TcpIpV4 { addr: [255, 254, 253, 252], port: 9735 };
		let tcp_ip_v6 = SocketAddress::TcpIpV6 {
			addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
			port: 9735,
		};
		let onion_v2 =
			SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
		let onion_v3 = SocketAddress::OnionV3 {
			ed25519_pubkey: [
				255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240,
				239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224,
			],
			checksum: 32,
			version: 16,
			port: 9735,
		};
		let hostname = SocketAddress::Hostname {
			hostname: Hostname::try_from(String::from("host")).unwrap(),
			port: 9735,
		};

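		// A node should only be considered tor-only once it has announced at least one address
		// and every announced address is an onion address. With no node_announcement received
		// yet, no addresses are known, so the node must not be tor-only.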
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![
					tcp_ip_v4.clone(),
					tcp_ip_v6.clone(),
					onion_v2.clone(),
					onion_v3.clone(),
					hostname.clone(),
				];
				announcement.timestamp += 1000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses =
					vec![tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 2000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses =
					vec![tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 3000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

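		// With only onion addresses (v2 and v3) remaining, the node should now be classified as
		// tor-only.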
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()];
				announcement.timestamp += 4000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![onion_v2.clone()];
				announcement.timestamp += 5000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

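		// Announcing a clearnet address again should clear the tor-only classification.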
		let announcement = get_signed_node_announcement(
			|announcement| {
				announcement.addresses = vec![tcp_ip_v4.clone()];
				announcement.timestamp += 6000;
			},
			node_1_privkey,
			&secp_ctx,
		);
		gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
		assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
	}
}

#[cfg(ldk_bench)]
pub mod benches {
	use super::*;
	use criterion::{black_box, Criterion};
	use std::io::Read;

	pub fn read_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut v = Vec::new();
		d.read_to_end(&mut v).unwrap();
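		// The graph snapshot is read fully into memory first so the benchmark measures
		// deserialization alone, not file I/O.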
		bench.bench_function("read_network_graph", |b| {
			b.iter(|| {
				NetworkGraph::read(&mut crate::io::Cursor::new(black_box(&v)), &logger).unwrap()
			})
		});
	}

	pub fn write_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut graph_buffer = Vec::new();
		d.read_to_end(&mut graph_buffer).unwrap();
		let net_graph = NetworkGraph::read(&mut &graph_buffer[..], &logger).unwrap();
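		// Deserialize once outside the timed closure; only re-serialization via `encode` is
		// benchmarked.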
		bench.bench_function("write_network_graph", |b| b.iter(|| black_box(&net_graph).encode()));
	}
}