lightning/routing/gossip.rs

// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The [`NetworkGraph`] stores the network gossip and [`P2PGossipSync`] fetches it from peers

use bitcoin::amount::Amount;
use bitcoin::constants::ChainHash;

use bitcoin::secp256k1;
use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::Secp256k1;
use bitcoin::secp256k1::{PublicKey, Verification};

use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hashes::Hash;
use bitcoin::network::Network;

use crate::events::{MessageSendEvent, MessageSendEventsProvider};
use crate::ln::msgs;
use crate::ln::msgs::{
	ChannelAnnouncement, ChannelUpdate, GossipTimestampFilter, NodeAnnouncement,
};
use crate::ln::msgs::{
	DecodeError, ErrorAction, Init, LightningError, RoutingMessageHandler, SocketAddress,
	MAX_VALUE_MSAT,
};
use crate::ln::msgs::{
	QueryChannelRange, QueryShortChannelIds, ReplyChannelRange, ReplyShortChannelIdsEnd,
};
use crate::ln::types::ChannelId;
use crate::routing::utxo::{self, UtxoLookup, UtxoResolver};
use crate::types::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use crate::util::indexed_map::{Entry as IndexedMapEntry, IndexedMap};
use crate::util::logger::{Level, Logger};
use crate::util::scid_utils::{block_from_scid, scid_from_parts, MAX_SCID_BLOCK};
use crate::util::ser::{MaybeReadable, Readable, ReadableArgs, RequiredWrapper, Writeable, Writer};
use crate::util::string::PrintableString;

use crate::io;
use crate::io_extras::{copy, sink};
use crate::prelude::*;
use crate::sync::Mutex;
use crate::sync::{LockTestExt, RwLock, RwLockReadGuard};
use core::ops::{Bound, Deref};
use core::str::FromStr;
use core::sync::atomic::{AtomicUsize, Ordering};
use core::{cmp, fmt};

pub use lightning_types::routing::RoutingFees;

#[cfg(feature = "std")]
use std::time::{SystemTime, UNIX_EPOCH};

/// We remove stale channel directional info two weeks after the last update, per BOLT 7's
/// suggestion.
const STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;

/// We stop tracking the removal of permanently failed nodes and channels one week after removal
const REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 7;

/// The maximum number of extra bytes which we do not understand in a gossip message before we will
/// refuse to relay the message.
const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;

/// Maximum number of short_channel_ids that will be encoded in one gossip reply message.
/// This value ensures a reply fits within the 65k payload limit and is consistent with other implementations.
const MAX_SCIDS_PER_REPLY: usize = 8000;

/// Represents the compressed public key of a node
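///
/// # Example
///
/// A minimal sketch of the main conversions (the fixed secret key here is illustrative only):
///
/// ```
/// # use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
/// # use lightning::routing::gossip::NodeId;
/// let secp_ctx = Secp256k1::new();
/// let pubkey = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
/// let node_id = NodeId::from_pubkey(&pubkey);
/// // Round-trip through the raw serialization and back to a public key.
/// assert_eq!(NodeId::from_slice(node_id.as_slice()).unwrap(), node_id);
/// assert_eq!(node_id.as_pubkey().unwrap(), pubkey);
/// ```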
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct NodeId([u8; PUBLIC_KEY_SIZE]);

impl NodeId {
	/// Create a new NodeId from a public key
	pub fn from_pubkey(pubkey: &PublicKey) -> Self {
		NodeId(pubkey.serialize())
	}

	/// Create a new NodeId from a slice of bytes
	pub fn from_slice(bytes: &[u8]) -> Result<Self, DecodeError> {
		if bytes.len() != PUBLIC_KEY_SIZE {
			return Err(DecodeError::InvalidValue);
		}
		let mut data = [0; PUBLIC_KEY_SIZE];
		data.copy_from_slice(bytes);
		Ok(NodeId(data))
	}

	/// Get the public key slice from this NodeId
	pub fn as_slice(&self) -> &[u8] {
		&self.0
	}

	/// Get the public key as an array from this NodeId
	pub fn as_array(&self) -> &[u8; PUBLIC_KEY_SIZE] {
		&self.0
	}

	/// Get the public key from this NodeId
	pub fn as_pubkey(&self) -> Result<PublicKey, secp256k1::Error> {
		PublicKey::from_slice(&self.0)
	}
}

impl fmt::Debug for NodeId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "NodeId({})", crate::util::logger::DebugBytes(&self.0))
	}
}
impl fmt::Display for NodeId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		crate::util::logger::DebugBytes(&self.0).fmt(f)
	}
}

impl core::hash::Hash for NodeId {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		self.0.hash(hasher);
	}
}

impl cmp::PartialOrd for NodeId {
	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for NodeId {
	fn cmp(&self, other: &Self) -> cmp::Ordering {
		self.0[..].cmp(&other.0[..])
	}
}

impl Writeable for NodeId {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		writer.write_all(&self.0)?;
		Ok(())
	}
}

impl Readable for NodeId {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		let mut buf = [0; PUBLIC_KEY_SIZE];
		reader.read_exact(&mut buf)?;
		Ok(Self(buf))
	}
}

impl From<PublicKey> for NodeId {
	fn from(pubkey: PublicKey) -> Self {
		Self::from_pubkey(&pubkey)
	}
}

impl TryFrom<NodeId> for PublicKey {
	type Error = secp256k1::Error;

	fn try_from(node_id: NodeId) -> Result<Self, Self::Error> {
		node_id.as_pubkey()
	}
}

impl FromStr for NodeId {
	type Err = bitcoin::hex::parse::HexToArrayError;

	fn from_str(s: &str) -> Result<Self, Self::Err> {
		let data: [u8; PUBLIC_KEY_SIZE] = bitcoin::hex::FromHex::from_hex(s)?;
		Ok(NodeId(data))
	}
}

/// Represents the network as nodes and channels between them
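///
/// # Example
///
/// A minimal sketch of constructing an empty graph (the no-op `NullLogger` is illustrative,
/// not part of LDK):
///
/// ```
/// # use bitcoin::constants::ChainHash;
/// # use bitcoin::network::Network;
/// # use lightning::routing::gossip::NetworkGraph;
/// # use lightning::util::logger::{Logger, Record};
/// struct NullLogger;
/// impl Logger for NullLogger {
/// 	fn log(&self, _record: Record) {}
/// }
/// let logger = NullLogger;
/// let graph = NetworkGraph::new(Network::Bitcoin, &logger);
/// assert_eq!(graph.get_chain_hash(), ChainHash::using_genesis_block(Network::Bitcoin));
/// ```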
pub struct NetworkGraph<L: Deref>
where
	L::Target: Logger,
{
	secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
	last_rapid_gossip_sync_timestamp: Mutex<Option<u32>>,
	chain_hash: ChainHash,
	logger: L,
	// Lock order: channels -> nodes
	channels: RwLock<IndexedMap<u64, ChannelInfo>>,
	nodes: RwLock<IndexedMap<NodeId, NodeInfo>>,
	removed_node_counters: Mutex<Vec<u32>>,
	next_node_counter: AtomicUsize,
	// Lock order: removed_channels -> removed_nodes
	//
	// NOTE: In the following `removed_*` maps, we use seconds since UNIX epoch to track time instead
	// of `std::time::Instant`s for a few reasons:
	//   * We want it to be possible to do tracking in non-`std` environments where we can compare
	//     a provided current UNIX timestamp with the time at which we started tracking.
	//   * In the future, if we decide to persist these maps, they will already be serializable.
	//   * Although we lose out on the platform's monotonic clock, the system clock in a std
	//     environment should be practical over the time period we are considering (on the order of a
	//     week).
	//
	/// Keeps track of short channel IDs for channels we have explicitly removed due to permanent
	/// failure so that we don't resync them from gossip. Each SCID is mapped to the time (in seconds)
	/// it was removed so that once some time passes, we can potentially resync it from gossip again.
	removed_channels: Mutex<HashMap<u64, Option<u64>>>,
	/// Keeps track of `NodeId`s we have explicitly removed due to permanent failure so that we don't
	/// resync them from gossip. Each `NodeId` is mapped to the time (in seconds) it was removed so
	/// that once some time passes, we can potentially resync it from gossip again.
	removed_nodes: Mutex<HashMap<NodeId, Option<u64>>>,
	/// Announcement messages which are awaiting an on-chain lookup to be processed.
	pub(super) pending_checks: utxo::PendingChecks,
}

/// A read-only view of [`NetworkGraph`].
pub struct ReadOnlyNetworkGraph<'a> {
	channels: RwLockReadGuard<'a, IndexedMap<u64, ChannelInfo>>,
	nodes: RwLockReadGuard<'a, IndexedMap<NodeId, NodeInfo>>,
	max_node_counter: u32,
}

/// Update to the [`NetworkGraph`] based on payment failure information conveyed via the Onion
/// return packet by a node along the route. See [BOLT #4] for details.
///
/// [BOLT #4]: https://github.com/lightning/bolts/blob/master/04-onion-routing.md
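///
/// # Example
///
/// A minimal sketch showing that these updates round-trip through LDK serialization:
///
/// ```
/// # use lightning::routing::gossip::NetworkUpdate;
/// # use lightning::util::ser::{MaybeReadable, Writeable};
/// let update = NetworkUpdate::ChannelFailure { short_channel_id: 42, is_permanent: true };
/// let encoded = update.encode();
/// // `MaybeReadable` returns `Ok(None)` for odd, unknown variants; known ones read as `Some`.
/// assert_eq!(NetworkUpdate::read(&mut &encoded[..]).unwrap(), Some(update));
/// ```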
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum NetworkUpdate {
	/// An error indicating that a channel failed to route a payment, which should be applied via
	/// [`NetworkGraph::channel_failed_permanent`] if permanent.
	ChannelFailure {
		/// The short channel id of the closed channel.
		short_channel_id: u64,
		/// Whether the channel should be permanently removed or temporarily disabled until a new
		/// `channel_update` message is received.
		is_permanent: bool,
	},
	/// An error indicating that a node failed to route a payment, which should be applied via
	/// [`NetworkGraph::node_failed_permanent`] if permanent.
	NodeFailure {
		/// The node id of the failed node.
		node_id: PublicKey,
		/// Whether the node should be permanently removed from consideration or can be restored
		/// when a new `channel_update` message is received.
		is_permanent: bool,
	},
}

impl Writeable for NetworkUpdate {
	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		match self {
			Self::ChannelFailure { short_channel_id, is_permanent } => {
				2u8.write(writer)?;
				write_tlv_fields!(writer, {
					(0, short_channel_id, required),
					(2, is_permanent, required),
				});
			},
			Self::NodeFailure { node_id, is_permanent } => {
				4u8.write(writer)?;
				write_tlv_fields!(writer, {
					(0, node_id, required),
					(2, is_permanent, required),
				});
			},
		}
		Ok(())
	}
}

impl MaybeReadable for NetworkUpdate {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		let id: u8 = Readable::read(reader)?;
		match id {
			0 => {
				// 0 was previously used for network updates containing a channel update, subsequently
				// removed in LDK version 0.0.124.
				let mut msg: RequiredWrapper<ChannelUpdate> = RequiredWrapper(None);
				read_tlv_fields!(reader, {
					(0, msg, required),
				});
				Ok(Some(Self::ChannelFailure {
					short_channel_id: msg.0.unwrap().contents.short_channel_id,
					is_permanent: false,
				}))
			},
			2 => {
				_init_and_read_len_prefixed_tlv_fields!(reader, {
					(0, short_channel_id, required),
					(2, is_permanent, required),
				});
				Ok(Some(Self::ChannelFailure {
					short_channel_id: short_channel_id.0.unwrap(),
					is_permanent: is_permanent.0.unwrap(),
				}))
			},
			4 => {
				_init_and_read_len_prefixed_tlv_fields!(reader, {
					(0, node_id, required),
					(2, is_permanent, required),
				});
				Ok(Some(Self::NodeFailure {
					node_id: node_id.0.unwrap(),
					is_permanent: is_permanent.0.unwrap(),
				}))
			},
			t if t % 2 == 0 => Err(DecodeError::UnknownRequiredFeature),
			_ => Ok(None),
		}
	}
}

/// Receives and validates network updates from peers, and stores authentic and
/// relevant data in a network graph. This network graph is then used for routing
/// payments. Provides an interface to help with initial routing sync by serving
/// historical announcements.
pub struct P2PGossipSync<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	network_graph: G,
	utxo_lookup: RwLock<Option<U>>,
	full_syncs_requested: AtomicUsize,
	pending_events: Mutex<Vec<MessageSendEvent>>,
	logger: L,
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	/// Creates a new tracker of the actual state of the network of channels and nodes,
	/// assuming an existing [`NetworkGraph`].
	/// UTXO lookup is used to make sure announced channels exist on-chain, channel data is
	/// correct, and the announcement is signed with channel owners' keys.
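	///
	/// # Example
	///
	/// A minimal sketch without a UTXO source, so announced channels are not checked on-chain
	/// (the no-op `NullLogger` is illustrative, not part of LDK):
	///
	/// ```
	/// # use std::sync::Arc;
	/// # use bitcoin::network::Network;
	/// # use lightning::routing::gossip::{NetworkGraph, P2PGossipSync};
	/// # use lightning::routing::utxo::UtxoLookup;
	/// # use lightning::util::logger::{Logger, Record};
	/// # struct NullLogger;
	/// # impl Logger for NullLogger { fn log(&self, _record: Record) {} }
	/// let logger = NullLogger;
	/// let graph = NetworkGraph::new(Network::Bitcoin, &logger);
	/// let gossip_sync = P2PGossipSync::new(&graph, None::<Arc<dyn UtxoLookup>>, &logger);
	/// ```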
	pub fn new(network_graph: G, utxo_lookup: Option<U>, logger: L) -> Self {
		P2PGossipSync {
			network_graph,
			full_syncs_requested: AtomicUsize::new(0),
			utxo_lookup: RwLock::new(utxo_lookup),
			pending_events: Mutex::new(vec![]),
			logger,
		}
	}

	/// Adds a provider used to check new announcements. Does not affect
	/// existing announcements unless they are updated.
	/// Adding, updating, or removing the provider replaces the current one.
	pub fn add_utxo_lookup(&self, utxo_lookup: Option<U>) {
		*self.utxo_lookup.write().unwrap() = utxo_lookup;
	}

	/// Gets a reference to the underlying [`NetworkGraph`] which was provided in
	/// [`P2PGossipSync::new`].
	///
	/// This is not exported to bindings users as bindings don't support a reference-to-a-reference yet
	pub fn network_graph(&self) -> &G {
		&self.network_graph
	}

	/// Returns true when a full routing table sync should be performed with a peer.
	fn should_request_full_sync(&self) -> bool {
		const FULL_SYNCS_TO_REQUEST: usize = 5;
		if self.full_syncs_requested.load(Ordering::Acquire) < FULL_SYNCS_TO_REQUEST {
			self.full_syncs_requested.fetch_add(1, Ordering::AcqRel);
			true
		} else {
			false
		}
	}

	/// Used to broadcast gossip messages which were validated asynchronously.
	///
	/// Note that this will ignore events other than `Broadcast*` or messages with too much excess
	/// data.
	pub(super) fn forward_gossip_msg(&self, mut ev: MessageSendEvent) {
		match &mut ev {
			MessageSendEvent::BroadcastChannelAnnouncement { msg, ref mut update_msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
					return;
				}
				if update_msg.as_ref().map(|msg| msg.contents.excess_data.len()).unwrap_or(0)
					> MAX_EXCESS_BYTES_FOR_RELAY
				{
					*update_msg = None;
				}
			},
			MessageSendEvent::BroadcastChannelUpdate { msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY {
					return;
				}
			},
			MessageSendEvent::BroadcastNodeAnnouncement { msg } => {
				if msg.contents.excess_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
					|| msg.contents.excess_address_data.len() > MAX_EXCESS_BYTES_FOR_RELAY
					|| msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
						> MAX_EXCESS_BYTES_FOR_RELAY
				{
					return;
				}
			},
			_ => return,
		}
		self.pending_events.lock().unwrap().push(ev);
	}
}

impl<L: Deref> NetworkGraph<L>
where
	L::Target: Logger,
{
	/// Handles any network updates originating from [`Event`]s.
	///
	/// [`Event`]: crate::events::Event
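	///
	/// # Example
	///
	/// A minimal sketch applying a permanent channel failure to an empty graph (the no-op
	/// `NullLogger` is illustrative, not part of LDK):
	///
	/// ```
	/// # use bitcoin::network::Network;
	/// # use lightning::routing::gossip::{NetworkGraph, NetworkUpdate};
	/// # use lightning::util::logger::{Logger, Record};
	/// # struct NullLogger;
	/// # impl Logger for NullLogger { fn log(&self, _record: Record) {} }
	/// let logger = NullLogger;
	/// let graph = NetworkGraph::new(Network::Bitcoin, &logger);
	/// let update = NetworkUpdate::ChannelFailure { short_channel_id: 42, is_permanent: true };
	/// // Removes the channel (a no-op on this empty graph) and remembers the SCID so it is
	/// // not immediately re-added from gossip.
	/// graph.handle_network_update(&update);
	/// ```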
	pub fn handle_network_update(&self, network_update: &NetworkUpdate) {
		match *network_update {
			NetworkUpdate::ChannelFailure { short_channel_id, is_permanent } => {
				if is_permanent {
					log_debug!(
						self.logger,
						"Removing channel graph entry for {} due to a payment failure.",
						short_channel_id
					);
					self.channel_failed_permanent(short_channel_id);
				}
			},
			NetworkUpdate::NodeFailure { ref node_id, is_permanent } => {
				if is_permanent {
					log_debug!(
						self.logger,
						"Removed node graph entry for {} due to a payment failure.",
						log_pubkey!(node_id)
					);
					self.node_failed_permanent(node_id);
				};
			},
		}
	}

	/// Gets the chain hash for this network graph.
	pub fn get_chain_hash(&self) -> ChainHash {
		self.chain_hash
	}
}

macro_rules! secp_verify_sig {
	( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr, $msg_type: expr ) => {
		match $secp_ctx.verify_ecdsa($msg, $sig, $pubkey) {
			Ok(_) => {},
			Err(_) => {
				return Err(LightningError {
					err: format!("Invalid signature on {} message", $msg_type),
					action: ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id: ChannelId::new_zero(),
							data: format!("Invalid signature on {} message", $msg_type),
						},
						log_level: Level::Trace,
					},
				});
			},
		}
	};
}

macro_rules! get_pubkey_from_node_id {
	( $node_id: expr, $msg_type: expr ) => {
		PublicKey::from_slice($node_id.as_slice()).map_err(|_| LightningError {
			err: format!("Invalid public key on {} message", $msg_type),
			action: ErrorAction::SendWarningMessage {
				msg: msgs::WarningMessage {
					channel_id: ChannelId::new_zero(),
					data: format!("Invalid public key on {} message", $msg_type),
				},
				log_level: Level::Trace,
			},
		})?
	};
}

fn message_sha256d_hash<M: Writeable>(msg: &M) -> Sha256dHash {
	let mut engine = Sha256dHash::engine();
	msg.write(&mut engine).expect("In-memory structs should not fail to serialize");
	Sha256dHash::from_engine(engine)
}

/// Verifies the signature of a [`NodeAnnouncement`].
///
/// Returns an error if it is invalid.
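///
/// # Example
///
/// A hedged sketch (assumes `announcement` is a [`NodeAnnouncement`] received from a peer):
///
/// ```ignore
/// let secp_ctx = Secp256k1::verification_only();
/// verify_node_announcement(&announcement, &secp_ctx)?;
/// ```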
pub fn verify_node_announcement<C: Verification>(
	msg: &NodeAnnouncement, secp_ctx: &Secp256k1<C>,
) -> Result<(), LightningError> {
	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
	secp_verify_sig!(
		secp_ctx,
		&msg_hash,
		&msg.signature,
		&get_pubkey_from_node_id!(msg.contents.node_id, "node_announcement"),
		"node_announcement"
	);

	Ok(())
}

/// Verifies all signatures included in a [`ChannelAnnouncement`].
///
/// Returns an error if one of the signatures is invalid.
pub fn verify_channel_announcement<C: Verification>(
	msg: &ChannelAnnouncement, secp_ctx: &Secp256k1<C>,
) -> Result<(), LightningError> {
	let msg_hash = hash_to_message!(&message_sha256d_hash(&msg.contents)[..]);
	let node_a = get_pubkey_from_node_id!(msg.contents.node_id_1, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_1, &node_a, "channel_announcement");
	let node_b = get_pubkey_from_node_id!(msg.contents.node_id_2, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.node_signature_2, &node_b, "channel_announcement");
	let btc_a = get_pubkey_from_node_id!(msg.contents.bitcoin_key_1, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &btc_a, "channel_announcement");
	let btc_b = get_pubkey_from_node_id!(msg.contents.bitcoin_key_2, "channel_announcement");
	secp_verify_sig!(secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &btc_b, "channel_announcement");

	Ok(())
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> RoutingMessageHandler
	for P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	fn handle_node_announcement(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::NodeAnnouncement,
	) -> Result<bool, LightningError> {
		self.network_graph.update_node_from_announcement(msg)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
			&& msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
			&& msg.contents.excess_data.len() + msg.contents.excess_address_data.len()
				<= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_announcement(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelAnnouncement,
	) -> Result<bool, LightningError> {
		self.network_graph
			.update_channel_from_announcement(msg, &*self.utxo_lookup.read().unwrap())?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn handle_channel_update(
		&self, _their_node_id: Option<PublicKey>, msg: &msgs::ChannelUpdate,
	) -> Result<bool, LightningError> {
		self.network_graph.update_channel(msg)?;
		Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
	}

	fn get_next_channel_announcement(
		&self, starting_point: u64,
	) -> Option<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
		let mut channels = self.network_graph.channels.write().unwrap();
		for (_, ref chan) in channels.range(starting_point..) {
			if chan.announcement_message.is_some() {
				let chan_announcement = chan.announcement_message.clone().unwrap();
				let mut one_to_two_announcement: Option<msgs::ChannelUpdate> = None;
				let mut two_to_one_announcement: Option<msgs::ChannelUpdate> = None;
				if let Some(one_to_two) = chan.one_to_two.as_ref() {
					one_to_two_announcement.clone_from(&one_to_two.last_update_message);
				}
				if let Some(two_to_one) = chan.two_to_one.as_ref() {
					two_to_one_announcement.clone_from(&two_to_one.last_update_message);
				}
				return Some((chan_announcement, one_to_two_announcement, two_to_one_announcement));
			} else {
				// TODO: We may end up sending un-announced channel_updates if we are sending
				// initial sync data while receiving announce/updates for this channel.
			}
		}
		None
	}

	fn get_next_node_announcement(
		&self, starting_point: Option<&NodeId>,
	) -> Option<NodeAnnouncement> {
		let mut nodes = self.network_graph.nodes.write().unwrap();
		let iter = if let Some(node_id) = starting_point {
			nodes.range((Bound::Excluded(node_id), Bound::Unbounded))
		} else {
			nodes.range(..)
		};
		for (_, ref node) in iter {
			if let Some(node_info) = node.announcement_info.as_ref() {
				if let NodeAnnouncementInfo::Relayed(announcement) = node_info {
					return Some(announcement.clone());
				}
			}
		}
		None
	}

	/// Initiates a stateless sync of routing gossip information with a peer
	/// using [`gossip_queries`]. The default strategy used by this implementation
	/// is to sync the full block range with several peers.
	///
	/// We should expect one or more [`reply_channel_range`] messages in response
	/// to our [`query_channel_range`]. Each reply will enqueue a [`query_scid`] message
	/// to request gossip messages for each channel. The sync is considered complete
	/// when the final [`reply_scids_end`] message is received, though we are not
	/// tracking this directly.
	///
	/// [`gossip_queries`]: https://github.com/lightning/bolts/blob/master/07-routing-gossip.md#query-messages
	/// [`reply_channel_range`]: msgs::ReplyChannelRange
	/// [`query_channel_range`]: msgs::QueryChannelRange
	/// [`query_scid`]: msgs::QueryShortChannelIds
	/// [`reply_scids_end`]: msgs::ReplyShortChannelIdsEnd
	fn peer_connected(
		&self, their_node_id: PublicKey, init_msg: &Init, _inbound: bool,
	) -> Result<(), ()> {
		// We will only perform a sync with peers that support gossip_queries.
		if !init_msg.features.supports_gossip_queries() {
			// Don't disconnect peers for not supporting gossip queries. We may wish to have
			// channels with peers even without being able to exchange gossip.
			return Ok(());
		}

		// The lightning network's gossip sync system is completely broken in numerous ways.
		//
		// Given no broadly-available set-reconciliation protocol, the only reasonable approach is
		// to do a full sync from the first few peers we connect to, and then receive gossip
		// updates from all our peers normally.
		//
		// Originally, we could simply tell a peer to dump us the entire gossip table on startup,
		// wasting lots of bandwidth but ensuring we have the full network graph. After the initial
		// dump peers would always send gossip and we'd stay up-to-date with whatever our peer has
		// seen.
		//
		// In order to reduce the bandwidth waste, "gossip queries" were introduced, allowing you
		// to ask for the SCIDs of all channels in your peer's routing graph, and then only request
		// channel data which you are missing. Except there was no way at all to identify which
		// `channel_update`s you were missing, so you still had to request everything, just in a
		// very complicated way with some queries instead of just getting the dump.
		//
		// Later, an option was added to fetch the latest timestamps of the `channel_update`s to
		// make efficient sync possible, however it has yet to be implemented in lnd, which makes
		// relying on it useless.
		//
		// After gossip queries were introduced, support for receiving a full gossip table dump on
		// connection was removed from several nodes, making it impossible to get a full sync
		// without using the "gossip queries" messages.
		//
		// Once you opt into "gossip queries", the only way to receive any gossip updates that a
		// peer receives after you connect is to send a `gossip_timestamp_filter` message. This
		// message, as the name implies, tells the peer to not forward any gossip messages with a
		// timestamp older than a given value (not the time the peer received the filter, but the
		// timestamp in the update message, which is often hours behind when the peer received the
		// message).
		//
		// Obnoxiously, `gossip_timestamp_filter` isn't *just* a filter, but it's also a request for
		// your peer to send you the full routing graph (subject to the filter). Thus, in order to
		// tell a peer to send you any updates as it sees them, you have to also ask for the full
		// routing graph to be synced. If you set a timestamp filter near the current time, peers
		// will simply not forward any new updates they see to you which were generated some time
		// ago (which is not uncommon). If you instead set a timestamp filter near 0 (or two weeks
		// ago), you will always get the full routing graph from all your peers.
		//
		// Most lightning nodes today opt to simply turn off receiving gossip data which only
		// propagated some time after it was generated, and, worse, often disable gossiping with
		// several peers after their first connection. The second behavior can cause gossip to not
		// propagate fully if there are cuts in the gossiping subgraph.
		//
		// In an attempt to cut a middle ground between always fetching the full graph from all of
		// our peers and never receiving gossip from peers at all, we send all of our peers a
		// `gossip_timestamp_filter`, with the filter time set either two weeks ago or an hour ago.
		//
		// For non-`std` builds, we bury our head in the sand and do a full sync on each connection.
		#[allow(unused_mut, unused_assignments)]
		let mut gossip_start_time = 0;
		#[allow(unused)]
		let should_sync = self.should_request_full_sync();
		#[cfg(feature = "std")]
		{
			gossip_start_time = SystemTime::now()
				.duration_since(UNIX_EPOCH)
				.expect("Time must be > 1970")
				.as_secs();
			if should_sync {
				gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
			} else {
				gossip_start_time -= 60 * 60; // an hour ago
			}
		}

		let mut pending_events = self.pending_events.lock().unwrap();
		pending_events.push(MessageSendEvent::SendGossipTimestampFilter {
			node_id: their_node_id.clone(),
			msg: GossipTimestampFilter {
				chain_hash: self.network_graph.chain_hash,
				first_timestamp: gossip_start_time as u32, // 2106 issue!
				timestamp_range: u32::max_value(),
			},
		});
		Ok(())
	}

	fn handle_reply_channel_range(
		&self, _their_node_id: PublicKey, _msg: ReplyChannelRange,
	) -> Result<(), LightningError> {
		// We don't make queries, so should never receive replies. If, in the future, the set
		// reconciliation extensions to gossip queries become broadly supported, we should revert
		// this code to its state pre-0.0.106.
		Ok(())
	}

	fn handle_reply_short_channel_ids_end(
		&self, _their_node_id: PublicKey, _msg: ReplyShortChannelIdsEnd,
	) -> Result<(), LightningError> {
		// We don't make queries, so should never receive replies. If, in the future, the set
		// reconciliation extensions to gossip queries become broadly supported, we should revert
		// this code to its state pre-0.0.106.
		Ok(())
	}

	/// Processes a query from a peer by finding announced/public channels whose funding UTXOs
	/// are in the specified block range. Due to message size limits, large range
	/// queries may result in several reply messages. This implementation enqueues
	/// all reply messages into pending events. Each message will allocate just under 65KiB. A full
	/// sync of the public routing table with 128k channels will generate 16 messages and allocate ~1MB.
	/// Logic can be changed to reduce allocation if/when a full sync of the routing table impacts
	/// memory constrained systems.
	fn handle_query_channel_range(
		&self, their_node_id: PublicKey, msg: QueryChannelRange,
	) -> Result<(), LightningError> {
		log_debug!(
			self.logger,
			"Handling query_channel_range peer={}, first_blocknum={}, number_of_blocks={}",
			log_pubkey!(their_node_id),
			msg.first_blocknum,
			msg.number_of_blocks
		);

		let inclusive_start_scid = scid_from_parts(msg.first_blocknum as u64, 0, 0);

		// We might receive valid queries with end_blocknum that would overflow SCID conversion.
		// If so, we manually cap the ending block to avoid this overflow.
		let exclusive_end_scid =
			scid_from_parts(cmp::min(msg.end_blocknum() as u64, MAX_SCID_BLOCK), 0, 0);

		// Per spec, we must reply to a query. Send an empty message when things are invalid.
		if msg.chain_hash != self.network_graph.chain_hash
			|| inclusive_start_scid.is_err()
			|| exclusive_end_scid.is_err()
			|| msg.number_of_blocks == 0
		{
			let mut pending_events = self.pending_events.lock().unwrap();
			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum: msg.first_blocknum,
					number_of_blocks: msg.number_of_blocks,
					sync_complete: true,
					short_channel_ids: vec![],
				},
			});
			return Err(LightningError {
				err: String::from("query_channel_range could not be processed"),
				action: ErrorAction::IgnoreError,
			});
		}

		// Creates channel batches. We are not checking if the channel is routable
		// (has at least one update). A peer may still want to know the channel
		// exists even if it's not yet routable.
		let mut batches: Vec<Vec<u64>> = vec![Vec::with_capacity(MAX_SCIDS_PER_REPLY)];
		let mut channels = self.network_graph.channels.write().unwrap();
		for (_, ref chan) in
			channels.range(inclusive_start_scid.unwrap()..exclusive_end_scid.unwrap())
		{
			if let Some(chan_announcement) = &chan.announcement_message {
				// Construct a new batch if the last one is full
				if batches.last().unwrap().len() == batches.last().unwrap().capacity() {
					batches.push(Vec::with_capacity(MAX_SCIDS_PER_REPLY));
				}

				let batch = batches.last_mut().unwrap();
				batch.push(chan_announcement.contents.short_channel_id);
			}
		}
		drop(channels);

		let mut pending_events = self.pending_events.lock().unwrap();
		let batch_count = batches.len();
		let mut prev_batch_endblock = msg.first_blocknum;
		for (batch_index, batch) in batches.into_iter().enumerate() {
			// Per spec, the initial `first_blocknum` needs to be <= the query's `first_blocknum`
			// and subsequent `first_blocknum`s must be >= the prior reply's `first_blocknum`.
			//
			// Additionally, c-lightning versions < 0.10 require that the `first_blocknum` of each
			// reply is >= the previous reply's `first_blocknum` and either exactly the previous
			// reply's `first_blocknum + number_of_blocks` or exactly one greater. This is a
			// significant diversion from the requirements set by the spec, and, in case of blocks
			// with no channel opens (e.g. empty blocks), requires that we use the previous value
			// and *not* derive the first_blocknum from the actual first block of the reply.
			let first_blocknum = prev_batch_endblock;

			// Each message carries the number of blocks (from the `first_blocknum`) its contents
			// fit in. Though there is no requirement that we use exactly the number of blocks its
			// contents are from, except for the bogus requirements c-lightning enforces, above.
			//
			// Per spec, the last end block (ie `first_blocknum + number_of_blocks`) needs to be
			// >= the query's end block. Thus, for the last reply, we calculate the difference
			// between the query's end block and the start of the reply.
			//
			// Overflow safe since end_blocknum=msg.first_block_num+msg.number_of_blocks and
			// first_blocknum will be either msg.first_blocknum or a higher block height.
			let (sync_complete, number_of_blocks) = if batch_index == batch_count - 1 {
				(true, msg.end_blocknum() - first_blocknum)
			}
			// Prior replies should use the number of blocks that fit into the reply. Overflow
			// safe since first_blocknum is always <= last SCID's block.
			else {
				(false, block_from_scid(*batch.last().unwrap()) - first_blocknum)
			};

			prev_batch_endblock = first_blocknum + number_of_blocks;

			pending_events.push(MessageSendEvent::SendReplyChannelRange {
				node_id: their_node_id.clone(),
				msg: ReplyChannelRange {
					chain_hash: msg.chain_hash.clone(),
					first_blocknum,
					number_of_blocks,
					sync_complete,
					short_channel_ids: batch,
				},
			});
		}

		Ok(())
	}

	fn handle_query_short_channel_ids(
		&self, _their_node_id: PublicKey, _msg: QueryShortChannelIds,
	) -> Result<(), LightningError> {
		// TODO
		Err(LightningError {
			err: String::from("Not implemented"),
			action: ErrorAction::IgnoreError,
		})
	}

	fn provided_node_features(&self) -> NodeFeatures {
		let mut features = NodeFeatures::empty();
		features.set_gossip_queries_optional();
		features
	}

	fn provided_init_features(&self, _their_node_id: PublicKey) -> InitFeatures {
		let mut features = InitFeatures::empty();
		features.set_gossip_queries_optional();
		features
	}

	fn processing_queue_high(&self) -> bool {
		self.network_graph.pending_checks.too_many_checks_pending()
	}
}

impl<G: Deref<Target = NetworkGraph<L>>, U: Deref, L: Deref> MessageSendEventsProvider
	for P2PGossipSync<G, U, L>
where
	U::Target: UtxoLookup,
	L::Target: Logger,
{
	fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
		let mut ret = Vec::new();
		let mut pending_events = self.pending_events.lock().unwrap();
		core::mem::swap(&mut ret, &mut pending_events);
		ret
	}
}

// Fetching values from this struct is very performance sensitive during routefinding. Thus, we
// want to ensure that all of the fields we care about (all of them except `last_update_message`)
// sit on the same cache line.
//
// We do this by using `repr(C)`, which forces the struct to be laid out in memory the way we write
// it (ensuring `last_update_message` hangs off the end and no fields are reordered after it), and
// `align(32)`, ensuring the struct starts either at the start, or in the middle, of an x86-64
// 64-byte cache line. This ensures the beginning fields (which are 31 bytes) all sit in the same
// cache line.
#[repr(C, align(32))]
#[derive(Clone, Debug, PartialEq, Eq)]
/// Details about one direction of a channel as received within a [`ChannelUpdate`].
pub struct ChannelUpdateInfo {
	/// The minimum value, which must be relayed to the next hop via the channel
	pub htlc_minimum_msat: u64,
	/// The maximum value which may be relayed to the next hop via the channel.
	pub htlc_maximum_msat: u64,
	/// Fees charged when the channel is used for routing
	pub fees: RoutingFees,
	/// When the last update to the channel direction was issued.
	/// Value is opaque, as set in the announcement.
	pub last_update: u32,
	/// The difference in CLTV values that you must have when routing through this channel.
	pub cltv_expiry_delta: u16,
	/// Whether the channel can be currently used for payments (in this one direction).
	pub enabled: bool,
	/// Most recent update for the channel received from the network
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if contains excess data to prevent DoS.
	pub last_update_message: Option<ChannelUpdate>,
}

impl fmt::Display for ChannelUpdateInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(
			f,
			"last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fees {:?}",
			self.last_update,
			self.enabled,
			self.cltv_expiry_delta,
			self.htlc_minimum_msat,
			self.fees
		)?;
		Ok(())
	}
}

impl Writeable for ChannelUpdateInfo {
	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.last_update, required),
			(2, self.enabled, required),
			(4, self.cltv_expiry_delta, required),
			(6, self.htlc_minimum_msat, required),
			// Writing htlc_maximum_msat as an Option<u64> is required to maintain backwards
			// compatibility with LDK versions prior to v0.0.110.
			(8, Some(self.htlc_maximum_msat), required),
			(10, self.fees, required),
			(12, self.last_update_message, required),
		});
		Ok(())
	}
}

impl Readable for ChannelUpdateInfo {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_tlv_field_var!(last_update, required);
		_init_tlv_field_var!(enabled, required);
		_init_tlv_field_var!(cltv_expiry_delta, required);
		_init_tlv_field_var!(htlc_minimum_msat, required);
		_init_tlv_field_var!(htlc_maximum_msat, option);
		_init_tlv_field_var!(fees, required);
		_init_tlv_field_var!(last_update_message, required);

		read_tlv_fields!(reader, {
			(0, last_update, required),
			(2, enabled, required),
			(4, cltv_expiry_delta, required),
			(6, htlc_minimum_msat, required),
			(8, htlc_maximum_msat, required),
			(10, fees, required),
			(12, last_update_message, required)
		});

		if let Some(htlc_maximum_msat) = htlc_maximum_msat {
			Ok(ChannelUpdateInfo {
				last_update: _init_tlv_based_struct_field!(last_update, required),
				enabled: _init_tlv_based_struct_field!(enabled, required),
				cltv_expiry_delta: _init_tlv_based_struct_field!(cltv_expiry_delta, required),
				htlc_minimum_msat: _init_tlv_based_struct_field!(htlc_minimum_msat, required),
				htlc_maximum_msat,
				fees: _init_tlv_based_struct_field!(fees, required),
				last_update_message: _init_tlv_based_struct_field!(last_update_message, required),
			})
		} else {
			Err(DecodeError::InvalidValue)
		}
	}
}

// Fetching values from this struct is very performance sensitive during routefinding. Thus, we
// want to ensure that all of the fields we care about (all of them except `last_update_message`
// and `announcement_received_time`) sit on the same cache line.
//
// Sadly, this is not possible, however we can still do okay - all of the fields before
// `one_to_two` and `two_to_one` are just under 128 bytes long, so we can ensure they sit on
// adjacent cache lines (which are often fetched together in x86-64 processors).
//
// This leaves only the two directional channel info structs on separate cache lines.
//
// We accomplish this using `repr(C)`, which forces the struct to be laid out in memory the way we
// write it (ensuring the fields we care about are at the start of the struct) and `align(128)`,
// ensuring the struct starts at the beginning of two adjacent 64b x86-64 cache lines.
#[repr(align(128), C)]
#[derive(Clone, Debug, Eq)]
/// Details about a channel (both directions).
/// Received within a channel announcement.
pub struct ChannelInfo {
	/// Protocol features of a channel communicated during its announcement
	pub features: ChannelFeatures,

	/// Source node of the first direction of a channel
	pub node_one: NodeId,

	/// Source node of the second direction of a channel
	pub node_two: NodeId,

	/// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_one`].
	pub(crate) node_one_counter: u32,
	/// The [`NodeInfo::node_counter`] of the node pointed to by [`Self::node_two`].
	pub(crate) node_two_counter: u32,

	/// The channel capacity as seen on-chain, if chain lookup is available.
	pub capacity_sats: Option<u64>,

	/// Details about the first direction of a channel
	pub one_to_two: Option<ChannelUpdateInfo>,
	/// Details about the second direction of a channel
	pub two_to_one: Option<ChannelUpdateInfo>,

	/// An initial announcement of the channel
	/// Mostly redundant with the data we store in fields explicitly.
	/// Everything else is useful only for sending out for initial routing sync.
	/// Not stored if contains excess data to prevent DoS.
	pub announcement_message: Option<ChannelAnnouncement>,
	/// The timestamp when we received the announcement, if we are running with feature = "std"
	/// (which we can probably assume we are - non-`std` environments probably won't have a full
	/// network graph in memory!).
	announcement_received_time: u64,
}

impl PartialEq for ChannelInfo {
	fn eq(&self, o: &ChannelInfo) -> bool {
		self.features == o.features
			&& self.node_one == o.node_one
			&& self.one_to_two == o.one_to_two
			&& self.node_two == o.node_two
			&& self.two_to_one == o.two_to_one
			&& self.capacity_sats == o.capacity_sats
			&& self.announcement_message == o.announcement_message
			&& self.announcement_received_time == o.announcement_received_time
	}
}

impl ChannelInfo {
	/// Returns a [`DirectedChannelInfo`] for the channel directed to the given `target` from a
	/// returned `source`, or `None` if `target` is not one of the channel's counterparties.
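	///
	/// # Example
	///
	/// A hedged sketch (assumes `graph` is a populated [`NetworkGraph`] and `scid` and
	/// `target_node_id` are known):
	///
	/// ```ignore
	/// let read_only = graph.read_only();
	/// if let Some(chan) = read_only.channels().get(&scid) {
	/// 	if let Some((directed, source)) = chan.as_directed_to(&target_node_id) {
	/// 		println!("Reachable from {} with {:?}", source, directed.effective_capacity());
	/// 	}
	/// }
	/// ```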
	pub fn as_directed_to(&self, target: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
		if self.one_to_two.is_none() || self.two_to_one.is_none() {
			return None;
		}
		let (direction, source, outbound) = {
			if target == &self.node_one {
				(self.two_to_one.as_ref(), &self.node_two, false)
			} else if target == &self.node_two {
				(self.one_to_two.as_ref(), &self.node_one, true)
			} else {
				return None;
			}
		};
		let dir = direction.expect("We checked that both directions are available at the start");
		Some((DirectedChannelInfo::new(self, dir, outbound), source))
	}

	/// Returns a [`DirectedChannelInfo`] for the channel directed from the given `source` to a
	/// returned `target`, or `None` if `source` is not one of the channel's counterparties.
	pub fn as_directed_from(&self, source: &NodeId) -> Option<(DirectedChannelInfo, &NodeId)> {
		if self.one_to_two.is_none() || self.two_to_one.is_none() {
			return None;
		}
		let (direction, target, outbound) = {
			if source == &self.node_one {
				(self.one_to_two.as_ref(), &self.node_two, true)
			} else if source == &self.node_two {
				(self.two_to_one.as_ref(), &self.node_one, false)
			} else {
				return None;
			}
		};
		let dir = direction.expect("We checked that both directions are available at the start");
		Some((DirectedChannelInfo::new(self, dir, outbound), target))
	}

	/// Returns a [`ChannelUpdateInfo`] based on the direction implied by the `channel_flags`.
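	///
	/// # Example
	///
	/// A hedged sketch; the low bit of a `channel_update`'s `channel_flags` selects the
	/// direction (assumes `chan` is a [`ChannelInfo`]):
	///
	/// ```ignore
	/// let one_to_two = chan.get_directional_info(0); // same as chan.one_to_two.as_ref()
	/// let two_to_one = chan.get_directional_info(1); // same as chan.two_to_one.as_ref()
	/// ```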
	pub fn get_directional_info(&self, channel_flags: u8) -> Option<&ChannelUpdateInfo> {
		let direction = channel_flags & 1u8;
		if direction == 0 {
			self.one_to_two.as_ref()
		} else {
			self.two_to_one.as_ref()
		}
	}
}

impl fmt::Display for ChannelInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		write!(
			f,
			"features: {}, node_one: {}, one_to_two: {:?}, node_two: {}, two_to_one: {:?}",
			log_bytes!(self.features.encode()),
			&self.node_one,
			self.one_to_two,
			&self.node_two,
			self.two_to_one
		)?;
		Ok(())
	}
}

impl Writeable for ChannelInfo {
	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
		write_tlv_fields!(writer, {
			(0, self.features, required),
			(1, self.announcement_received_time, (default_value, 0)),
			(2, self.node_one, required),
			(4, self.one_to_two, required),
			(6, self.node_two, required),
			(8, self.two_to_one, required),
			(10, self.capacity_sats, required),
			(12, self.announcement_message, required),
		});
		Ok(())
	}
}

// A wrapper allowing for the optional deserialization of ChannelUpdateInfo. Utilizing this is
// necessary to maintain backwards compatibility with previous serializations of `ChannelUpdateInfo`
// that may have no `htlc_maximum_msat` field set. In case the field is absent, we simply ignore
// the error and continue reading the `ChannelInfo`. Hopefully, we'll then eventually receive newer
// channel updates via the gossip network.
struct ChannelUpdateInfoDeserWrapper(Option<ChannelUpdateInfo>);

impl MaybeReadable for ChannelUpdateInfoDeserWrapper {
	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
		match crate::util::ser::Readable::read(reader) {
			Ok(channel_update_option) => Ok(Some(Self(channel_update_option))),
			Err(DecodeError::ShortRead) => Ok(None),
			Err(DecodeError::InvalidValue) => Ok(None),
			Err(err) => Err(err),
		}
	}
}

impl Readable for ChannelInfo {
	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
		_init_tlv_field_var!(features, required);
		_init_tlv_field_var!(announcement_received_time, (default_value, 0));
		_init_tlv_field_var!(node_one, required);
		let mut one_to_two_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
		_init_tlv_field_var!(node_two, required);
		let mut two_to_one_wrap: Option<ChannelUpdateInfoDeserWrapper> = None;
		_init_tlv_field_var!(capacity_sats, required);
		_init_tlv_field_var!(announcement_message, required);
		read_tlv_fields!(reader, {
			(0, features, required),
			(1, announcement_received_time, (default_value, 0)),
			(2, node_one, required),
			(4, one_to_two_wrap, upgradable_option),
			(6, node_two, required),
			(8, two_to_one_wrap, upgradable_option),
			(10, capacity_sats, required),
			(12, announcement_message, required),
		});

		Ok(ChannelInfo {
			features: _init_tlv_based_struct_field!(features, required),
			node_one: _init_tlv_based_struct_field!(node_one, required),
			one_to_two: one_to_two_wrap.map(|w| w.0).unwrap_or(None),
			node_two: _init_tlv_based_struct_field!(node_two, required),
			two_to_one: two_to_one_wrap.map(|w| w.0).unwrap_or(None),
			capacity_sats: _init_tlv_based_struct_field!(capacity_sats, required),
			announcement_message: _init_tlv_based_struct_field!(announcement_message, required),
			announcement_received_time: _init_tlv_based_struct_field!(
				announcement_received_time,
				(default_value, 0)
			),
			node_one_counter: u32::max_value(),
			node_two_counter: u32::max_value(),
		})
	}
}

/// A wrapper around [`ChannelInfo`] representing information about the channel as directed from a
/// source node to a target node.
#[derive(Clone)]
pub struct DirectedChannelInfo<'a> {
	channel: &'a ChannelInfo,
	direction: &'a ChannelUpdateInfo,
	source_counter: u32,
	target_counter: u32,
	/// The direction this channel is in - if set, it indicates that we're traversing the channel
	/// from [`ChannelInfo::node_one`] to [`ChannelInfo::node_two`].
	from_node_one: bool,
}

impl<'a> DirectedChannelInfo<'a> {
	#[inline]
	fn new(
		channel: &'a ChannelInfo, direction: &'a ChannelUpdateInfo, from_node_one: bool,
	) -> Self {
		let (source_counter, target_counter) = if from_node_one {
			(channel.node_one_counter, channel.node_two_counter)
		} else {
			(channel.node_two_counter, channel.node_one_counter)
		};
		Self { channel, direction, from_node_one, source_counter, target_counter }
	}

	/// Returns information for the channel.
	#[inline]
	pub fn channel(&self) -> &'a ChannelInfo {
		self.channel
	}

	/// Returns the [`EffectiveCapacity`] of the channel in the direction.
	///
	/// This is either the total capacity from the funding transaction, if known, or the
	/// `htlc_maximum_msat` for the direction as advertised by the gossip network, if known,
	/// otherwise.
	#[inline]
	pub fn effective_capacity(&self) -> EffectiveCapacity {
		let mut htlc_maximum_msat = self.direction().htlc_maximum_msat;
		let capacity_msat = self.channel.capacity_sats.map(|capacity_sats| capacity_sats * 1000);

		match capacity_msat {
			Some(capacity_msat) => {
				htlc_maximum_msat = cmp::min(htlc_maximum_msat, capacity_msat);
				EffectiveCapacity::Total { capacity_msat, htlc_maximum_msat }
			},
			None => EffectiveCapacity::AdvertisedMaxHTLC { amount_msat: htlc_maximum_msat },
		}
	}

	/// Returns information for the direction.
	#[inline]
	pub(super) fn direction(&self) -> &'a ChannelUpdateInfo {
		self.direction
	}

	/// Returns the `node_id` of the source hop.
	///
	/// Refers to the `node_id` forwarding the payment to the next hop.
	#[inline]
	pub fn source(&self) -> &'a NodeId {
		if self.from_node_one {
			&self.channel.node_one
		} else {
			&self.channel.node_two
		}
	}

	/// Returns the `node_id` of the target hop.
	///
	/// Refers to the `node_id` receiving the payment from the previous hop.
	#[inline]
	pub fn target(&self) -> &'a NodeId {
		if self.from_node_one {
			&self.channel.node_two
		} else {
			&self.channel.node_one
		}
	}

	/// Returns the source node's counter
	#[inline(always)]
	pub(super) fn source_counter(&self) -> u32 {
		self.source_counter
	}

	/// Returns the target node's counter
	#[inline(always)]
	pub(super) fn target_counter(&self) -> u32 {
		self.target_counter
	}
}

impl<'a> fmt::Debug for DirectedChannelInfo<'a> {
	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
		f.debug_struct("DirectedChannelInfo").field("channel", &self.channel).finish()
	}
}

/// The effective capacity of a channel for routing purposes.
///
/// While this may be smaller than the actual channel capacity, amounts greater than
/// [`Self::as_msat`] should not be routed through the channel.
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum EffectiveCapacity {
	/// The available liquidity in the channel known from being a channel counterparty, and thus a
	/// direct hop.
	ExactLiquidity {
		/// Either the inbound or outbound liquidity depending on the direction, denominated in
		/// millisatoshi.
		liquidity_msat: u64,
	},
	/// The maximum HTLC amount in one direction as advertised on the gossip network.
	AdvertisedMaxHTLC {
		/// The maximum HTLC amount denominated in millisatoshi.
		amount_msat: u64,
	},
	/// The total capacity of the channel as determined by the funding transaction.
	Total {
		/// The funding amount denominated in millisatoshi.
		capacity_msat: u64,
		/// The maximum HTLC amount denominated in millisatoshi.
		htlc_maximum_msat: u64,
	},
	/// A capacity sufficient to route any payment, typically used for private channels provided by
	/// an invoice.
	Infinite,
	/// The maximum HTLC amount as provided by an invoice route hint.
	HintMaxHTLC {
		/// The maximum HTLC amount denominated in millisatoshi.
		amount_msat: u64,
	},
	/// A capacity that is unknown, possibly because the chain state is unavailable to determine
	/// the total capacity, or because the `htlc_maximum_msat` was not advertised on the gossip
	/// network.
	Unknown,
}

/// The presumed channel capacity denominated in millisatoshi for [`EffectiveCapacity::Unknown`] to
/// use when making routing decisions.
pub const UNKNOWN_CHANNEL_CAPACITY_MSAT: u64 = 250_000 * 1000;

impl EffectiveCapacity {
	/// Returns the effective capacity denominated in millisatoshi.
1329	pub fn as_msat(&self) -> u64 {
1330		match self {
1331			EffectiveCapacity::ExactLiquidity { liquidity_msat } => *liquidity_msat,
1332			EffectiveCapacity::AdvertisedMaxHTLC { amount_msat } => *amount_msat,
1333			EffectiveCapacity::Total { capacity_msat, .. } => *capacity_msat,
1334			EffectiveCapacity::HintMaxHTLC { amount_msat } => *amount_msat,
1335			EffectiveCapacity::Infinite => u64::max_value(),
1336			EffectiveCapacity::Unknown => UNKNOWN_CHANNEL_CAPACITY_MSAT,
1337		}
1338	}
1339}
1340
1341impl_writeable_tlv_based!(RoutingFees, {
1342	(0, base_msat, required),
1343	(2, proportional_millionths, required)
1344});
1345
1346#[derive(Clone, Debug, PartialEq, Eq)]
1347/// Non-relayable information received in the latest node_announcement from this node.
1348pub struct NodeAnnouncementDetails {
1349	/// Protocol features the node announced support for
1350	pub features: NodeFeatures,
1351
1352	/// When the last known update to the node state was issued.
1353	/// Value is opaque, as set in the announcement.
1354	pub last_update: u32,
1355
1356	/// Color assigned to the node
1357	pub rgb: [u8; 3],
1358
1359	/// Moniker assigned to the node.
1360	/// May be invalid or malicious (eg control chars),
1361	/// should not be exposed to the user.
1362	pub alias: NodeAlias,
1363
1364	/// Internet-level addresses via which one can connect to the node
1365	pub addresses: Vec<SocketAddress>,
1366}
1367
1368#[derive(Clone, Debug, PartialEq, Eq)]
1369/// Information received in the latest node_announcement from this node.
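///
/// # Example
///
/// A minimal sketch of a locally-sourced entry; accessors behave the same for both variants:
///
/// ```
/// # use lightning::routing::gossip::{NodeAlias, NodeAnnouncementDetails, NodeAnnouncementInfo};
/// # use lightning::types::features::NodeFeatures;
/// let info = NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
/// 	features: NodeFeatures::empty(),
/// 	last_update: 0,
/// 	rgb: [0; 3],
/// 	alias: NodeAlias([0; 32]),
/// 	addresses: Vec::new(),
/// });
/// // Only the `Relayed` variant retains a full, forwardable announcement message.
/// assert!(info.announcement_message().is_none());
/// ```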
1370pub enum NodeAnnouncementInfo {
1371	/// An initial announcement of the node
1372	/// Everything else is useful only for sending out for initial routing sync.
1373	/// Not stored if contains excess data to prevent DoS.
1374	Relayed(NodeAnnouncement),
1375
1376	/// Non-relayable information received in the latest node_announcement from this node.
1377	Local(NodeAnnouncementDetails),
1378}
1379
1380impl NodeAnnouncementInfo {
1381	/// Protocol features the node announced support for
1382	pub fn features(&self) -> &NodeFeatures {
1383		match self {
1384			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.features,
1385			NodeAnnouncementInfo::Local(local) => &local.features,
1386		}
1387	}
1388
1389	/// When the last known update to the node state was issued.
1390	///
1391	/// Value may or may not be a timestamp, depending on the policy of the origin node.
1392	pub fn last_update(&self) -> u32 {
1393		match self {
1394			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.timestamp,
1395			NodeAnnouncementInfo::Local(local) => local.last_update,
1396		}
1397	}
1398
1399	/// Color assigned to the node
1400	pub fn rgb(&self) -> [u8; 3] {
1401		match self {
1402			NodeAnnouncementInfo::Relayed(relayed) => relayed.contents.rgb,
1403			NodeAnnouncementInfo::Local(local) => local.rgb,
1404		}
1405	}
1406
1407	/// Moniker assigned to the node.
1408	///
1409	/// May be invalid or malicious (e.g., control characters); should not be exposed to the user.
1410	pub fn alias(&self) -> &NodeAlias {
1411		match self {
1412			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.alias,
1413			NodeAnnouncementInfo::Local(local) => &local.alias,
1414		}
1415	}
1416
1417	/// Internet-level addresses via which one can connect to the node
1418	pub fn addresses(&self) -> &[SocketAddress] {
1419		match self {
1420			NodeAnnouncementInfo::Relayed(relayed) => &relayed.contents.addresses,
1421			NodeAnnouncementInfo::Local(local) => &local.addresses,
1422		}
1423	}
1424
1425	/// An initial announcement of the node
1426	///
1427	/// Not stored if it contains excess data, to prevent DoS.
1428	pub fn announcement_message(&self) -> Option<&NodeAnnouncement> {
1429		match self {
1430			NodeAnnouncementInfo::Relayed(announcement) => Some(announcement),
1431			NodeAnnouncementInfo::Local(_) => None,
1432		}
1433	}
1434}
1435
1436impl Writeable for NodeAnnouncementInfo {
1437	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1438		let features = self.features();
1439		let last_update = self.last_update();
1440		let rgb = self.rgb();
1441		let alias = self.alias();
1442		let addresses = self.addresses();
1443		let announcement_message = self.announcement_message();
1444
1445		write_tlv_fields!(writer, {
1446			(0, features, required),
1447			(2, last_update, required),
1448			(4, rgb, required),
1449			(6, alias, required),
1450			(8, announcement_message, option),
1451			(10, *addresses, required_vec), // Versions 0.0.115 through 0.0.123 only serialized an empty vec
1452		});
1453		Ok(())
1454	}
1455}
1456
1457impl Readable for NodeAnnouncementInfo {
1458	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1459		_init_and_read_len_prefixed_tlv_fields!(reader, {
1460			(0, features, required),
1461			(2, last_update, required),
1462			(4, rgb, required),
1463			(6, alias, required),
1464			(8, announcement_message, option),
1465			(10, addresses, required_vec),
1466		});
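		// If the full signed announcement was serialized, it alone reconstructs the `Relayed`
		// variant; otherwise, rebuild the non-relayable `Local` details from the individual
		// TLV fields read above.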
1467		if let Some(announcement) = announcement_message {
1468			Ok(Self::Relayed(announcement))
1469		} else {
1470			Ok(Self::Local(NodeAnnouncementDetails {
1471				features: features.0.unwrap(),
1472				last_update: last_update.0.unwrap(),
1473				rgb: rgb.0.unwrap(),
1474				alias: alias.0.unwrap(),
1475				addresses,
1476			}))
1477		}
1478	}
1479}
1480
1481/// A user-defined name for a node, which may be used when displaying the node in a graph.
1482///
1483/// Since node aliases are provided by third parties, they are a potential avenue for injection
1484/// attacks. Care must be taken when processing.
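///
/// For example, a minimal sketch of how the [`fmt::Display`] impl below sanitizes an alias
/// (trailing NUL bytes are dropped and printable ASCII passes through unchanged):
///
/// ```
/// use lightning::routing::gossip::NodeAlias;
///
/// let mut bytes = [0u8; 32];
/// bytes[..8].copy_from_slice(b"LDK node");
/// assert_eq!(NodeAlias(bytes).to_string(), "LDK node");
/// ```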
1485#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
1486pub struct NodeAlias(pub [u8; 32]);
1487
1488impl fmt::Display for NodeAlias {
1489	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1490		let first_null = self.0.iter().position(|b| *b == 0).unwrap_or(self.0.len());
1491		let bytes = self.0.split_at(first_null).0;
1492		match core::str::from_utf8(bytes) {
1493			Ok(alias) => PrintableString(alias).fmt(f)?,
1494			Err(_) => {
1495				use core::fmt::Write;
1496				for c in bytes.iter().map(|b| *b as char) {
1497					// Display printable ASCII characters
1498					let control_symbol = core::char::REPLACEMENT_CHARACTER;
1499					let c = if c >= '\x20' && c <= '\x7e' { c } else { control_symbol };
1500					f.write_char(c)?;
1501				}
1502			},
1503		};
1504		Ok(())
1505	}
1506}
1507
1508impl Writeable for NodeAlias {
1509	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
1510		self.0.write(w)
1511	}
1512}
1513
1514impl Readable for NodeAlias {
1515	fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
1516		Ok(NodeAlias(Readable::read(r)?))
1517	}
1518}
1519
1520#[derive(Clone, Debug, Eq)]
1521/// Details about a node in the network, known from the network announcement.
1522pub struct NodeInfo {
1523	/// All valid channels a node has announced
1524	pub channels: Vec<u64>,
1525	/// More information about a node from node_announcement.
1526	/// Optional because we store a Node entry after learning about it from
1527	/// a channel announcement, but before receiving a node announcement.
1528	pub announcement_info: Option<NodeAnnouncementInfo>,
1529	/// In memory, each node is assigned a unique ID. They are eagerly reused, ensuring they remain
1530	/// relatively dense.
1531	///
1532	/// These IDs allow the router to avoid a `HashMap` lookup by simply using this value as an
1533	/// index in a `Vec`, skipping a big step in some of the hottest code when routing.
1534	pub(crate) node_counter: u32,
1535}
1536
1537impl PartialEq for NodeInfo {
1538	fn eq(&self, o: &NodeInfo) -> bool {
1539		self.channels == o.channels && self.announcement_info == o.announcement_info
1540	}
1541}
1542
1543impl NodeInfo {
1544	/// Returns whether the node has only announced Tor addresses.
1545	pub fn is_tor_only(&self) -> bool {
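		// Only `true` when an announcement is known, it lists at least one address, and every
		// listed address is a Tor address.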
1546		self.announcement_info
1547			.as_ref()
1548			.map(|info| info.addresses())
1549			.and_then(|addresses| (!addresses.is_empty()).then(|| addresses))
1550			.map(|addresses| addresses.iter().all(|address| address.is_tor()))
1551			.unwrap_or(false)
1552	}
1553}
1554
1555impl fmt::Display for NodeInfo {
1556	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1557		write!(
1558			f,
1559			" channels: {:?}, announcement_info: {:?}",
1560			&self.channels[..],
1561			self.announcement_info
1562		)?;
1563		Ok(())
1564	}
1565}
1566
1567impl Writeable for NodeInfo {
1568	fn write<W: crate::util::ser::Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1569		write_tlv_fields!(writer, {
1570			// Note that older versions of LDK wrote the lowest inbound fees here at type 0
1571			(2, self.announcement_info, option),
1572			(4, self.channels, required_vec),
1573		});
1574		Ok(())
1575	}
1576}
1577
1578// A wrapper allowing for the optional deserialization of `NodeAnnouncementInfo`. Utilizing this is
1579// necessary to maintain compatibility with previous serializations of `SocketAddress` that have an
1580// invalid hostname set. We ignore and eat all errors until we are either able to read a
1581// `NodeAnnouncementInfo` or hit a `ShortRead`, i.e., read the TLV field to the end.
1582struct NodeAnnouncementInfoDeserWrapper(NodeAnnouncementInfo);
1583
1584impl MaybeReadable for NodeAnnouncementInfoDeserWrapper {
1585	fn read<R: io::Read>(reader: &mut R) -> Result<Option<Self>, DecodeError> {
1586		match crate::util::ser::Readable::read(reader) {
1587			Ok(node_announcement_info) => return Ok(Some(Self(node_announcement_info))),
1588			Err(_) => {
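				// Swallow the rest of this TLV stream so the outer reader stays positioned
				// correctly even though we discard the unparseable value.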
1589				copy(reader, &mut sink()).unwrap();
1590				return Ok(None);
1591			},
1592		};
1593	}
1594}
1595
1596impl Readable for NodeInfo {
1597	fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
1598		// Historically, we tracked the lowest inbound fees for any node in order to use it as an
1599		// A* heuristic when routing. Sadly, these days many, many nodes have at least one channel
1600		// with zero inbound fees, causing that heuristic to provide little gain. Worse, because it
1601		// requires additional complexity and lookups during routing, it ends up being a
1602		// performance loss. Thus, we simply ignore the old field here and no longer track it.
1603		_init_and_read_len_prefixed_tlv_fields!(reader, {
1604			(0, _lowest_inbound_channel_fees, option),
1605			(2, announcement_info_wrap, upgradable_option),
1606			(4, channels, required_vec),
1607		});
1608		let _: Option<RoutingFees> = _lowest_inbound_channel_fees;
1609		let announcement_info_wrap: Option<NodeAnnouncementInfoDeserWrapper> =
1610			announcement_info_wrap;
1611
1612		Ok(NodeInfo {
1613			announcement_info: announcement_info_wrap.map(|w| w.0),
1614			channels,
1615			node_counter: u32::max_value(),
1616		})
1617	}
1618}
1619
1620const SERIALIZATION_VERSION: u8 = 1;
1621const MIN_SERIALIZATION_VERSION: u8 = 1;
1622
1623impl<L: Deref> Writeable for NetworkGraph<L>
1624where
1625	L::Target: Logger,
1626{
1627	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
1628		self.test_node_counter_consistency();
1629
1630		write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
1631
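		// The body is the chain hash, a count-prefixed list of (scid, channel info) pairs, a
		// count-prefixed list of (node id, node info) pairs, and finally optional TLV fields.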
1632		self.chain_hash.write(writer)?;
1633		let channels = self.channels.read().unwrap();
1634		(channels.len() as u64).write(writer)?;
1635		for (ref chan_id, ref chan_info) in channels.unordered_iter() {
1636			(*chan_id).write(writer)?;
1637			chan_info.write(writer)?;
1638		}
1639		let nodes = self.nodes.read().unwrap();
1640		(nodes.len() as u64).write(writer)?;
1641		for (ref node_id, ref node_info) in nodes.unordered_iter() {
1642			node_id.write(writer)?;
1643			node_info.write(writer)?;
1644		}
1645
1646		let last_rapid_gossip_sync_timestamp = self.get_last_rapid_gossip_sync_timestamp();
1647		write_tlv_fields!(writer, {
1648			(1, last_rapid_gossip_sync_timestamp, option),
1649		});
1650		Ok(())
1651	}
1652}
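// A minimal round-trip sketch (assuming some `graph: NetworkGraph<L>`, a matching `logger`, and
// that a `Vec<u8>` can serve as the `Writer`):
//
//   let mut buf = Vec::new();
//   graph.write(&mut buf).unwrap();
//   let read_back = NetworkGraph::read(&mut &buf[..], logger).unwrap();
//   assert!(read_back == graph);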
1653
1654impl<L: Deref> ReadableArgs<L> for NetworkGraph<L>
1655where
1656	L::Target: Logger,
1657{
1658	fn read<R: io::Read>(reader: &mut R, logger: L) -> Result<NetworkGraph<L>, DecodeError> {
1659		let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
1660
1661		let chain_hash: ChainHash = Readable::read(reader)?;
1662		let channels_count: u64 = Readable::read(reader)?;
1663		let mut channels = IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE);
1664		for _ in 0..channels_count {
1665			let chan_id: u64 = Readable::read(reader)?;
1666			let chan_info: ChannelInfo = Readable::read(reader)?;
1667			channels.insert(chan_id, chan_info);
1668		}
1669		let nodes_count: u64 = Readable::read(reader)?;
1670		// There shouldn't be anywhere near `u32::MAX` nodes, and we need some headroom to insert
1671		// new nodes during sync, so reject any graphs claiming more than `u32::MAX / 2` nodes.
1672		if nodes_count > u32::max_value() as u64 / 2 {
1673			return Err(DecodeError::InvalidValue);
1674		}
1675		let mut nodes = IndexedMap::with_capacity(NODE_COUNT_ESTIMATE);
1676		for i in 0..nodes_count {
1677			let node_id = Readable::read(reader)?;
1678			let mut node_info: NodeInfo = Readable::read(reader)?;
1679			node_info.node_counter = i as u32;
1680			nodes.insert(node_id, node_info);
1681		}
1682
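		// Node counters are not serialized; they were densely reassigned at read time above, so
		// backfill each channel's cached endpoint counters from the freshly read node entries.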
1683		for (_, chan) in channels.unordered_iter_mut() {
1684			chan.node_one_counter =
1685				nodes.get(&chan.node_one).ok_or(DecodeError::InvalidValue)?.node_counter;
1686			chan.node_two_counter =
1687				nodes.get(&chan.node_two).ok_or(DecodeError::InvalidValue)?.node_counter;
1688		}
1689
1690		let mut last_rapid_gossip_sync_timestamp: Option<u32> = None;
1691		read_tlv_fields!(reader, {
1692			(1, last_rapid_gossip_sync_timestamp, option),
1693		});
1694
1695		Ok(NetworkGraph {
1696			secp_ctx: Secp256k1::verification_only(),
1697			chain_hash,
1698			logger,
1699			channels: RwLock::new(channels),
1700			nodes: RwLock::new(nodes),
1701			removed_node_counters: Mutex::new(Vec::new()),
1702			next_node_counter: AtomicUsize::new(nodes_count as usize),
1703			last_rapid_gossip_sync_timestamp: Mutex::new(last_rapid_gossip_sync_timestamp),
1704			removed_nodes: Mutex::new(new_hash_map()),
1705			removed_channels: Mutex::new(new_hash_map()),
1706			pending_checks: utxo::PendingChecks::new(),
1707		})
1708	}
1709}
1710
1711impl<L: Deref> fmt::Display for NetworkGraph<L>
1712where
1713	L::Target: Logger,
1714{
1715	fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
1716		writeln!(f, "Network map\n[Channels]")?;
1717		for (key, val) in self.channels.read().unwrap().unordered_iter() {
1718			writeln!(f, " {}: {}", key, val)?;
1719		}
1720		writeln!(f, "[Nodes]")?;
1721		for (&node_id, val) in self.nodes.read().unwrap().unordered_iter() {
1722			writeln!(f, " {}: {}", &node_id, val)?;
1723		}
1724		Ok(())
1725	}
1726}
1727
1728impl<L: Deref> Eq for NetworkGraph<L> where L::Target: Logger {}
1729impl<L: Deref> PartialEq for NetworkGraph<L>
1730where
1731	L::Target: Logger,
1732{
1733	fn eq(&self, other: &Self) -> bool {
1734		// For a total lockorder, sort by position in memory and take the inner locks in that order.
1735		// (Assumes that we can't move within memory while a lock is held).
1736		let ord = ((self as *const _) as usize) < ((other as *const _) as usize);
1737		let a = if ord { (&self.channels, &self.nodes) } else { (&other.channels, &other.nodes) };
1738		let b = if ord { (&other.channels, &other.nodes) } else { (&self.channels, &self.nodes) };
1739		let (channels_a, channels_b) = (
1740			a.0.unsafe_well_ordered_double_lock_self(),
1741			b.0.unsafe_well_ordered_double_lock_self(),
1742		);
1743		let (nodes_a, nodes_b) = (
1744			a.1.unsafe_well_ordered_double_lock_self(),
1745			b.1.unsafe_well_ordered_double_lock_self(),
1746		);
1747		self.chain_hash.eq(&other.chain_hash) && channels_a.eq(&channels_b) && nodes_a.eq(&nodes_b)
1748	}
1749}
1750
1751// In Jan 2025 there were about 49K channels.
1752// We over-allocate by a bit because 20% more is better than the double we get if we're slightly
1753// too low.
1754const CHAN_COUNT_ESTIMATE: usize = 60_000;
1755// In Jan 2025 there were about 15K nodes.
1756// We over-allocate by a bit because 33% more is better than the double we get if we're slightly
1757// too low.
1758const NODE_COUNT_ESTIMATE: usize = 20_000;
1759
1760impl<L: Deref> NetworkGraph<L>
1761where
1762	L::Target: Logger,
1763{
1764	/// Creates a new, empty, network graph.
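	///
	/// A minimal sketch, assuming `logger` is some value whose `Deref` target implements
	/// [`Logger`]:
	///
	/// ```ignore
	/// use bitcoin::network::Network;
	/// use lightning::routing::gossip::NetworkGraph;
	///
	/// let network_graph = NetworkGraph::new(Network::Bitcoin, logger);
	/// assert_eq!(network_graph.read_only().channels().len(), 0);
	/// ```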
1765	pub fn new(network: Network, logger: L) -> NetworkGraph<L> {
1766		Self {
1767			secp_ctx: Secp256k1::verification_only(),
1768			chain_hash: ChainHash::using_genesis_block(network),
1769			logger,
1770			channels: RwLock::new(IndexedMap::with_capacity(CHAN_COUNT_ESTIMATE)),
1771			nodes: RwLock::new(IndexedMap::with_capacity(NODE_COUNT_ESTIMATE)),
1772			next_node_counter: AtomicUsize::new(0),
1773			removed_node_counters: Mutex::new(Vec::new()),
1774			last_rapid_gossip_sync_timestamp: Mutex::new(None),
1775			removed_channels: Mutex::new(new_hash_map()),
1776			removed_nodes: Mutex::new(new_hash_map()),
1777			pending_checks: utxo::PendingChecks::new(),
1778		}
1779	}
1780
1781	fn test_node_counter_consistency(&self) {
1782		#[cfg(test)]
1783		{
1784			let channels = self.channels.read().unwrap();
1785			let nodes = self.nodes.read().unwrap();
1786			let removed_node_counters = self.removed_node_counters.lock().unwrap();
1787			let next_counter = self.next_node_counter.load(Ordering::Acquire);
1788			assert!(next_counter < (u32::max_value() as usize) / 2);
1789			let mut used_node_counters = vec![0u8; next_counter / 8 + 1];
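			// Build a bitset over every counter below `next_counter` and assert that each one is
			// used exactly once, either by a live node or by an entry in the free list.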
1790
1791			for counter in removed_node_counters.iter() {
1792				let pos = (*counter as usize) / 8;
1793				let bit = 1 << (counter % 8);
1794				assert_eq!(used_node_counters[pos] & bit, 0);
1795				used_node_counters[pos] |= bit;
1796			}
1797			for (_, node) in nodes.unordered_iter() {
1798				assert!((node.node_counter as usize) < next_counter);
1799				let pos = (node.node_counter as usize) / 8;
1800				let bit = 1 << (node.node_counter % 8);
1801				assert_eq!(used_node_counters[pos] & bit, 0);
1802				used_node_counters[pos] |= bit;
1803			}
1804
1805			for (idx, used_bitset) in used_node_counters.iter().enumerate() {
1806				if idx != next_counter / 8 {
1807					assert_eq!(*used_bitset, 0xff);
1808				} else {
1809					assert_eq!(*used_bitset, (1u8 << (next_counter % 8)) - 1);
1810				}
1811			}
1812
1813			for (_, chan) in channels.unordered_iter() {
1814				assert_eq!(chan.node_one_counter, nodes.get(&chan.node_one).unwrap().node_counter);
1815				assert_eq!(chan.node_two_counter, nodes.get(&chan.node_two).unwrap().node_counter);
1816			}
1817		}
1818	}
1819
1820	/// Returns a read-only view of the network graph.
1821	pub fn read_only(&'_ self) -> ReadOnlyNetworkGraph<'_> {
1822		self.test_node_counter_consistency();
1823		let channels = self.channels.read().unwrap();
1824		let nodes = self.nodes.read().unwrap();
1825		ReadOnlyNetworkGraph {
1826			channels,
1827			nodes,
1828			max_node_counter: (self.next_node_counter.load(Ordering::Acquire) as u32)
1829				.saturating_sub(1),
1830		}
1831	}
1832
1833	/// The unix timestamp provided by the most recent rapid gossip sync.
1834	/// It will be set by the rapid sync process after every sync completion.
1835	pub fn get_last_rapid_gossip_sync_timestamp(&self) -> Option<u32> {
1836		self.last_rapid_gossip_sync_timestamp.lock().unwrap().clone()
1837	}
1838
1839	/// Update the unix timestamp provided by the most recent rapid gossip sync.
1840	/// This should be done automatically by the rapid sync process after every sync completion.
1841	pub fn set_last_rapid_gossip_sync_timestamp(&self, last_rapid_gossip_sync_timestamp: u32) {
1842		self.last_rapid_gossip_sync_timestamp
1843			.lock()
1844			.unwrap()
1845			.replace(last_rapid_gossip_sync_timestamp);
1846	}
1847
1848	/// Clears the `NodeAnnouncementInfo` field for all nodes in the `NetworkGraph` for testing
1849	/// purposes.
1850	#[cfg(test)]
1851	pub fn clear_nodes_announcement_info(&self) {
1852		for node in self.nodes.write().unwrap().unordered_iter_mut() {
1853			node.1.announcement_info = None;
1854		}
1855	}
1856
1857	/// For an already known node (from channel announcements), update its stored properties from a
1858	/// given node announcement.
1859	///
1860	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1861	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1862	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1863	pub fn update_node_from_announcement(
1864		&self, msg: &msgs::NodeAnnouncement,
1865	) -> Result<(), LightningError> {
1866		// First check if we have the announcement already to avoid the CPU cost of validating a
1867		// redundant announcement.
1868		if let Some(node) = self.nodes.read().unwrap().get(&msg.contents.node_id) {
1869			if let Some(node_info) = node.announcement_info.as_ref() {
1870				if node_info.last_update() == msg.contents.timestamp {
1871					return Err(LightningError {
1872						err: "Update had the same timestamp as last processed update".to_owned(),
1873						action: ErrorAction::IgnoreDuplicateGossip,
1874					});
1875				}
1876			}
1877		}
1878		verify_node_announcement(msg, &self.secp_ctx)?;
1879		self.update_node_from_announcement_intern(&msg.contents, Some(&msg))
1880	}
1881
1882	/// For an already known node (from channel announcements), update its stored properties from a
1883	/// given node announcement without verifying the associated signatures. Because we aren't
1884	/// given the associated signatures here we cannot relay the node announcement to any of our
1885	/// peers.
1886	pub fn update_node_from_unsigned_announcement(
1887		&self, msg: &msgs::UnsignedNodeAnnouncement,
1888	) -> Result<(), LightningError> {
1889		self.update_node_from_announcement_intern(msg, None)
1890	}
1891
1892	fn update_node_from_announcement_intern(
1893		&self, msg: &msgs::UnsignedNodeAnnouncement, full_msg: Option<&msgs::NodeAnnouncement>,
1894	) -> Result<(), LightningError> {
1895		let mut nodes = self.nodes.write().unwrap();
1896		match nodes.get_mut(&msg.node_id) {
1897			None => {
1898				core::mem::drop(nodes);
1899				self.pending_checks.check_hold_pending_node_announcement(msg, full_msg)?;
1900				Err(LightningError {
1901					err: "No existing channels for node_announcement".to_owned(),
1902					action: ErrorAction::IgnoreError,
1903				})
1904			},
1905			Some(node) => {
1906				if let Some(node_info) = node.announcement_info.as_ref() {
1907					// The timestamp field is somewhat of a misnomer - the BOLTs use it to order
1908					// updates to ensure you always have the latest one, only vaguely suggesting
1909					// that it be at least the current time.
1910					if node_info.last_update() > msg.timestamp {
1911						return Err(LightningError {
1912							err: "Update older than last processed update".to_owned(),
1913							action: ErrorAction::IgnoreDuplicateGossip,
1914						});
1915					} else if node_info.last_update() == msg.timestamp {
1916						return Err(LightningError {
1917							err: "Update had the same timestamp as last processed update"
1918								.to_owned(),
1919							action: ErrorAction::IgnoreDuplicateGossip,
1920						});
1921					}
1922				}
1923
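				// Only keep the full signed message (and thus relay it to peers) if its excess
				// data is small; oversized announcements are stored as non-relayable `Local` info.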
1924				let should_relay = msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1925					&& msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
1926					&& msg.excess_data.len() + msg.excess_address_data.len()
1927						<= MAX_EXCESS_BYTES_FOR_RELAY;
1928
1929				node.announcement_info =
1930					if let (Some(signed_announcement), true) = (full_msg, should_relay) {
1931						Some(NodeAnnouncementInfo::Relayed(signed_announcement.clone()))
1932					} else {
1933						Some(NodeAnnouncementInfo::Local(NodeAnnouncementDetails {
1934							features: msg.features.clone(),
1935							last_update: msg.timestamp,
1936							rgb: msg.rgb,
1937							alias: msg.alias,
1938							addresses: msg.addresses.clone(),
1939						}))
1940					};
1941
1942				Ok(())
1943			},
1944		}
1945	}
1946
1947	/// Store or update channel info from a channel announcement.
1948	///
1949	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1950	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1951	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1952	///
1953	/// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
1954	/// the corresponding UTXO exists on chain and is correctly-formatted.
1955	pub fn update_channel_from_announcement<U: Deref>(
1956		&self, msg: &msgs::ChannelAnnouncement, utxo_lookup: &Option<U>,
1957	) -> Result<(), LightningError>
1958	where
1959		U::Target: UtxoLookup,
1960	{
1961		self.pre_channel_announcement_validation_check(&msg.contents, utxo_lookup)?;
1962		verify_channel_announcement(msg, &self.secp_ctx)?;
1963		self.update_channel_from_unsigned_announcement_intern(&msg.contents, Some(msg), utxo_lookup)
1964	}
1965
1966	/// Store or update channel info from a channel announcement.
1967	///
1968	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
1969	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
1970	/// routing messages from a source using a protocol other than the lightning P2P protocol.
1971	///
1972	/// This will skip verification of whether the channel is actually on-chain.
1973	pub fn update_channel_from_announcement_no_lookup(
1974		&self, msg: &ChannelAnnouncement,
1975	) -> Result<(), LightningError> {
1976		self.update_channel_from_announcement::<&UtxoResolver>(msg, &None)
1977	}
1978
1979	/// Store or update channel info from a channel announcement without verifying the associated
1980	/// signatures. Because we aren't given the associated signatures here we cannot relay the
1981	/// channel announcement to any of our peers.
1982	///
1983	/// If a [`UtxoLookup`] object is provided via `utxo_lookup`, it will be called to verify
1984	/// the corresponding UTXO exists on chain and is correctly-formatted.
1985	pub fn update_channel_from_unsigned_announcement<U: Deref>(
1986		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
1987	) -> Result<(), LightningError>
1988	where
1989		U::Target: UtxoLookup,
1990	{
1991		self.pre_channel_announcement_validation_check(&msg, utxo_lookup)?;
1992		self.update_channel_from_unsigned_announcement_intern(msg, None, utxo_lookup)
1993	}
1994
1995	/// Update channel from partial announcement data received via rapid gossip sync
1996	///
1997	/// `timestamp: u64`: Timestamp emulating the backdated original announcement receipt (by the
1998	/// rapid gossip sync server)
1999	///
2000	/// All other parameters as used in [`msgs::UnsignedChannelAnnouncement`] fields.
2001	pub fn add_channel_from_partial_announcement(
2002		&self, short_channel_id: u64, timestamp: u64, features: ChannelFeatures,
2003		node_id_1: PublicKey, node_id_2: PublicKey,
2004	) -> Result<(), LightningError> {
2005		if node_id_1 == node_id_2 {
2006			return Err(LightningError {
2007				err: "Channel announcement node had a channel with itself".to_owned(),
2008				action: ErrorAction::IgnoreError,
2009			});
2010		};
2011
2012		let node_1 = NodeId::from_pubkey(&node_id_1);
2013		let node_2 = NodeId::from_pubkey(&node_id_2);
2014		let channel_info = ChannelInfo {
2015			features,
2016			node_one: node_1.clone(),
2017			one_to_two: None,
2018			node_two: node_2.clone(),
2019			two_to_one: None,
2020			capacity_sats: None,
2021			announcement_message: None,
2022			announcement_received_time: timestamp,
2023			node_one_counter: u32::max_value(),
2024			node_two_counter: u32::max_value(),
2025		};
2026
2027		self.add_channel_between_nodes(short_channel_id, channel_info, None)
2028	}
2029
2030	fn add_channel_between_nodes(
2031		&self, short_channel_id: u64, channel_info: ChannelInfo, utxo_value: Option<Amount>,
2032	) -> Result<(), LightningError> {
2033		let mut channels = self.channels.write().unwrap();
2034		let mut nodes = self.nodes.write().unwrap();
2035
2036		let node_id_a = channel_info.node_one.clone();
2037		let node_id_b = channel_info.node_two.clone();
2038
2039		log_gossip!(
2040			self.logger,
2041			"Adding channel {} between nodes {} and {}",
2042			short_channel_id,
2043			node_id_a,
2044			node_id_b
2045		);
2046
2047		let channel_info = match channels.entry(short_channel_id) {
2048			IndexedMapEntry::Occupied(mut entry) => {
2049				//TODO: because asking the blockchain if short_channel_id is valid is only optional
2050				//in the blockchain API, we need to handle it smartly here, though it's unclear
2051				//exactly how...
2052				if utxo_value.is_some() {
2053					// Either our UTXO provider is busted, there was a reorg, or the UTXO provider
2054					// only sometimes returns results. In any case remove the previous entry. Note
2055					// that the spec expects us to "blacklist" the node_ids involved, but we can't
2056					// do that because
2057					// a) we don't *require* a UTXO provider that always returns results.
2058					// b) we don't track UTXOs of channels we know about and remove them if they
2059					//    get reorg'd out.
2060					// c) it's unclear how to do so without exposing ourselves to massive DoS risk.
2061					self.remove_channel_in_nodes(&mut nodes, &entry.get(), short_channel_id);
2062					*entry.get_mut() = channel_info;
2063					entry.into_mut()
2064				} else {
2065					return Err(LightningError {
2066						err: "Already have knowledge of channel".to_owned(),
2067						action: ErrorAction::IgnoreDuplicateGossip,
2068					});
2069				}
2070			},
2071			IndexedMapEntry::Vacant(entry) => entry.insert(channel_info),
2072		};
2073
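		// Now add the channel to each endpoint's node entry, creating entries as needed and
		// assigning node counters (reusing a freed counter when available to keep them dense).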
2074		let mut node_counter_id = [
2075			(&mut channel_info.node_one_counter, node_id_a),
2076			(&mut channel_info.node_two_counter, node_id_b),
2077		];
2078		for (chan_info_node_counter, current_node_id) in node_counter_id.iter_mut() {
2079			match nodes.entry(current_node_id.clone()) {
2080				IndexedMapEntry::Occupied(node_entry) => {
2081					let node = node_entry.into_mut();
2082					node.channels.push(short_channel_id);
2083					**chan_info_node_counter = node.node_counter;
2084				},
2085				IndexedMapEntry::Vacant(node_entry) => {
2086					let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
2087					**chan_info_node_counter = removed_node_counters.pop().unwrap_or_else(|| {
2088						self.next_node_counter.fetch_add(1, Ordering::Relaxed) as u32
2089					});
2090					node_entry.insert(NodeInfo {
2091						channels: vec![short_channel_id],
2092						announcement_info: None,
2093						node_counter: **chan_info_node_counter,
2094					});
2095				},
2096			};
2097		}
2098
2099		core::mem::drop(nodes);
2100		core::mem::drop(channels);
2101		self.test_node_counter_consistency();
2102
2103		Ok(())
2104	}
2105
2106	/// If we already have all the information for a channel that we're ever going to get, there's
2107	/// no reason to redundantly process it.
2108	///
2109	/// In those cases, this will return an `Err` that we can return immediately. Otherwise it will
2110	/// return an `Ok(())`.
2111	fn pre_channel_announcement_validation_check<U: Deref>(
2112		&self, msg: &msgs::UnsignedChannelAnnouncement, utxo_lookup: &Option<U>,
2113	) -> Result<(), LightningError>
2114	where
2115		U::Target: UtxoLookup,
2116	{
2117		let channels = self.channels.read().unwrap();
2118
2119		if let Some(chan) = channels.get(&msg.short_channel_id) {
2120			if chan.capacity_sats.is_some() {
2121				// If we'd previously looked up the channel on-chain and checked the script
2122				// against what appears on-chain, ignore the duplicate announcement.
2123				//
2124				// Because a reorg could replace one channel with another at the same SCID, if
2125				// the channel appears to be different, we re-validate. This doesn't expose us
2126				// to any more DoS risk than not, as a peer can always flood us with
2127				// randomly-generated SCID values anyway.
2128				//
2129				// We use the Node IDs rather than the bitcoin_keys to check for "equivalence"
2130				// as we didn't (necessarily) store the bitcoin keys, and we only really care
2131				// if the peers on the channel changed anyway.
2132				if msg.node_id_1 == chan.node_one && msg.node_id_2 == chan.node_two {
2133					return Err(LightningError {
2134						err: "Already have chain-validated channel".to_owned(),
2135						action: ErrorAction::IgnoreDuplicateGossip,
2136					});
2137				}
2138			} else if utxo_lookup.is_none() {
2139				// Similarly, if we can't check the chain right now anyway, ignore the
2140				// duplicate announcement without bothering to take the channels write lock.
2141				return Err(LightningError {
2142					err: "Already have non-chain-validated channel".to_owned(),
2143					action: ErrorAction::IgnoreDuplicateGossip,
2144				});
2145			}
2146		}
2147
2148		Ok(())
2149	}
2150
2151	/// Update channel information from a received announcement.
2152	///
2153	/// Generally [`Self::pre_channel_announcement_validation_check`] should have been called
2154	/// first.
2155	fn update_channel_from_unsigned_announcement_intern<U: Deref>(
2156		&self, msg: &msgs::UnsignedChannelAnnouncement,
2157		full_msg: Option<&msgs::ChannelAnnouncement>, utxo_lookup: &Option<U>,
2158	) -> Result<(), LightningError>
2159	where
2160		U::Target: UtxoLookup,
2161	{
2162		if msg.node_id_1 == msg.node_id_2 || msg.bitcoin_key_1 == msg.bitcoin_key_2 {
2163			return Err(LightningError {
2164				err: "Channel announcement node had a channel with itself".to_owned(),
2165				action: ErrorAction::IgnoreError,
2166			});
2167		}
2168
2169		if msg.chain_hash != self.chain_hash {
2170			return Err(LightningError {
2171				err: "Channel announcement chain hash does not match genesis hash".to_owned(),
2172				action: ErrorAction::IgnoreAndLog(Level::Debug),
2173			});
2174		}
2175
2176		{
2177			let removed_channels = self.removed_channels.lock().unwrap();
2178			let removed_nodes = self.removed_nodes.lock().unwrap();
2179			if removed_channels.contains_key(&msg.short_channel_id)
2180				|| removed_nodes.contains_key(&msg.node_id_1)
2181				|| removed_nodes.contains_key(&msg.node_id_2)
2182			{
2183				return Err(LightningError{
2184					err: format!("Channel with SCID {} or one of its nodes was removed from our network graph recently", &msg.short_channel_id),
2185					action: ErrorAction::IgnoreAndLog(Level::Gossip)});
2186			}
2187		}
2188
2189		let utxo_value =
2190			self.pending_checks.check_channel_announcement(utxo_lookup, msg, full_msg)?;
2191
2192		#[allow(unused_mut, unused_assignments)]
2193		let mut announcement_received_time = 0;
2194		#[cfg(feature = "std")]
2195		{
2196			announcement_received_time = SystemTime::now()
2197				.duration_since(UNIX_EPOCH)
2198				.expect("Time must be > 1970")
2199				.as_secs();
2200		}
2201
2202		let chan_info = ChannelInfo {
2203			features: msg.features.clone(),
2204			node_one: msg.node_id_1,
2205			one_to_two: None,
2206			node_two: msg.node_id_2,
2207			two_to_one: None,
2208			capacity_sats: utxo_value.map(|a| a.to_sat()),
2209			announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2210				full_msg.cloned()
2211			} else {
2212				None
2213			},
2214			announcement_received_time,
2215			node_one_counter: u32::max_value(),
2216			node_two_counter: u32::max_value(),
2217		};
2218
2219		self.add_channel_between_nodes(msg.short_channel_id, chan_info, utxo_value)?;
2220
2221		log_gossip!(
2222			self.logger,
2223			"Added channel_announcement for {}{}",
2224			msg.short_channel_id,
2225			if !msg.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" }
2226		);
2227		Ok(())
2228	}
2229
2230	/// Marks a channel in the graph as failed permanently.
2231	///
2232	/// The channel and any node for which this was their last channel are removed from the graph.
2233	pub fn channel_failed_permanent(&self, short_channel_id: u64) {
2234		#[cfg(feature = "std")]
2235		let current_time_unix = Some(
2236			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2237		);
2238		#[cfg(not(feature = "std"))]
2239		let current_time_unix = None;
2240
2241		self.channel_failed_permanent_with_time(short_channel_id, current_time_unix)
2242	}
2243
2244	/// Marks a channel in the graph as failed permanently.
2245	///
2246	/// The channel and any node for which this was their last channel are removed from the graph.
2247	fn channel_failed_permanent_with_time(
2248		&self, short_channel_id: u64, current_time_unix: Option<u64>,
2249	) {
2250		let mut channels = self.channels.write().unwrap();
2251		if let Some(chan) = channels.remove(&short_channel_id) {
2252			let mut nodes = self.nodes.write().unwrap();
2253			self.removed_channels.lock().unwrap().insert(short_channel_id, current_time_unix);
2254			self.remove_channel_in_nodes(&mut nodes, &chan, short_channel_id);
2255		}
2256	}
2257
2258	/// Marks a node in the graph as permanently failed, effectively removing it and its channels
2259	/// from local storage.
2260	pub fn node_failed_permanent(&self, node_id: &PublicKey) {
2261		#[cfg(feature = "std")]
2262		let current_time_unix = Some(
2263			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs(),
2264		);
2265		#[cfg(not(feature = "std"))]
2266		let current_time_unix = None;
2267
2268		let node_id = NodeId::from_pubkey(node_id);
2269		let mut channels = self.channels.write().unwrap();
2270		let mut nodes = self.nodes.write().unwrap();
2271		let mut removed_channels = self.removed_channels.lock().unwrap();
2272		let mut removed_nodes = self.removed_nodes.lock().unwrap();
2273
2274		if let Some(node) = nodes.remove(&node_id) {
2275			let mut removed_node_counters = self.removed_node_counters.lock().unwrap();
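			// Remove every channel the failed node participated in, and prune any counterparty
			// left with no remaining channels.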
2276			for scid in node.channels.iter() {
2277				if let Some(chan_info) = channels.remove(scid) {
2278					let other_node_id = if node_id == chan_info.node_one {
2279						chan_info.node_two
2280					} else {
2281						chan_info.node_one
2282					};
2283					if let IndexedMapEntry::Occupied(mut other_node_entry) =
2284						nodes.entry(other_node_id)
2285					{
2286						other_node_entry.get_mut().channels.retain(|chan_id| *scid != *chan_id);
2287						if other_node_entry.get().channels.is_empty() {
2288							removed_node_counters.push(other_node_entry.get().node_counter);
2289							other_node_entry.remove_entry();
2290						}
2291					}
2292					removed_channels.insert(*scid, current_time_unix);
2293				} else {
2294					debug_assert!(false, "Channels in nodes must always have channel info");
2295				}
2296			}
2297			removed_node_counters.push(node.node_counter);
2298			removed_nodes.insert(node_id, current_time_unix);
2299		}
2300	}
2301
2302	#[cfg(feature = "std")]
2303	/// Removes information about channels that we haven't heard any updates about in some time.
2304	/// This can be used regularly to prune the network graph of channels that likely no longer
2305	/// exist.
2306	///
2307	/// While there is no formal requirement that nodes regularly re-broadcast their channel
2308	/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
2309	/// pruning occur for updates which are at least two weeks old, which we implement here.
2310	///
2311	/// Note that for users of the `lightning-background-processor` crate this method may be
2312	/// automatically called regularly for you.
2313	///
2314	/// This method will also cause us to stop tracking removed nodes and channels if they have been
2315	/// in the map for a while so that these can be resynced from gossip in the future.
2316	///
2317	/// This method is only available with the `std` feature. See
2318	/// [`NetworkGraph::remove_stale_channels_and_tracking_with_time`] for non-`std` use.
2319	pub fn remove_stale_channels_and_tracking(&self) {
2320		let time =
2321			SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
2322		self.remove_stale_channels_and_tracking_with_time(time);
2323	}
2324
2325	/// Removes information about channels that we haven't heard any updates about in some time.
2326	/// This can be used regularly to prune the network graph of channels that likely no longer
2327	/// exist.
2328	///
2329	/// While there is no formal requirement that nodes regularly re-broadcast their channel
2330	/// updates every two weeks, the non-normative section of BOLT 7 currently suggests that
2331	/// pruning occur for updates which are at least two weeks old, which we implement here.
2332	///
2333	/// This method will also cause us to stop tracking removed nodes and channels if they have been
2334	/// in the map for a while so that these can be resynced from gossip in the future.
2335	#[cfg_attr(feature = "std", doc = "")]
2336	#[cfg_attr(
2337		feature = "std",
2338		doc = "This function takes the current unix time as an argument. For users with the `std` feature"
2339	)]
2340	#[cfg_attr(
2341		feature = "std",
2342		doc = "enabled, [`NetworkGraph::remove_stale_channels_and_tracking`] may be preferable."
2343	)]
2344	pub fn remove_stale_channels_and_tracking_with_time(&self, current_time_unix: u64) {
2345		let mut channels = self.channels.write().unwrap();
2346		// Time out if we haven't received an update in at least 14 days.
2347		if current_time_unix > u32::max_value() as u64 {
2348			return;
2349		} // Remove by 2106
2350		if current_time_unix < STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2351			return;
2352		}
2353		let min_time_unix: u32 = (current_time_unix - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
2354		// Sadly BTreeMap::retain was only stabilized in 1.53 so we can't switch to it for some
2355		// time.
2356		let mut scids_to_remove = Vec::new();
2357		for (scid, info) in channels.unordered_iter_mut() {
2358			if info.one_to_two.is_some()
2359				&& info.one_to_two.as_ref().unwrap().last_update < min_time_unix
2360			{
2361				log_gossip!(self.logger, "Removing directional update one_to_two (0) for channel {} due to its timestamp {} being below {}",
2362					scid, info.one_to_two.as_ref().unwrap().last_update, min_time_unix);
2363				info.one_to_two = None;
2364			}
2365			if info.two_to_one.is_some()
2366				&& info.two_to_one.as_ref().unwrap().last_update < min_time_unix
2367			{
2368				log_gossip!(self.logger, "Removing directional update two_to_one (1) for channel {} due to its timestamp {} being below {}",
2369					scid, info.two_to_one.as_ref().unwrap().last_update, min_time_unix);
2370				info.two_to_one = None;
2371			}
2372			if info.one_to_two.is_none() || info.two_to_one.is_none() {
2373				// We check the announcement_received_time here to ensure we don't drop
2374				// announcements that we just received and are just waiting for our peer to send a
2375				// channel_update for.
2376				let announcement_received_timestamp = info.announcement_received_time;
2377				if announcement_received_timestamp < min_time_unix as u64 {
2378					log_gossip!(self.logger, "Removing channel {} because both directional updates are missing and its announcement timestamp {} is below {}",
2379						scid, announcement_received_timestamp, min_time_unix);
2380					scids_to_remove.push(*scid);
2381				}
2382			}
2383		}
2384		if !scids_to_remove.is_empty() {
2385			let mut nodes = self.nodes.write().unwrap();
2386			for scid in scids_to_remove {
2387				let info = channels
2388					.remove(&scid)
2389					.expect("We just accessed this scid, it should be present");
2390				self.remove_channel_in_nodes(&mut nodes, &info, scid);
2391				self.removed_channels.lock().unwrap().insert(scid, Some(current_time_unix));
2392			}
2393		}
2394
2395		let should_keep_tracking = |time: &mut Option<u64>| {
2396			if let Some(time) = time {
2397				current_time_unix.saturating_sub(*time) < REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS
2398			} else {
2399				// NOTE: In the case of non-`std`, we won't have access to the current UNIX time at the time of removal,
2400				// so we'll just set the removal time here to the current UNIX time on the very next invocation
2401				// of this function.
2402				#[cfg(not(feature = "std"))]
2403				{
2404					let mut tracked_time = Some(current_time_unix);
2405					core::mem::swap(time, &mut tracked_time);
2406					return true;
2407				}
2408				#[allow(unreachable_code)]
2409				false
2410			}
2411		};
2412
2413		self.removed_channels.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2414		self.removed_nodes.lock().unwrap().retain(|_, time| should_keep_tracking(time));
2415	}
2416
2417	/// For an already known (from announcement) channel, update info about one of the directions
2418	/// of the channel.
2419	///
2420	/// You probably don't want to call this directly, instead relying on a [`P2PGossipSync`]'s
2421	/// [`RoutingMessageHandler`] implementation to call it indirectly. This may be useful to accept
2422	/// routing messages from a source using a protocol other than the lightning P2P protocol.
2423	///
2424	/// If not built with `std`, any updates with a timestamp more than two weeks in the past or
2425	/// materially in the future will be rejected.
2426	pub fn update_channel(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2427		self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), false)
2428	}
2429
2430	/// For an already known (from announcement) channel, update info about one of the directions
2431	/// of the channel without verifying the associated signatures. Because we aren't given the
2432	/// associated signatures here we cannot relay the channel update to any of our peers.
2433	///
2434	/// If not built with `std`, any updates with a timestamp more than two weeks in the past or
2435	/// materially in the future will be rejected.
2436	pub fn update_channel_unsigned(
2437		&self, msg: &msgs::UnsignedChannelUpdate,
2438	) -> Result<(), LightningError> {
2439		self.update_channel_internal(msg, None, None, false)
2440	}
2441
2442	/// For an already known (from announcement) channel, verify the given [`ChannelUpdate`].
2443	///
2444	/// This checks whether the update would currently be accepted by [`Self::update_channel`].
2445	///
2446	/// If not built with `std`, any updates with a timestamp more than two weeks in the past or
2447	/// materially in the future will be rejected.
2448	pub fn verify_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<(), LightningError> {
2449		self.update_channel_internal(&msg.contents, Some(&msg), Some(&msg.signature), true)
2450	}
2451
2452	fn update_channel_internal(
2453		&self, msg: &msgs::UnsignedChannelUpdate, full_msg: Option<&msgs::ChannelUpdate>,
2454		sig: Option<&secp256k1::ecdsa::Signature>, only_verify: bool,
2455	) -> Result<(), LightningError> {
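		// Per BOLT 7, bit 0 of `channel_flags` selects which direction this update applies to,
		// and bit 1 marks the channel as disabled.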
2456		let chan_enabled = msg.channel_flags & (1 << 1) != (1 << 1);
2457
2458		if msg.chain_hash != self.chain_hash {
2459			return Err(LightningError {
2460				err: "Channel update chain hash does not match genesis hash".to_owned(),
2461				action: ErrorAction::IgnoreAndLog(Level::Debug),
2462			});
2463		}
2464
2465		#[cfg(all(feature = "std", not(test), not(feature = "_test_utils")))]
2466		{
2467			// Note that many tests rely on being able to set arbitrarily old timestamps, thus we
2468			// disable this check during tests!
2469			let time = SystemTime::now()
2470				.duration_since(UNIX_EPOCH)
2471				.expect("Time must be > 1970")
2472				.as_secs();
2473			if (msg.timestamp as u64) < time - STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS {
2474				return Err(LightningError {
2475					err: "channel_update is older than two weeks old".to_owned(),
2476					action: ErrorAction::IgnoreAndLog(Level::Gossip),
2477				});
2478			}
2479			if msg.timestamp as u64 > time + 60 * 60 * 24 {
2480				return Err(LightningError {
2481					err: "channel_update has a timestamp more than a day in the future".to_owned(),
2482					action: ErrorAction::IgnoreAndLog(Level::Gossip),
2483				});
2484			}
2485		}
2486
2487		log_gossip!(
2488			self.logger,
2489			"Updating channel {} in direction {} with timestamp {}",
2490			msg.short_channel_id,
2491			msg.channel_flags & 1,
2492			msg.timestamp
2493		);
2494
2495		if msg.htlc_maximum_msat > MAX_VALUE_MSAT {
2496			return Err(LightningError {
2497				err: "htlc_maximum_msat is larger than maximum possible msats".to_owned(),
2498				action: ErrorAction::IgnoreError,
2499			});
2500		}
2501
2502		let check_update_latest =
2503			|target: &Option<ChannelUpdateInfo>| -> Result<(), LightningError> {
2504				if let Some(existing_chan_info) = target {
2505					// The timestamp field is somewhat of a misnomer - the BOLTs use it to
2506					// order updates to ensure you always have the latest one, only
2507					// suggesting that it be at least the current time. For
2508					// channel_updates specifically, the BOLTs discuss the possibility of
2509					// pruning based on the timestamp field being more than two weeks old,
2510					// but only in the non-normative section.
2511					if existing_chan_info.last_update > msg.timestamp {
2512						return Err(LightningError {
2513							err: "Update older than last processed update".to_owned(),
2514							action: ErrorAction::IgnoreDuplicateGossip,
2515						});
2516					} else if existing_chan_info.last_update == msg.timestamp {
2517						return Err(LightningError {
2518							err: "Update had same timestamp as last processed update".to_owned(),
2519							action: ErrorAction::IgnoreDuplicateGossip,
2520						});
2521					}
2522				}
2523				Ok(())
2524			};
2525
2526		let check_msg_sanity =
2527			|channel: &ChannelInfo| -> Result<(), LightningError> {
2528				if let Some(capacity_sats) = channel.capacity_sats {
2529					// The capacity may be available on-chain by now even though it wasn't at announcement
2530					// time (in which case this field is `None`). Don't query the UTXO set here, to reduce DoS risks.
2531					if capacity_sats > MAX_VALUE_MSAT / 1000
2532						|| msg.htlc_maximum_msat > capacity_sats * 1000
2533					{
2534						return Err(LightningError{err:
2535						"htlc_maximum_msat is larger than channel capacity or capacity is bogus".to_owned(),
2536						action: ErrorAction::IgnoreError});
2537					}
2538				}
2539
2540				if msg.channel_flags & 1 == 1 {
2541					check_update_latest(&channel.two_to_one)
2542				} else {
2543					check_update_latest(&channel.one_to_two)
2544				}
2545			};
2546
2547		let mut node_pubkey = None;
2548		{
2549			let channels = self.channels.read().unwrap();
2550			match channels.get(&msg.short_channel_id) {
2551				None => {
2552					core::mem::drop(channels);
2553					self.pending_checks.check_hold_pending_channel_update(msg, full_msg)?;
2554					return Err(LightningError {
2555						err: "Couldn't find channel for update".to_owned(),
2556						action: ErrorAction::IgnoreAndLog(Level::Gossip),
2557					});
2558				},
2559				Some(channel) => {
2560					check_msg_sanity(channel)?;
2561					let node_id = if msg.channel_flags & 1 == 1 {
2562						channel.node_two.as_slice()
2563					} else {
2564						channel.node_one.as_slice()
2565					};
2566					if sig.is_some() {
2567						// PublicKey parsing isn't entirely trivial as it requires that we check
2568						// that the provided point is on the curve. Thus, if we don't have a
2569						// signature to verify, we want to skip the parsing step entirely.
2570						// This represents a substantial speedup in applying RGS snapshots.
2571						node_pubkey =
2572							Some(PublicKey::from_slice(node_id).map_err(|_| LightningError {
2573								err: "Couldn't parse source node pubkey".to_owned(),
2574								action: ErrorAction::IgnoreAndLog(Level::Debug),
2575							})?);
2576					}
2577				},
2578			}
2579		}
2580
2581		if let Some(sig) = sig {
2582			let msg_hash = hash_to_message!(&message_sha256d_hash(&msg)[..]);
2583			let node_pubkey = if let Some(pubkey) = node_pubkey {
2584				pubkey
2585			} else {
2586				debug_assert!(false, "node_pubkey should have been decoded above");
2587				let err = "node_pubkey wasn't decoded but we need it to check a sig".to_owned();
2588				let action = ErrorAction::IgnoreAndLog(Level::Error);
2589				return Err(LightningError { err, action });
2590			};
2591			secp_verify_sig!(self.secp_ctx, &msg_hash, &sig, &node_pubkey, "channel_update");
2592		}
2593
2594		if only_verify {
2595			return Ok(());
2596		}
2597
2598		let mut channels = self.channels.write().unwrap();
2599		if let Some(channel) = channels.get_mut(&msg.short_channel_id) {
2600			check_msg_sanity(channel)?;
2601
2602			let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY {
2603				full_msg.cloned()
2604			} else {
2605				None
2606			};
2607
2608			let new_channel_info = Some(ChannelUpdateInfo {
2609				enabled: chan_enabled,
2610				last_update: msg.timestamp,
2611				cltv_expiry_delta: msg.cltv_expiry_delta,
2612				htlc_minimum_msat: msg.htlc_minimum_msat,
2613				htlc_maximum_msat: msg.htlc_maximum_msat,
2614				fees: RoutingFees {
2615					base_msat: msg.fee_base_msat,
2616					proportional_millionths: msg.fee_proportional_millionths,
2617				},
2618				last_update_message,
2619			});
2620
2621			if msg.channel_flags & 1 == 1 {
2622				channel.two_to_one = new_channel_info;
2623			} else {
2624				channel.one_to_two = new_channel_info;
2625			}
2626		}
2627
2628		Ok(())
2629	}
2630
2631	fn remove_channel_in_nodes(
2632		&self, nodes: &mut IndexedMap<NodeId, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64,
2633	) {
2634		macro_rules! remove_from_node {
2635			($node_id: expr) => {
2636				if let IndexedMapEntry::Occupied(mut entry) = nodes.entry($node_id) {
2637					entry.get_mut().channels.retain(|chan_id| short_channel_id != *chan_id);
2638					if entry.get().channels.is_empty() {
2639						self.removed_node_counters.lock().unwrap().push(entry.get().node_counter);
2640						entry.remove_entry();
2641					}
2642				} else {
2643					panic!(
2644						"Had channel that pointed to unknown node (ie inconsistent network map)!"
2645					);
2646				}
2647			};
2648		}
2649
2650		remove_from_node!(chan.node_one);
2651		remove_from_node!(chan.node_two);
2652	}
2653}
2654
2655impl ReadOnlyNetworkGraph<'_> {
2656	/// Returns all known valid channels' short ids along with announced channel info.
2657	///
2658	/// This is not exported to bindings users because we don't want to return lifetime'd references
2659	pub fn channels(&self) -> &IndexedMap<u64, ChannelInfo> {
2660		&*self.channels
2661	}
2662
2663	/// Returns information on a channel with the given id.
2664	pub fn channel(&self, short_channel_id: u64) -> Option<&ChannelInfo> {
2665		self.channels.get(&short_channel_id)
2666	}
2667
2668	#[cfg(c_bindings)] // Non-bindings users should use `channels`
2669	/// Returns the list of channels in the graph
2670	pub fn list_channels(&self) -> Vec<u64> {
2671		self.channels.unordered_keys().map(|c| *c).collect()
2672	}
2673
2674	/// Returns all known nodes' public keys along with announced node info.
2675	///
2676	/// This is not exported to bindings users because we don't want to return lifetime'd references
2677	pub fn nodes(&self) -> &IndexedMap<NodeId, NodeInfo> {
2678		&*self.nodes
2679	}
2680
2681	/// Returns information on a node with the given id.
2682	pub fn node(&self, node_id: &NodeId) -> Option<&NodeInfo> {
2683		self.nodes.get(node_id)
2684	}
2685
2686	#[cfg(c_bindings)] // Non-bindings users should use `nodes`
2687	/// Returns the list of nodes in the graph
2688	pub fn list_nodes(&self) -> Vec<NodeId> {
2689		self.nodes.unordered_keys().map(|n| *n).collect()
2690	}
2691
2692	/// Get network addresses by node id.
2693	/// Returns None if the requested node is completely unknown,
2694	/// or if a node announcement for the node was never received.
2695	pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<SocketAddress>> {
2696		self.nodes
2697			.get(&NodeId::from_pubkey(&pubkey))
2698			.and_then(|node| node.announcement_info.as_ref().map(|ann| ann.addresses().to_vec()))
2699	}
2700
2701	/// Gets the maximum possible node_counter for a node in this graph
2702	pub(crate) fn max_node_counter(&self) -> u32 {
2703		self.max_node_counter
2704	}
2705}
2706
2707#[cfg(test)]
2708pub(crate) mod tests {
2709	use crate::events::{MessageSendEvent, MessageSendEventsProvider};
2710	use crate::ln::chan_utils::make_funding_redeemscript;
2711	use crate::ln::channelmanager;
2712	use crate::ln::msgs::SocketAddress;
2713	use crate::ln::msgs::{
2714		ChannelAnnouncement, ChannelUpdate, NodeAnnouncement, QueryChannelRange,
2715		QueryShortChannelIds, ReplyChannelRange, RoutingMessageHandler,
2716		UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement,
2717		MAX_VALUE_MSAT,
2718	};
2719	use crate::routing::gossip::{
2720		ChannelInfo, ChannelUpdateInfo, NetworkGraph, NetworkUpdate, NodeAlias,
2721		NodeAnnouncementInfo, NodeId, NodeInfo, P2PGossipSync, RoutingFees,
2722		MAX_EXCESS_BYTES_FOR_RELAY,
2723	};
2724	use crate::routing::utxo::{UtxoLookupError, UtxoResult};
2725	#[cfg(feature = "std")]
2726	use crate::types::features::InitFeatures;
2727	use crate::util::config::UserConfig;
2728	use crate::util::scid_utils::scid_from_parts;
2729	use crate::util::ser::{Hostname, Readable, ReadableArgs, Writeable};
2730	use crate::util::test_utils;
2731
2732	use super::STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS;
2733	use crate::routing::gossip::REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS;
2734
2735	use bitcoin::amount::Amount;
2736	use bitcoin::constants::ChainHash;
2737	use bitcoin::hashes::sha256d::Hash as Sha256dHash;
2738	use bitcoin::hashes::Hash;
2739	use bitcoin::hex::FromHex;
2740	use bitcoin::network::Network;
2741	use bitcoin::script::ScriptBuf;
2742	use bitcoin::secp256k1::{All, Secp256k1};
2743	use bitcoin::secp256k1::{PublicKey, SecretKey};
2744	use bitcoin::transaction::TxOut;
2745
2746	use crate::io;
2747	use crate::prelude::*;
2748	use crate::sync::Arc;
2749	use bitcoin::secp256k1;
2750
2751	fn create_network_graph() -> NetworkGraph<Arc<test_utils::TestLogger>> {
2752		let logger = Arc::new(test_utils::TestLogger::new());
2753		NetworkGraph::new(Network::Testnet, logger)
2754	}
2755
2756	fn create_gossip_sync(
2757		network_graph: &NetworkGraph<Arc<test_utils::TestLogger>>,
2758	) -> (
2759		Secp256k1<All>,
2760		P2PGossipSync<
2761			&NetworkGraph<Arc<test_utils::TestLogger>>,
2762			Arc<test_utils::TestChainSource>,
2763			Arc<test_utils::TestLogger>,
2764		>,
2765	) {
2766		let secp_ctx = Secp256k1::new();
2767		let logger = Arc::new(test_utils::TestLogger::new());
2768		let gossip_sync = P2PGossipSync::new(network_graph, None, Arc::clone(&logger));
2769		(secp_ctx, gossip_sync)
2770	}
2771
2772	#[test]
2773	fn request_full_sync_finite_times() {
2774		let network_graph = create_network_graph();
2775		let (_, gossip_sync) = create_gossip_sync(&network_graph);
2776
2777		assert!(gossip_sync.should_request_full_sync());
2778		assert!(gossip_sync.should_request_full_sync());
2779		assert!(gossip_sync.should_request_full_sync());
2780		assert!(gossip_sync.should_request_full_sync());
2781		assert!(gossip_sync.should_request_full_sync());
2782		assert!(!gossip_sync.should_request_full_sync());
2783	}
2784
2785	pub(crate) fn get_signed_node_announcement<F: Fn(&mut UnsignedNodeAnnouncement)>(
2786		f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2787	) -> NodeAnnouncement {
2788		let node_id = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_key));
2789		let mut unsigned_announcement = UnsignedNodeAnnouncement {
2790			features: channelmanager::provided_node_features(&UserConfig::default()),
2791			timestamp: 100,
2792			node_id,
2793			rgb: [0; 3],
2794			alias: NodeAlias([0; 32]),
2795			addresses: Vec::new(),
2796			excess_address_data: Vec::new(),
2797			excess_data: Vec::new(),
2798		};
2799		f(&mut unsigned_announcement);
2800		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2801		NodeAnnouncement {
2802			signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2803			contents: unsigned_announcement,
2804		}
2805	}
2806
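	/// Builds a `channel_announcement` carrying all four required signatures (both node
	/// keys plus two fixed test bitcoin keys), after letting `f` tweak the unsigned fields.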
2807	pub(crate) fn get_signed_channel_announcement<F: Fn(&mut UnsignedChannelAnnouncement)>(
2808		f: F, node_1_key: &SecretKey, node_2_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2809	) -> ChannelAnnouncement {
2810		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_1_key);
2811		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_key);
2812		let node_1_btckey = &SecretKey::from_slice(&[40; 32]).unwrap();
2813		let node_2_btckey = &SecretKey::from_slice(&[39; 32]).unwrap();
2814
2815		let mut unsigned_announcement = UnsignedChannelAnnouncement {
2816			features: channelmanager::provided_channel_features(&UserConfig::default()),
2817			chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2818			short_channel_id: 0,
2819			node_id_1: NodeId::from_pubkey(&node_id_1),
2820			node_id_2: NodeId::from_pubkey(&node_id_2),
2821			bitcoin_key_1: NodeId::from_pubkey(&PublicKey::from_secret_key(
2822				&secp_ctx,
2823				node_1_btckey,
2824			)),
2825			bitcoin_key_2: NodeId::from_pubkey(&PublicKey::from_secret_key(
2826				&secp_ctx,
2827				node_2_btckey,
2828			)),
2829			excess_data: Vec::new(),
2830		};
2831		f(&mut unsigned_announcement);
2832		let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
2833		ChannelAnnouncement {
2834			node_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_key),
2835			node_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_key),
2836			bitcoin_signature_1: secp_ctx.sign_ecdsa(&msghash, node_1_btckey),
2837			bitcoin_signature_2: secp_ctx.sign_ecdsa(&msghash, node_2_btckey),
2838			contents: unsigned_announcement,
2839		}
2840	}
2841
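	/// Returns the P2WSH funding script for the fixed test bitcoin keys used by
	/// [`get_signed_channel_announcement`], so announced channels can pass UTXO checks.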
2842	pub(crate) fn get_channel_script(secp_ctx: &Secp256k1<secp256k1::All>) -> ScriptBuf {
2843		let node_1_btckey = SecretKey::from_slice(&[40; 32]).unwrap();
2844		let node_2_btckey = SecretKey::from_slice(&[39; 32]).unwrap();
2845		make_funding_redeemscript(
2846			&PublicKey::from_secret_key(secp_ctx, &node_1_btckey),
2847			&PublicKey::from_secret_key(secp_ctx, &node_2_btckey),
2848		)
2849		.to_p2wsh()
2850	}
2851
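	/// Builds a `channel_update` with sane defaults, signed with `node_key`, after letting
	/// `f` tweak the unsigned fields.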
2852	pub(crate) fn get_signed_channel_update<F: Fn(&mut UnsignedChannelUpdate)>(
2853		f: F, node_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>,
2854	) -> ChannelUpdate {
2855		let mut unsigned_channel_update = UnsignedChannelUpdate {
2856			chain_hash: ChainHash::using_genesis_block(Network::Testnet),
2857			short_channel_id: 0,
2858			timestamp: 100,
2859			message_flags: 1, // Only must_be_one
2860			channel_flags: 0,
2861			cltv_expiry_delta: 144,
2862			htlc_minimum_msat: 1_000_000,
2863			htlc_maximum_msat: 1_000_000,
2864			fee_base_msat: 10_000,
2865			fee_proportional_millionths: 20,
2866			excess_data: Vec::new(),
2867		};
2868		f(&mut unsigned_channel_update);
2869		let msghash =
2870			hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
2871		ChannelUpdate {
2872			signature: secp_ctx.sign_ecdsa(&msghash, node_key),
2873			contents: unsigned_channel_update,
2874		}
2875	}
2876
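	// A minimal sketch of how the helpers above compose: build a graph, accept a
	// channel_announcement for it (no chain source, so no UTXO check), then accept a
	// channel_update for one direction. The tests below exercise each step in detail.
	#[test]
	fn helpers_compose_into_minimal_graph() {
		let network_graph = create_network_graph();
		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();

		let ann = get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
		assert!(gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &ann).unwrap());
		assert_eq!(network_graph.read_only().channels().len(), 1);

		let upd = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
		assert!(gossip_sync.handle_channel_update(Some(node_1_pubkey), &upd).unwrap());
	}
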
2877	#[test]
2878	fn handling_node_announcements() {
2879		let network_graph = create_network_graph();
2880		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
2881
2882		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2883		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2884		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2885		let zero_hash = Sha256dHash::hash(&[0; 32]);
2886
2887		let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
2888		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2889			Ok(_) => panic!(),
2890			Err(e) => assert_eq!(e.err, "No existing channels for node_announcement"),
2891		};
2892
2893		{
2894			// Announce a channel to add a corresponding node.
2895			let valid_announcement =
2896				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2897			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
2898			{
2899				Ok(res) => assert!(res),
2900				_ => panic!(),
2901			};
2902		}
2903
2904		let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
2905		match gossip_sync.handle_node_announcement(
2906			Some(node_1_pubkey),
2907			&NodeAnnouncement {
2908				signature: secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey),
2909				contents: valid_announcement.contents.clone(),
2910			},
2911		) {
2912			Ok(_) => panic!(),
2913			Err(e) => assert_eq!(e.err, "Invalid signature on node_announcement message"),
2914		};
2915
2916		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
2917			Ok(res) => assert!(res),
2918			Err(_) => panic!(),
2919		};
2920
2921		let announcement_with_data = get_signed_node_announcement(
2922			|unsigned_announcement| {
2923				unsigned_announcement.timestamp += 1000;
2924				unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
2925			},
2926			node_1_privkey,
2927			&secp_ctx,
2928		);
2929		// Returns false because the announcement contains excess data.
2930		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement_with_data) {
2931			Ok(res) => assert!(!res),
2932			Err(_) => panic!(),
2933		};
2934
2935		// Even though the previous announcement was not relayed further, we still accepted
2936		// it, so we now won't accept announcements older than it.
2937		let outdated_announcement = get_signed_node_announcement(
2938			|unsigned_announcement| {
2939				unsigned_announcement.timestamp += 1000 - 10;
2940			},
2941			node_1_privkey,
2942			&secp_ctx,
2943		);
2944		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &outdated_announcement) {
2945			Ok(_) => panic!(),
2946			Err(e) => assert_eq!(e.err, "Update older than last processed update"),
2947		};
2948	}
2949
2950	#[test]
2951	fn handling_channel_announcements() {
2952		let secp_ctx = Secp256k1::new();
2953		let logger = test_utils::TestLogger::new();
2954
2955		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
2956		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
2957		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
2958
2959		let good_script = get_channel_script(&secp_ctx);
2960		let valid_announcement =
2961			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
2962
2963		// Test the case where UTXO lookups are not supported.
2964		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
2965		let mut gossip_sync = P2PGossipSync::new(&network_graph, None, &logger);
2966		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2967			Ok(res) => assert!(res),
2968			_ => panic!(),
2969		};
2970
2971		let scid = valid_announcement.contents.short_channel_id;
2972		match network_graph.read_only().channels().get(&scid) {
2973			None => panic!(),
2974			Some(_) => (),
2975		};
2976
2977		// If we receive an announcement for the same channel (with UTXO lookups disabled),
2978		// we drop the new one on the floor, since we can't see any changes.
2979		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2980			Ok(_) => panic!(),
2981			Err(e) => assert_eq!(e.err, "Already have non-chain-validated channel"),
2982		};
2983
2984		// Test the case where the associated transaction is not on-chain (or not yet confirmed).
2985		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
2986		*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Err(UtxoLookupError::UnknownTx));
2987		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
2988		gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
2989
2990		let valid_announcement = get_signed_channel_announcement(
2991			|unsigned_announcement| {
2992				unsigned_announcement.short_channel_id += 1;
2993			},
2994			node_1_privkey,
2995			node_2_privkey,
2996			&secp_ctx,
2997		);
2998		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
2999			Ok(_) => panic!(),
3000			Err(e) => assert_eq!(e.err, "Channel announced without corresponding UTXO entry"),
3001		};
3002
3003		// Now test the case where the transaction is found in the UTXO set and the script is correct.
3004		*chain_source.utxo_ret.lock().unwrap() =
3005			UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script.clone() }));
3006		let valid_announcement = get_signed_channel_announcement(
3007			|unsigned_announcement| {
3008				unsigned_announcement.short_channel_id += 2;
3009			},
3010			node_1_privkey,
3011			node_2_privkey,
3012			&secp_ctx,
3013		);
3014		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3015			Ok(res) => assert!(res),
3016			_ => panic!(),
3017		};
3018
3019		let scid = valid_announcement.contents.short_channel_id;
3020		match network_graph.read_only().channels().get(&scid) {
3021			None => panic!(),
3022			Some(_) => (),
3023		};
3024
3025		// Once we've validated an announcement for a channel against the chain, we simply
3026		// ignore all new (duplicate) announcements for it.
3027		*chain_source.utxo_ret.lock().unwrap() =
3028			UtxoResult::Sync(Ok(TxOut { value: Amount::ZERO, script_pubkey: good_script }));
3029		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3030			Ok(_) => panic!(),
3031			Err(e) => assert_eq!(e.err, "Already have chain-validated channel"),
3032		};
3033
3034		#[cfg(feature = "std")]
3035		{
3036			use std::time::{SystemTime, UNIX_EPOCH};
3037
3038			let tracking_time = SystemTime::now()
3039				.duration_since(UNIX_EPOCH)
3040				.expect("Time must be > 1970")
3041				.as_secs();
3042			// Mark a node as permanently failed so it's tracked as removed.
3043			let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3044			gossip_sync.network_graph().node_failed_permanent(&node_1_pubkey);
3045
3046			// Return an error and ignore an otherwise-valid channel announcement if one of its nodes has been tracked as removed.
3047			let valid_announcement = get_signed_channel_announcement(
3048				|unsigned_announcement| {
3049					unsigned_announcement.short_channel_id += 3;
3050				},
3051				node_1_privkey,
3052				node_2_privkey,
3053				&secp_ctx,
3054			);
3055			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3056				Ok(_) => panic!(),
3057				Err(e) => assert_eq!(e.err, "Channel with SCID 3 or one of its nodes was removed from our network graph recently")
3058			}
3059
3060			gossip_sync.network_graph().remove_stale_channels_and_tracking_with_time(
3061				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3062			);
3063
3064			// The above channel announcement should now be handled as normal.
3065			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3066			{
3067				Ok(res) => assert!(res),
3068				_ => panic!(),
3069			}
3070		}
3071
3072		let valid_excess_data_announcement = get_signed_channel_announcement(
3073			|unsigned_announcement| {
3074				unsigned_announcement.short_channel_id += 4;
3075				unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3076			},
3077			node_1_privkey,
3078			node_2_privkey,
3079			&secp_ctx,
3080		);
3081
3082		let mut invalid_sig_announcement = valid_excess_data_announcement.clone();
3083		invalid_sig_announcement.contents.excess_data = Vec::new();
3084		match gossip_sync
3085			.handle_channel_announcement(Some(node_1_pubkey), &invalid_sig_announcement)
3086		{
3087			Ok(_) => panic!(),
3088			Err(e) => assert_eq!(e.err, "Invalid signature on channel_announcement message"),
3089		};
3090
3091		// Don't relay valid channels with excess data
3092		match gossip_sync
3093			.handle_channel_announcement(Some(node_1_pubkey), &valid_excess_data_announcement)
3094		{
3095			Ok(res) => assert!(!res),
3096			_ => panic!(),
3097		};
3098
3099		let channel_to_itself_announcement =
3100			get_signed_channel_announcement(|_| {}, node_1_privkey, node_1_privkey, &secp_ctx);
3101		match gossip_sync
3102			.handle_channel_announcement(Some(node_1_pubkey), &channel_to_itself_announcement)
3103		{
3104			Ok(_) => panic!(),
3105			Err(e) => assert_eq!(e.err, "Channel announcement node had a channel with itself"),
3106		};
3107
3108		// Test that channel announcements with the wrong chain hash are ignored (network graph is testnet,
3109		// announcement is mainnet).
3110		let incorrect_chain_announcement = get_signed_channel_announcement(
3111			|unsigned_announcement| {
3112				unsigned_announcement.chain_hash = ChainHash::using_genesis_block(Network::Bitcoin);
3113			},
3114			node_1_privkey,
3115			node_2_privkey,
3116			&secp_ctx,
3117		);
3118		match gossip_sync
3119			.handle_channel_announcement(Some(node_1_pubkey), &incorrect_chain_announcement)
3120		{
3121			Ok(_) => panic!(),
3122			Err(e) => {
3123				assert_eq!(e.err, "Channel announcement chain hash does not match genesis hash")
3124			},
3125		};
3126	}
3127
3128	#[test]
3129	fn handling_channel_update() {
3130		let secp_ctx = Secp256k1::new();
3131		let logger = test_utils::TestLogger::new();
3132		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3133		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3134		let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3135
3136		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3137		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3138		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3139
3140		let amount_sats = Amount::from_sat(1_000_000);
3141		let short_channel_id;
3142
3143		{
3144			// Announce a channel we will update
3145			let good_script = get_channel_script(&secp_ctx);
3146			*chain_source.utxo_ret.lock().unwrap() = UtxoResult::Sync(Ok(TxOut {
3147				value: amount_sats,
3148				script_pubkey: good_script.clone(),
3149			}));
3150
3151			let valid_channel_announcement =
3152				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3153			short_channel_id = valid_channel_announcement.contents.short_channel_id;
3154			match gossip_sync
3155				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3156			{
3157				Ok(_) => (),
3158				Err(_) => panic!(),
3159			};
3160		}
3161
3162		let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3163		network_graph.verify_channel_update(&valid_channel_update).unwrap();
3164		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3165			Ok(res) => assert!(res),
3166			_ => panic!(),
3167		};
3168
3169		{
3170			match network_graph.read_only().channels().get(&short_channel_id) {
3171				None => panic!(),
3172				Some(channel_info) => {
3173					assert_eq!(channel_info.one_to_two.as_ref().unwrap().cltv_expiry_delta, 144);
3174					assert!(channel_info.two_to_one.is_none());
3175				},
3176			};
3177		}
3178
3179		let valid_channel_update = get_signed_channel_update(
3180			|unsigned_channel_update| {
3181				unsigned_channel_update.timestamp += 100;
3182				unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
3183			},
3184			node_1_privkey,
3185			&secp_ctx,
3186		);
3187		// Returns false because the update contains excess data.
3188		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3189			Ok(res) => assert!(!res),
3190			_ => panic!(),
3191		};
3192
3193		let valid_channel_update = get_signed_channel_update(
3194			|unsigned_channel_update| {
3195				unsigned_channel_update.timestamp += 110;
3196				unsigned_channel_update.short_channel_id += 1;
3197			},
3198			node_1_privkey,
3199			&secp_ctx,
3200		);
3201		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3202			Ok(_) => panic!(),
3203			Err(e) => assert_eq!(e.err, "Couldn't find channel for update"),
3204		};
3205
3206		let valid_channel_update = get_signed_channel_update(
3207			|unsigned_channel_update| {
3208				unsigned_channel_update.htlc_maximum_msat = MAX_VALUE_MSAT + 1;
3209				unsigned_channel_update.timestamp += 110;
3210			},
3211			node_1_privkey,
3212			&secp_ctx,
3213		);
3214		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3215			Ok(_) => panic!(),
3216			Err(e) => assert_eq!(e.err, "htlc_maximum_msat is larger than maximum possible msats"),
3217		};
3218
3219		let valid_channel_update = get_signed_channel_update(
3220			|unsigned_channel_update| {
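				// The channel capacity is amount_sats * 1000 msat, so one msat more than
				// that must be rejected.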
3221				unsigned_channel_update.htlc_maximum_msat = amount_sats.to_sat() * 1000 + 1;
3222				unsigned_channel_update.timestamp += 110;
3223			},
3224			node_1_privkey,
3225			&secp_ctx,
3226		);
3227		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3228			Ok(_) => panic!(),
3229			Err(e) => assert_eq!(
3230				e.err,
3231				"htlc_maximum_msat is larger than channel capacity or capacity is bogus"
3232			),
3233		};
3234
3235		// Even though the previous update was not relayed further, we still accepted it, so
3236		// we now won't accept updates with a timestamp at or before the previous one's.
3237		let valid_channel_update = get_signed_channel_update(
3238			|unsigned_channel_update| {
3239				unsigned_channel_update.timestamp += 100;
3240			},
3241			node_1_privkey,
3242			&secp_ctx,
3243		);
3244		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3245			Ok(_) => panic!(),
3246			Err(e) => assert_eq!(e.err, "Update had same timestamp as last processed update"),
3247		};
3248
3249		let mut invalid_sig_channel_update = get_signed_channel_update(
3250			|unsigned_channel_update| {
3251				unsigned_channel_update.timestamp += 500;
3252			},
3253			node_1_privkey,
3254			&secp_ctx,
3255		);
3256		let zero_hash = Sha256dHash::hash(&[0; 32]);
3257		let fake_msghash = hash_to_message!(zero_hash.as_byte_array());
3258		invalid_sig_channel_update.signature = secp_ctx.sign_ecdsa(&fake_msghash, node_1_privkey);
3259		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &invalid_sig_channel_update) {
3260			Ok(_) => panic!(),
3261			Err(e) => assert_eq!(e.err, "Invalid signature on channel_update message"),
3262		};
3263
3264		// Test that channel updates with the wrong chain hash are ignored (network graph is testnet, channel
3265		// update is mainnet).
3266		let incorrect_chain_update = get_signed_channel_update(
3267			|unsigned_channel_update| {
3268				unsigned_channel_update.chain_hash =
3269					ChainHash::using_genesis_block(Network::Bitcoin);
3270			},
3271			node_1_privkey,
3272			&secp_ctx,
3273		);
3274
3275		match gossip_sync.handle_channel_update(Some(node_1_pubkey), &incorrect_chain_update) {
3276			Ok(_) => panic!(),
3277			Err(e) => assert_eq!(e.err, "Channel update chain hash does not match genesis hash"),
3278		};
3279	}
3280
3281	#[test]
3282	fn handling_network_update() {
3283		let logger = test_utils::TestLogger::new();
3284		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3285		let secp_ctx = Secp256k1::new();
3286
3287		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3288		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3289		let node_2_pk = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3290		let node_2_id = NodeId::from_pubkey(&node_2_pk);
3291
3292		{
3293			// There are no nodes in the table at the beginning.
3294			assert_eq!(network_graph.read_only().nodes().len(), 0);
3295		}
3296
3297		let scid;
3298		{
3299			// Check that we can manually apply a channel update.
3300			let valid_channel_announcement =
3301				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3302			scid = valid_channel_announcement.contents.short_channel_id;
3303			let chain_source: Option<&test_utils::TestChainSource> = None;
3304			assert!(network_graph
3305				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3306				.is_ok());
3307			assert!(network_graph.read_only().channels().get(&scid).is_some());
3308
3309			let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3310
3311			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3312			network_graph.update_channel(&valid_channel_update).unwrap();
3313			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3314		}
3315
3316		// Non-permanent failure doesn't touch the channel at all
3317		{
3318			match network_graph.read_only().channels().get(&scid) {
3319				None => panic!(),
3320				Some(channel_info) => {
3321					assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3322				},
3323			};
3324
3325			network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3326				short_channel_id: scid,
3327				is_permanent: false,
3328			});
3329
3330			match network_graph.read_only().channels().get(&scid) {
3331				None => panic!(),
3332				Some(channel_info) => {
3333					assert!(channel_info.one_to_two.as_ref().unwrap().enabled);
3334				},
3335			};
3336		}
3337
3338		// Permanent closing deletes a channel
3339		network_graph.handle_network_update(&NetworkUpdate::ChannelFailure {
3340			short_channel_id: scid,
3341			is_permanent: true,
3342		});
3343
3344		assert_eq!(network_graph.read_only().channels().len(), 0);
3345		// Nodes are also deleted because there are no associated channels anymore
3346		assert_eq!(network_graph.read_only().nodes().len(), 0);
3347
3348		{
3349			// Get a new network graph, since we don't want to track removed nodes in this test when built with "std".
3350			let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3351
3352			// Announce a channel to test permanent node failure
3353			let valid_channel_announcement =
3354				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3355			let short_channel_id = valid_channel_announcement.contents.short_channel_id;
3356			let chain_source: Option<&test_utils::TestChainSource> = None;
3357			assert!(network_graph
3358				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3359				.is_ok());
3360			assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3361
3362			// Non-permanent node failure does not delete any nodes or channels
3363			network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3364				node_id: node_2_pk,
3365				is_permanent: false,
3366			});
3367
3368			assert!(network_graph.read_only().channels().get(&short_channel_id).is_some());
3369			assert!(network_graph.read_only().nodes().get(&node_2_id).is_some());
3370
3371			// Permanent node failure deletes node and its channels
3372			network_graph.handle_network_update(&NetworkUpdate::NodeFailure {
3373				node_id: node_2_pk,
3374				is_permanent: true,
3375			});
3376
3377			assert_eq!(network_graph.read_only().nodes().len(), 0);
3378			// Channels are also deleted because the associated node has been deleted
3379			assert_eq!(network_graph.read_only().channels().len(), 0);
3380		}
3381	}
3382
3383	#[test]
3384	fn test_channel_timeouts() {
3385		// Test the removal of channels with `remove_stale_channels_and_tracking`.
3386		let logger = test_utils::TestLogger::new();
3387		let chain_source = test_utils::TestChainSource::new(Network::Testnet);
3388		let network_graph = NetworkGraph::new(Network::Testnet, &logger);
3389		let gossip_sync = P2PGossipSync::new(&network_graph, Some(&chain_source), &logger);
3390		let secp_ctx = Secp256k1::new();
3391
3392		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3393		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3394		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3395
3396		let valid_channel_announcement =
3397			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3398		let scid = valid_channel_announcement.contents.short_channel_id;
3399		let chain_source: Option<&test_utils::TestChainSource> = None;
3400		assert!(network_graph
3401			.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3402			.is_ok());
3403		assert!(network_graph.read_only().channels().get(&scid).is_some());
3404
3405		// Submit a channel update for each channel direction (the channel_flags direction bit).
3406		let valid_channel_update = get_signed_channel_update(|_| {}, node_1_privkey, &secp_ctx);
3407		assert!(gossip_sync
3408			.handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3409			.is_ok());
3410		assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3411
3412		let valid_channel_update_2 = get_signed_channel_update(
3413			|update| {
3414				update.channel_flags |= 1;
3415			},
3416			node_2_privkey,
3417			&secp_ctx,
3418		);
3419		gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update_2).unwrap();
3420		assert!(network_graph.read_only().channels().get(&scid).unwrap().two_to_one.is_some());
3421
3422		network_graph.remove_stale_channels_and_tracking_with_time(
3423			100 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3424		);
3425		assert_eq!(network_graph.read_only().channels().len(), 1);
3426		assert_eq!(network_graph.read_only().nodes().len(), 2);
3427
3428		network_graph.remove_stale_channels_and_tracking_with_time(
3429			101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3430		);
3431		#[cfg(not(feature = "std"))]
3432		{
3433			// Make sure removed channels are tracked.
3434			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3435		}
3436		network_graph.remove_stale_channels_and_tracking_with_time(
3437			101 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3438		);
3439
3440		#[cfg(feature = "std")]
3441		{
3442			// In std mode, a further check is performed before fully removing the channel -
3443			// the channel_announcement must have been received at least two weeks ago. We
3444			// fudge that here by indicating the time has jumped two weeks.
3445			assert_eq!(network_graph.read_only().channels().len(), 1);
3446			assert_eq!(network_graph.read_only().nodes().len(), 2);
3447
3448			// Note that the directional channel information will have been removed already.
3449			// We want to check that this will work even if *one* of the channel updates is recent,
3450			// so we should add it with a recent timestamp.
3451			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_none());
3452			use std::time::{SystemTime, UNIX_EPOCH};
3453			let announcement_time = SystemTime::now()
3454				.duration_since(UNIX_EPOCH)
3455				.expect("Time must be > 1970")
3456				.as_secs();
3457			let valid_channel_update = get_signed_channel_update(
3458				|unsigned_channel_update| {
3459					unsigned_channel_update.timestamp =
3460						(announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS) as u32;
3461				},
3462				node_1_privkey,
3463				&secp_ctx,
3464			);
3465			assert!(gossip_sync
3466				.handle_channel_update(Some(node_1_pubkey), &valid_channel_update)
3467				.is_ok());
3468			assert!(network_graph.read_only().channels().get(&scid).unwrap().one_to_two.is_some());
3469			network_graph.remove_stale_channels_and_tracking_with_time(
3470				announcement_time + 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS,
3471			);
3472			// Make sure removed channels are tracked.
3473			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3474			// Provide a later time so that sufficient time has passed
3475			network_graph.remove_stale_channels_and_tracking_with_time(
3476				announcement_time
3477					+ 1 + STALE_CHANNEL_UPDATE_AGE_LIMIT_SECS
3478					+ REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3479			);
3480		}
3481
3482		assert_eq!(network_graph.read_only().channels().len(), 0);
3483		assert_eq!(network_graph.read_only().nodes().len(), 0);
3484		assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3485
3486		#[cfg(feature = "std")]
3487		{
3488			use std::time::{SystemTime, UNIX_EPOCH};
3489
3490			let tracking_time = SystemTime::now()
3491				.duration_since(UNIX_EPOCH)
3492				.expect("Time must be > 1970")
3493				.as_secs();
3494
3495			// Clear tracked nodes and channels for clean slate
3496			network_graph.removed_channels.lock().unwrap().clear();
3497			network_graph.removed_nodes.lock().unwrap().clear();
3498
3499			// Add a channel and its nodes from a channel announcement, so our network graph
3500			// now consists of just two nodes and one channel between them.
3501			assert!(network_graph
3502				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3503				.is_ok());
3504
3505			// Mark the channel as permanently failed. This will also remove the two nodes
3506			// and all of the entries will be tracked as removed.
3507			network_graph.channel_failed_permanent_with_time(scid, Some(tracking_time));
3508
3509			// Should not remove from tracking if insufficient time has passed
3510			network_graph.remove_stale_channels_and_tracking_with_time(
3511				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS - 1,
3512			);
3513			assert_eq!(
3514				network_graph.removed_channels.lock().unwrap().len(),
3515				1,
3516				"Removed channel count ≠ 1 with tracking_time {}",
3517				tracking_time
3518			);
3519
3520			// Provide a later time so that sufficient time has passed
3521			network_graph.remove_stale_channels_and_tracking_with_time(
3522				tracking_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3523			);
3524			assert!(
3525				network_graph.removed_channels.lock().unwrap().is_empty(),
3526				"Unexpectedly removed channels with tracking_time {}",
3527				tracking_time
3528			);
3529			assert!(
3530				network_graph.removed_nodes.lock().unwrap().is_empty(),
3531				"Unexpectedly removed nodes with tracking_time {}",
3532				tracking_time
3533			);
3534		}
3535
3536		#[cfg(not(feature = "std"))]
3537		{
3538			// When we don't have access to the system clock, the time we started tracking removal is
3539			// whatever was provided by the first call to `remove_stale_channels_and_tracking_with_time`.
3540			// Hence, a later call only removes an entry from tracking once sufficient time has passed
3541			// since that first call.
3542			let removal_time = 1664619654;
3543
3544			// Clear removed nodes and channels for clean slate
3545			network_graph.removed_channels.lock().unwrap().clear();
3546			network_graph.removed_nodes.lock().unwrap().clear();
3547
3548			// Add a channel and its nodes from a channel announcement, so our network graph
3549			// now consists of just two nodes and one channel between them.
3550			assert!(network_graph
3551				.update_channel_from_announcement(&valid_channel_announcement, &chain_source)
3552				.is_ok());
3553
3554			// Mark the channel as permanently failed. This will also remove the two nodes
3555			// and all of the entries will be tracked as removed.
3556			network_graph.channel_failed_permanent(scid);
3557
3558			// The first time we call the following, the channel will have a removal time assigned.
3559			network_graph.remove_stale_channels_and_tracking_with_time(removal_time);
3560			assert_eq!(network_graph.removed_channels.lock().unwrap().len(), 1);
3561
3562			// Provide a later time so that sufficient time has passed
3563			network_graph.remove_stale_channels_and_tracking_with_time(
3564				removal_time + REMOVED_ENTRIES_TRACKING_AGE_LIMIT_SECS,
3565			);
3566			assert!(network_graph.removed_channels.lock().unwrap().is_empty());
3567			assert!(network_graph.removed_nodes.lock().unwrap().is_empty());
3568		}
3569	}
3570
3571	#[test]
3572	fn getting_next_channel_announcements() {
3573		let network_graph = create_network_graph();
3574		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3575		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3576		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3577		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3578
3579		// No channels have been announced yet.
3580		let channels_with_announcements = gossip_sync.get_next_channel_announcement(0);
3581		assert!(channels_with_announcements.is_none());
3582
3583		let short_channel_id;
3584		{
3585			// Announce a channel we will update
3586			let valid_channel_announcement =
3587				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3588			short_channel_id = valid_channel_announcement.contents.short_channel_id;
3589			match gossip_sync
3590				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3591			{
3592				Ok(_) => (),
3593				Err(_) => panic!(),
3594			};
3595		}
3596
3597		// The result now contains the initial channel announcement.
3598		let channels_with_announcements =
3599			gossip_sync.get_next_channel_announcement(short_channel_id);
3600		if let Some(channel_announcements) = channels_with_announcements {
3601			let (_, ref update_1, ref update_2) = channel_announcements;
3602			assert_eq!(update_1, &None);
3603			assert_eq!(update_2, &None);
3604		} else {
3605			panic!();
3606		}
3607
3608		{
3609			// Valid channel update
3610			let valid_channel_update = get_signed_channel_update(
3611				|unsigned_channel_update| {
3612					unsigned_channel_update.timestamp = 101;
3613				},
3614				node_1_privkey,
3615				&secp_ctx,
3616			);
3617			match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3618				Ok(_) => (),
3619				Err(_) => panic!(),
3620			};
3621		}
3622
3623		// Now contains an initial announcement and an update.
3624		let channels_with_announcements =
3625			gossip_sync.get_next_channel_announcement(short_channel_id);
3626		if let Some(channel_announcements) = channels_with_announcements {
3627			let (_, ref update_1, ref update_2) = channel_announcements;
3628			assert_ne!(update_1, &None);
3629			assert_eq!(update_2, &None);
3630		} else {
3631			panic!();
3632		}
3633
3634		{
3635			// Channel update with excess data.
3636			let valid_channel_update = get_signed_channel_update(
3637				|unsigned_channel_update| {
3638					unsigned_channel_update.timestamp = 102;
3639					unsigned_channel_update.excess_data =
3640						[1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3641				},
3642				node_1_privkey,
3643				&secp_ctx,
3644			);
3645			match gossip_sync.handle_channel_update(Some(node_1_pubkey), &valid_channel_update) {
3646				Ok(_) => (),
3647				Err(_) => panic!(),
3648			};
3649		}
3650
3651		// Test that announcements with excess data won't be returned
3652		let channels_with_announcements =
3653			gossip_sync.get_next_channel_announcement(short_channel_id);
3654		if let Some(channel_announcements) = channels_with_announcements {
3655			let (_, ref update_1, ref update_2) = channel_announcements;
3656			assert_eq!(update_1, &None);
3657			assert_eq!(update_2, &None);
3658		} else {
3659			panic!();
3660		}
3661
3662		// A further starting point has no channels after it.
3663		let channels_with_announcements =
3664			gossip_sync.get_next_channel_announcement(short_channel_id + 1000);
3665		assert!(channels_with_announcements.is_none());
3666	}
3667
3668	#[test]
3669	fn getting_next_node_announcements() {
3670		let network_graph = create_network_graph();
3671		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3672		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3673		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3674		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3675		let node_id_1 = NodeId::from_pubkey(&PublicKey::from_secret_key(&secp_ctx, node_1_privkey));
3676
3677		// No nodes yet.
3678		let next_announcements = gossip_sync.get_next_node_announcement(None);
3679		assert!(next_announcements.is_none());
3680
3681		{
3682			// Announce a channel to add 2 nodes
3683			let valid_channel_announcement =
3684				get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3685			match gossip_sync
3686				.handle_channel_announcement(Some(node_1_pubkey), &valid_channel_announcement)
3687			{
3688				Ok(_) => (),
3689				Err(_) => panic!(),
3690			};
3691		}
3692
3693		// The nodes were never announced.
3694		let next_announcements = gossip_sync.get_next_node_announcement(None);
3695		assert!(next_announcements.is_none());
3696
3697		{
3698			let valid_announcement =
3699				get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3700			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3701				Ok(_) => (),
3702				Err(_) => panic!(),
3703			};
3704
3705			let valid_announcement =
3706				get_signed_node_announcement(|_| {}, node_2_privkey, &secp_ctx);
3707			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3708				Ok(_) => (),
3709				Err(_) => panic!(),
3710			};
3711		}
3712
3713		let next_announcements = gossip_sync.get_next_node_announcement(None);
3714		assert!(next_announcements.is_some());
3715
3716		// Skip the first node.
3717		let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3718		assert!(next_announcements.is_some());
3719
3720		{
3721			// A later announcement which should not be relayed (due to excess data) prevents us from sharing the node.
3722			let valid_announcement = get_signed_node_announcement(
3723				|unsigned_announcement| {
3724					unsigned_announcement.timestamp += 10;
3725					unsigned_announcement.excess_data =
3726						[1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec();
3727				},
3728				node_2_privkey,
3729				&secp_ctx,
3730			);
3731			match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3732				Ok(res) => assert!(!res),
3733				Err(_) => panic!(),
3734			};
3735		}
3736
3737		let next_announcements = gossip_sync.get_next_node_announcement(Some(&node_id_1));
3738		assert!(next_announcements.is_none());
3739	}
3740
3741	#[test]
3742	fn network_graph_serialization() {
3743		let network_graph = create_network_graph();
3744		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3745
3746		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3747		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3748		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3749
3750		// Announce a channel to add a corresponding node.
3751		let valid_announcement =
3752			get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
3753		match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement) {
3754			Ok(res) => assert!(res),
3755			_ => panic!(),
3756		};
3757
3758		let valid_announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
3759		match gossip_sync.handle_node_announcement(Some(node_1_pubkey), &valid_announcement) {
3760			Ok(_) => (),
3761			Err(_) => panic!(),
3762		};
3763
3764		let mut w = test_utils::TestVecWriter(Vec::new());
3765		assert!(!network_graph.read_only().nodes().is_empty());
3766		assert!(!network_graph.read_only().channels().is_empty());
3767		network_graph.write(&mut w).unwrap();
3768
3769		let logger = Arc::new(test_utils::TestLogger::new());
3770		assert!(
3771			<NetworkGraph<_>>::read(&mut io::Cursor::new(&w.0), logger).unwrap() == network_graph
3772		);
3773	}
3774
3775	#[test]
3776	fn network_graph_tlv_serialization() {
3777		let network_graph = create_network_graph();
3778		network_graph.set_last_rapid_gossip_sync_timestamp(42);
3779
3780		let mut w = test_utils::TestVecWriter(Vec::new());
3781		network_graph.write(&mut w).unwrap();
3782
3783		let logger = Arc::new(test_utils::TestLogger::new());
3784		let reassembled_network_graph: NetworkGraph<_> =
3785			ReadableArgs::read(&mut io::Cursor::new(&w.0), logger).unwrap();
3786		assert!(reassembled_network_graph == network_graph);
3787		assert_eq!(reassembled_network_graph.get_last_rapid_gossip_sync_timestamp().unwrap(), 42);
3788	}
3789
3790	#[test]
3791	#[cfg(feature = "std")]
3792	fn calling_sync_routing_table() {
3793		use crate::ln::msgs::Init;
3794		use std::time::{SystemTime, UNIX_EPOCH};
3795
3796		let network_graph = create_network_graph();
3797		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3798		let node_privkey_1 = &SecretKey::from_slice(&[42; 32]).unwrap();
3799		let node_id_1 = PublicKey::from_secret_key(&secp_ctx, node_privkey_1);
3800
3801		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3802
3803		// It should be ignored if the gossip_queries feature is not enabled.
3804		{
3805			let init_msg = Init {
3806				features: InitFeatures::empty(),
3807				networks: None,
3808				remote_network_address: None,
3809			};
3810			gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3811			let events = gossip_sync.get_and_clear_pending_msg_events();
3812			assert_eq!(events.len(), 0);
3813		}
3814
3815		// It should send a gossip_timestamp_filter with the correct information
3816		{
3817			let mut features = InitFeatures::empty();
3818			features.set_gossip_queries_optional();
3819			let init_msg = Init { features, networks: None, remote_network_address: None };
3820			gossip_sync.peer_connected(node_id_1, &init_msg, true).unwrap();
3821			let events = gossip_sync.get_and_clear_pending_msg_events();
3822			assert_eq!(events.len(), 1);
3823			match &events[0] {
3824				MessageSendEvent::SendGossipTimestampFilter { node_id, msg } => {
3825					assert_eq!(node_id, &node_id_1);
3826					assert_eq!(msg.chain_hash, chain_hash);
3827					let expected_timestamp = SystemTime::now()
3828						.duration_since(UNIX_EPOCH)
3829						.expect("Time must be > 1970")
3830						.as_secs();
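					// The filter should start about two weeks (60 * 60 * 24 * 7 * 2 seconds)
					// in the past, with a little slack for test runtime.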
3831					assert!(
3832						(msg.first_timestamp as u64) >= expected_timestamp - 60 * 60 * 24 * 7 * 2
3833					);
3834					assert!(
3835						(msg.first_timestamp as u64)
3836							< expected_timestamp - 60 * 60 * 24 * 7 * 2 + 10
3837					);
3838					assert_eq!(msg.timestamp_range, u32::max_value());
3839				},
3840				_ => panic!("Expected MessageSendEvent::SendGossipTimestampFilter"),
3841			};
3842		}
3843	}
3844
3845	#[test]
3846	fn handling_query_channel_range() {
3847		let network_graph = create_network_graph();
3848		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
3849
3850		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
3851		let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
3852		let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
3853		let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
3854		let node_id_2 = PublicKey::from_secret_key(&secp_ctx, node_2_privkey);
3855
3856		let mut scids: Vec<u64> = vec![
3857			scid_from_parts(0xfffffe, 0xffffff, 0xffff).unwrap(), // max
3858			scid_from_parts(0xffffff, 0xffffff, 0xffff).unwrap(), // never
3859		];
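		// Reminder: an SCID packs (block, tx_index, vout) as
		// (block << 40) | (tx_index << 16) | vout, so the "max" entry above is
		// 0xfffffe_ffffff_ffff.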
3860
3861		// used for testing multipart reply across blocks
3862		for block in 100000..=108001 {
3863			scids.push(scid_from_parts(block, 0, 0).unwrap());
3864		}
3865
3866		// used for testing resumption on same block
3867		scids.push(scid_from_parts(108001, 1, 0).unwrap());
3868
3869		for scid in scids {
3870			let valid_announcement = get_signed_channel_announcement(
3871				|unsigned_announcement| {
3872					unsigned_announcement.short_channel_id = scid;
3873				},
3874				node_1_privkey,
3875				node_2_privkey,
3876				&secp_ctx,
3877			);
3878			match gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &valid_announcement)
3879			{
3880				Ok(_) => (),
3881				_ => panic!(),
3882			};
3883		}
3884
3885		// Error when number_of_blocks=0
3886		do_handling_query_channel_range(
3887			&gossip_sync,
3888			&node_id_2,
3889			QueryChannelRange {
3890				chain_hash: chain_hash.clone(),
3891				first_blocknum: 0,
3892				number_of_blocks: 0,
3893			},
3894			false,
3895			vec![ReplyChannelRange {
3896				chain_hash: chain_hash.clone(),
3897				first_blocknum: 0,
3898				number_of_blocks: 0,
3899				sync_complete: true,
3900				short_channel_ids: vec![],
3901			}],
3902		);
3903
3904		// Error when wrong chain
3905		do_handling_query_channel_range(
3906			&gossip_sync,
3907			&node_id_2,
3908			QueryChannelRange {
3909				chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3910				first_blocknum: 0,
3911				number_of_blocks: 0xffff_ffff,
3912			},
3913			false,
3914			vec![ReplyChannelRange {
3915				chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
3916				first_blocknum: 0,
3917				number_of_blocks: 0xffff_ffff,
3918				sync_complete: true,
3919				short_channel_ids: vec![],
3920			}],
3921		);
3922
3923		// Error when first_blocknum > 0xffffff
3924		do_handling_query_channel_range(
3925			&gossip_sync,
3926			&node_id_2,
3927			QueryChannelRange {
3928				chain_hash: chain_hash.clone(),
3929				first_blocknum: 0x01000000,
3930				number_of_blocks: 0xffff_ffff,
3931			},
3932			false,
3933			vec![ReplyChannelRange {
3934				chain_hash: chain_hash.clone(),
3935				first_blocknum: 0x01000000,
3936				number_of_blocks: 0xffff_ffff,
3937				sync_complete: true,
3938				short_channel_ids: vec![],
3939			}],
3940		);
3941
3942		// Empty reply when max valid SCID block num
3943		do_handling_query_channel_range(
3944			&gossip_sync,
3945			&node_id_2,
3946			QueryChannelRange {
3947				chain_hash: chain_hash.clone(),
3948				first_blocknum: 0xffffff,
3949				number_of_blocks: 1,
3950			},
3951			true,
3952			vec![ReplyChannelRange {
3953				chain_hash: chain_hash.clone(),
3954				first_blocknum: 0xffffff,
3955				number_of_blocks: 1,
3956				sync_complete: true,
3957				short_channel_ids: vec![],
3958			}],
3959		);
3960
3961		// No results in valid query range
3962		do_handling_query_channel_range(
3963			&gossip_sync,
3964			&node_id_2,
3965			QueryChannelRange {
3966				chain_hash: chain_hash.clone(),
3967				first_blocknum: 1000,
3968				number_of_blocks: 1000,
3969			},
3970			true,
3971			vec![ReplyChannelRange {
3972				chain_hash: chain_hash.clone(),
3973				first_blocknum: 1000,
3974				number_of_blocks: 1000,
3975				sync_complete: true,
3976				short_channel_ids: vec![],
3977			}],
3978		);
3979
3980		// Overflow first_blocknum + number_of_blocks
3981		do_handling_query_channel_range(
3982			&gossip_sync,
3983			&node_id_2,
3984			QueryChannelRange {
3985				chain_hash: chain_hash.clone(),
3986				first_blocknum: 0xfe0000,
3987				number_of_blocks: 0xffffffff,
3988			},
3989			true,
3990			vec![ReplyChannelRange {
3991				chain_hash: chain_hash.clone(),
3992				first_blocknum: 0xfe0000,
3993				number_of_blocks: 0xffffffff - 0xfe0000,
3994				sync_complete: true,
3995				short_channel_ids: vec![
3996					0xfffffe_ffffff_ffff, // max
3997				],
3998			}],
3999		);
4000
4001		// Single block exactly full
4002		do_handling_query_channel_range(
4003			&gossip_sync,
4004			&node_id_2,
4005			QueryChannelRange {
4006				chain_hash: chain_hash.clone(),
4007				first_blocknum: 100000,
4008				number_of_blocks: 8000,
4009			},
4010			true,
4011			vec![ReplyChannelRange {
4012				chain_hash: chain_hash.clone(),
4013				first_blocknum: 100000,
4014				number_of_blocks: 8000,
4015				sync_complete: true,
4016				short_channel_ids: (100000..=107999)
4017					.map(|block| scid_from_parts(block, 0, 0).unwrap())
4018					.collect(),
4019			}],
4020		);
4021
4022		// Multiple split on new block
4023		do_handling_query_channel_range(
4024			&gossip_sync,
4025			&node_id_2,
4026			QueryChannelRange {
4027				chain_hash: chain_hash.clone(),
4028				first_blocknum: 100000,
4029				number_of_blocks: 8001,
4030			},
4031			true,
4032			vec![
4033				ReplyChannelRange {
4034					chain_hash: chain_hash.clone(),
4035					first_blocknum: 100000,
4036					number_of_blocks: 7999,
4037					sync_complete: false,
4038					short_channel_ids: (100000..=107999)
4039						.map(|block| scid_from_parts(block, 0, 0).unwrap())
4040						.collect(),
4041				},
4042				ReplyChannelRange {
4043					chain_hash: chain_hash.clone(),
4044					first_blocknum: 107999,
4045					number_of_blocks: 2,
4046					sync_complete: true,
4047					short_channel_ids: vec![scid_from_parts(108000, 0, 0).unwrap()],
4048				},
4049			],
4050		);
4051
4052		// Multiple split on same block
4053		do_handling_query_channel_range(
4054			&gossip_sync,
4055			&node_id_2,
4056			QueryChannelRange {
4057				chain_hash: chain_hash.clone(),
4058				first_blocknum: 100002,
4059				number_of_blocks: 8000,
4060			},
4061			true,
4062			vec![
4063				ReplyChannelRange {
4064					chain_hash: chain_hash.clone(),
4065					first_blocknum: 100002,
4066					number_of_blocks: 7999,
4067					sync_complete: false,
4068					short_channel_ids: (100002..=108001)
4069						.map(|block| scid_from_parts(block, 0, 0).unwrap())
4070						.collect(),
4071				},
4072				ReplyChannelRange {
4073					chain_hash: chain_hash.clone(),
4074					first_blocknum: 108001,
4075					number_of_blocks: 1,
4076					sync_complete: true,
4077					short_channel_ids: vec![scid_from_parts(108001, 1, 0).unwrap()],
4078				},
4079			],
4080		);
4081	}
4082
4083	fn do_handling_query_channel_range(
4084		gossip_sync: &P2PGossipSync<
4085			&NetworkGraph<Arc<test_utils::TestLogger>>,
4086			Arc<test_utils::TestChainSource>,
4087			Arc<test_utils::TestLogger>,
4088		>,
4089		test_node_id: &PublicKey, msg: QueryChannelRange, expected_ok: bool,
4090		expected_replies: Vec<ReplyChannelRange>,
4091	) {
4092		let mut max_firstblocknum = msg.first_blocknum.saturating_sub(1);
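		// Tracks where the previous reply ended, so the c-lightning v0.9.x sequencing
		// check below can verify each reply starts at or immediately after it.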
4093		let mut c_lightning_0_9_prev_end_blocknum = max_firstblocknum;
4094		let query_end_blocknum = msg.end_blocknum();
4095		let result = gossip_sync.handle_query_channel_range(*test_node_id, msg);
4096
4097		if expected_ok {
4098			assert!(result.is_ok());
4099		} else {
4100			assert!(result.is_err());
4101		}
4102
4103		let events = gossip_sync.get_and_clear_pending_msg_events();
4104		assert_eq!(events.len(), expected_replies.len());
4105
4106		for i in 0..events.len() {
4107			let expected_reply = &expected_replies[i];
4108			match &events[i] {
4109				MessageSendEvent::SendReplyChannelRange { node_id, msg } => {
4110					assert_eq!(node_id, test_node_id);
4111					assert_eq!(msg.chain_hash, expected_reply.chain_hash);
4112					assert_eq!(msg.first_blocknum, expected_reply.first_blocknum);
4113					assert_eq!(msg.number_of_blocks, expected_reply.number_of_blocks);
4114					assert_eq!(msg.sync_complete, expected_reply.sync_complete);
4115					assert_eq!(msg.short_channel_ids, expected_reply.short_channel_ids);
4116
4117					// Enforce exactly the sequencing requirements present on c-lightning v0.9.3
4118					assert!(
4119						msg.first_blocknum == c_lightning_0_9_prev_end_blocknum
4120							|| msg.first_blocknum
4121								== c_lightning_0_9_prev_end_blocknum.saturating_add(1)
4122					);
4123					assert!(msg.first_blocknum >= max_firstblocknum);
4124					max_firstblocknum = msg.first_blocknum;
4125					c_lightning_0_9_prev_end_blocknum =
4126						msg.first_blocknum.saturating_add(msg.number_of_blocks);
4127
4128					// Check that the last reply covers blocks through at least the query's end_blocknum.
4129					if i == events.len() - 1 {
4130						assert!(
4131							msg.first_blocknum.saturating_add(msg.number_of_blocks)
4132								>= query_end_blocknum
4133						);
4134					}
4135				},
4136				_ => panic!("expected MessageSendEvent::SendReplyChannelRange"),
4137			}
4138		}
4139	}
4140
4141	#[test]
4142	fn handling_query_short_channel_ids() {
4143		let network_graph = create_network_graph();
4144		let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);
4145		let node_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
4146		let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
4147
4148		let chain_hash = ChainHash::using_genesis_block(Network::Testnet);
4149
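		// LDK does not currently answer query_short_channel_ids, so any such query
		// yields an error.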
4150		let result = gossip_sync.handle_query_short_channel_ids(
4151			node_id,
4152			QueryShortChannelIds { chain_hash, short_channel_ids: vec![0x0003e8_000000_0000] },
4153		);
4154		assert!(result.is_err());
4155	}
4156
4157	#[test]
4158	fn displays_node_alias() {
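		// A NodeAlias displays as UTF-8 up to the first null byte, with control and
		// invalid characters replaced by U+FFFD.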
4159		let format_str_alias = |alias: &str| {
4160			let mut bytes = [0u8; 32];
4161			bytes[..alias.as_bytes().len()].copy_from_slice(alias.as_bytes());
4162			format!("{}", NodeAlias(bytes))
4163		};
4164
4165		assert_eq!(format_str_alias("I\u{1F496}LDK! \u{26A1}"), "I\u{1F496}LDK! \u{26A1}");
4166		assert_eq!(format_str_alias("I\u{1F496}LDK!\0\u{26A1}"), "I\u{1F496}LDK!");
4167		assert_eq!(format_str_alias("I\u{1F496}LDK!\t\u{26A1}"), "I\u{1F496}LDK!\u{FFFD}\u{26A1}");
4168
4169		let format_bytes_alias = |alias: &[u8]| {
4170			let mut bytes = [0u8; 32];
4171			bytes[..alias.len()].copy_from_slice(alias);
4172			format!("{}", NodeAlias(bytes))
4173		};
4174
4175		assert_eq!(format_bytes_alias(b"\xFFI <heart> LDK!"), "\u{FFFD}I <heart> LDK!");
4176		assert_eq!(format_bytes_alias(b"\xFFI <heart>\0LDK!"), "\u{FFFD}I <heart>");
4177		assert_eq!(format_bytes_alias(b"\xFFI <heart>\tLDK!"), "\u{FFFD}I <heart>\u{FFFD}LDK!");
4178	}
4179
4180	#[test]
4181	fn channel_info_is_readable() {
4182		let chanmon_cfgs = crate::ln::functional_test_utils::create_chanmon_cfgs(2);
4183		let node_cfgs = crate::ln::functional_test_utils::create_node_cfgs(2, &chanmon_cfgs);
4184		let node_chanmgrs = crate::ln::functional_test_utils::create_node_chanmgrs(
4185			2,
4186			&node_cfgs,
4187			&[None, None],
4188		);
4189		let nodes = crate::ln::functional_test_utils::create_network(2, &node_cfgs, &node_chanmgrs);
4190		let config = crate::ln::functional_test_utils::test_default_channel_config();
4191
4192		// 1. Test encoding/decoding of ChannelUpdateInfo
4193		let chan_update_info = ChannelUpdateInfo {
4194			last_update: 23,
4195			enabled: true,
4196			cltv_expiry_delta: 42,
4197			htlc_minimum_msat: 1234,
4198			htlc_maximum_msat: 5678,
4199			fees: RoutingFees { base_msat: 9, proportional_millionths: 10 },
4200			last_update_message: None,
4201		};
4202
4203		let mut encoded_chan_update_info: Vec<u8> = Vec::new();
4204		assert!(chan_update_info.write(&mut encoded_chan_update_info).is_ok());
4205
4206		// First make sure we can read ChannelUpdateInfos we just wrote
4207		let read_chan_update_info: ChannelUpdateInfo =
4208			crate::util::ser::Readable::read(&mut encoded_chan_update_info.as_slice()).unwrap();
4209		assert_eq!(chan_update_info, read_chan_update_info);
4210
4211		// Check the serialization hasn't changed.
4212		let legacy_chan_update_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c0100").unwrap();
4213		assert_eq!(encoded_chan_update_info, legacy_chan_update_info_with_some);
4214
4215		// Check we fail if htlc_maximum_msat is not present in either the ChannelUpdateInfo itself
4216		// or the ChannelUpdate enclosed with `last_update_message`.
4217		let legacy_chan_update_info_with_some_and_fail_update: Vec<u8> = <Vec<u8>>::from_hex("b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f42400000271000000014").unwrap();
4218		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
4219			crate::util::ser::Readable::read(
4220				&mut legacy_chan_update_info_with_some_and_fail_update.as_slice(),
4221			);
4222		assert!(read_chan_update_info_res.is_err());
4223
4224		let legacy_chan_update_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c0100").unwrap();
4225		let read_chan_update_info_res: Result<ChannelUpdateInfo, crate::ln::msgs::DecodeError> =
4226			crate::util::ser::Readable::read(&mut legacy_chan_update_info_with_none.as_slice());
4227		assert!(read_chan_update_info_res.is_err());

	// 2. Test encoding/decoding of ChannelInfo
	// Check we can encode/decode ChannelInfo without ChannelUpdateInfo fields present.
	let chan_info_none_updates = ChannelInfo {
		features: channelmanager::provided_channel_features(&config),
		node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
		one_to_two: None,
		node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
		two_to_one: None,
		capacity_sats: None,
		announcement_message: None,
		announcement_received_time: 87654,
		node_one_counter: 0,
		node_two_counter: 1,
	};

	let mut encoded_chan_info: Vec<u8> = Vec::new();
	assert!(chan_info_none_updates.write(&mut encoded_chan_info).is_ok());

	let read_chan_info: ChannelInfo =
		crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
	assert_eq!(chan_info_none_updates, read_chan_info);

	// Check we can encode/decode ChannelInfo with ChannelUpdateInfo fields present.
	let chan_info_some_updates = ChannelInfo {
		features: channelmanager::provided_channel_features(&config),
		node_one: NodeId::from_pubkey(&nodes[0].node.get_our_node_id()),
		one_to_two: Some(chan_update_info.clone()),
		node_two: NodeId::from_pubkey(&nodes[1].node.get_our_node_id()),
		two_to_one: Some(chan_update_info.clone()),
		capacity_sats: None,
		announcement_message: None,
		announcement_received_time: 87654,
		node_one_counter: 0,
		node_two_counter: 1,
	};

	let mut encoded_chan_info: Vec<u8> = Vec::new();
	assert!(chan_info_some_updates.write(&mut encoded_chan_info).is_ok());

	let read_chan_info: ChannelInfo =
		crate::util::ser::Readable::read(&mut encoded_chan_info.as_slice()).unwrap();
	assert_eq!(chan_info_some_updates, read_chan_info);

	// Check the serialization hasn't changed.
	let legacy_chan_info_with_some: Vec<u8> = <Vec<u8>>::from_hex("ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88043636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23083636340004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
	assert_eq!(encoded_chan_info, legacy_chan_info_with_some);

	// Check we can decode legacy ChannelInfo, even if the `two_to_one` / `one_to_two` /
	// `last_update_message` fields fail to decode due to missing htlc_maximum_msat.
	let legacy_chan_info_with_some_and_fail_update = <Vec<u8>>::from_hex("fd01ca00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce8804b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f4240000027100000001406210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c2308b6b6b40004000000170201010402002a060800000000000004d2080909000000000000162e0a0d0c00040000000902040000000a0c8181d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f00083a840000034d013413a70000009000000000000f424000002710000000140a01000c0100").unwrap();
	let read_chan_info: ChannelInfo = crate::util::ser::Readable::read(
		&mut legacy_chan_info_with_some_and_fail_update.as_slice(),
	)
	.unwrap();
	assert_eq!(read_chan_info.announcement_received_time, 87654);
	assert_eq!(read_chan_info.one_to_two, None);
	assert_eq!(read_chan_info.two_to_one, None);
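	// Note the asymmetry with the ChannelUpdateInfo case above: a legacy ChannelInfo
	// still reads successfully, but the undecodable directional updates are dropped
	// (left as `None`) rather than failing the whole read.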

	let legacy_chan_info_with_none: Vec<u8> = <Vec<u8>>::from_hex("ba00020000010800000000000156660221027f921585f2ac0c7c70e36110adecfd8fd14b8a99bfb3d000a283fcac358fce88042e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c010006210355f8d2238a322d16b602bd0ceaad5b01019fb055971eaadcc9b29226a4da6c23082e2e2c0004000000170201010402002a060800000000000004d20801000a0d0c00040000000902040000000a0c01000a01000c0100").unwrap();
	let read_chan_info: ChannelInfo =
		crate::util::ser::Readable::read(&mut legacy_chan_info_with_none.as_slice()).unwrap();
	assert_eq!(read_chan_info.announcement_received_time, 87654);
	assert_eq!(read_chan_info.one_to_two, None);
	assert_eq!(read_chan_info.two_to_one, None);
}

#[test]
fn node_info_is_readable() {
	// 1. Check we can read a valid NodeAnnouncementInfo and fail on an invalid one
	let announcement_message = <Vec<u8>>::from_hex("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a000122013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010000701fffefdfc2607").unwrap();
	let announcement_message =
		NodeAnnouncement::read(&mut announcement_message.as_slice()).unwrap();
	let valid_node_ann_info = NodeAnnouncementInfo::Relayed(announcement_message);
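	// (The `Relayed` variant stores the full signed announcement; accessors like
	// `addresses()` below derive their values from the contained message.)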

	let mut encoded_valid_node_ann_info = Vec::new();
	assert!(valid_node_ann_info.write(&mut encoded_valid_node_ann_info).is_ok());
	let read_valid_node_ann_info =
		NodeAnnouncementInfo::read(&mut encoded_valid_node_ann_info.as_slice()).unwrap();
	assert_eq!(read_valid_node_ann_info, valid_node_ann_info);
	assert_eq!(read_valid_node_ann_info.addresses().len(), 1);

	let encoded_invalid_node_ann_info = <Vec<u8>>::from_hex("3f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d2").unwrap();
	let read_invalid_node_ann_info_res =
		NodeAnnouncementInfo::read(&mut encoded_invalid_node_ann_info.as_slice());
	assert!(read_invalid_node_ann_info_res.is_err());

	// 2. Check we can read a NodeInfo anyway, but set the NodeAnnouncementInfo to None if invalid
	let valid_node_info = NodeInfo {
		channels: Vec::new(),
		announcement_info: Some(valid_node_ann_info),
		node_counter: 0,
	};

	let mut encoded_valid_node_info = Vec::new();
	assert!(valid_node_info.write(&mut encoded_valid_node_info).is_ok());
	let read_valid_node_info = NodeInfo::read(&mut encoded_valid_node_info.as_slice()).unwrap();
	assert_eq!(read_valid_node_info, valid_node_info);

	let encoded_invalid_node_info = <Vec<u8>>::from_hex("4402403f0009000788a000080a51a20204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014004d20400").unwrap();
	let read_invalid_node_info =
		NodeInfo::read(&mut encoded_invalid_node_info.as_slice()).unwrap();
	assert_eq!(read_invalid_node_info.announcement_info, None);
}

#[test]
fn test_node_info_keeps_compatibility() {
	let old_ann_info_with_addresses = <Vec<u8>>::from_hex("3f0009000708a000080a51220204000000000403000000062000000000000000000000000000000000000000000000000000000000000000000a0505014104d2").unwrap();
	let ann_info_with_addresses =
		NodeAnnouncementInfo::read(&mut old_ann_info_with_addresses.as_slice())
			.expect("to be able to read an old NodeAnnouncementInfo with addresses");
	// This serialized info has no announcement_message, but its addresses should still be readable
	assert!(!ann_info_with_addresses.addresses().is_empty());
}

#[test]
fn test_node_id_display() {
	let node_id = NodeId([42; 33]);
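	// A NodeId displays as the hex encoding of its 33 serialized bytes, so 33 copies of
	// the byte 0x2a render as the 66-character string below.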
	assert_eq!(
		format!("{}", &node_id),
		"2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a"
	);
}

#[test]
fn is_tor_only_node() {
	let network_graph = create_network_graph();
	let (secp_ctx, gossip_sync) = create_gossip_sync(&network_graph);

	let node_1_privkey = &SecretKey::from_slice(&[42; 32]).unwrap();
	let node_1_pubkey = PublicKey::from_secret_key(&secp_ctx, node_1_privkey);
	let node_2_privkey = &SecretKey::from_slice(&[41; 32]).unwrap();
	let node_1_id = NodeId::from_pubkey(&node_1_pubkey);

	let announcement =
		get_signed_channel_announcement(|_| {}, node_1_privkey, node_2_privkey, &secp_ctx);
	gossip_sync.handle_channel_announcement(Some(node_1_pubkey), &announcement).unwrap();

	let tcp_ip_v4 = SocketAddress::TcpIpV4 { addr: [255, 254, 253, 252], port: 9735 };
	let tcp_ip_v6 = SocketAddress::TcpIpV6 {
		addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
		port: 9735,
	};
	let onion_v2 =
		SocketAddress::OnionV2([255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 38, 7]);
	let onion_v3 = SocketAddress::OnionV3 {
		ed25519_pubkey: [
			255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240,
			239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224,
		],
		checksum: 32,
		version: 16,
		port: 9735,
	};
	let hostname = SocketAddress::Hostname {
		hostname: Hostname::try_from(String::from("host")).unwrap(),
		port: 9735,
	};

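	// A node should only be considered Tor-only once it has announced at least one address
	// and every announced address is an onion (v2 or v3) address. Walk through the
	// combinations below, bumping the timestamp so each new announcement is accepted.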
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(|_| {}, node_1_privkey, &secp_ctx);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses = vec![
				tcp_ip_v4.clone(),
				tcp_ip_v6.clone(),
				onion_v2.clone(),
				onion_v3.clone(),
				hostname.clone(),
			];
			announcement.timestamp += 1000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses =
				vec![tcp_ip_v4.clone(), tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
			announcement.timestamp += 2000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses =
				vec![tcp_ip_v6.clone(), onion_v2.clone(), onion_v3.clone()];
			announcement.timestamp += 3000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

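	// With only onion addresses remaining, the node becomes Tor-only.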
	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses = vec![onion_v2.clone(), onion_v3.clone()];
			announcement.timestamp += 4000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses = vec![onion_v2.clone()];
			announcement.timestamp += 5000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());

	let announcement = get_signed_node_announcement(
		|announcement| {
			announcement.addresses = vec![tcp_ip_v4.clone()];
			announcement.timestamp += 6000;
		},
		node_1_privkey,
		&secp_ctx,
	);
	gossip_sync.handle_node_announcement(Some(node_1_pubkey), &announcement).unwrap();
	assert!(!network_graph.read_only().node(&node_1_id).unwrap().is_tor_only());
}
}

#[cfg(ldk_bench)]
pub mod benches {
	use super::*;
	use criterion::{black_box, Criterion};
	use std::io::Read;

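	// Benchmark deserializing a full serialized network graph: the file contents are read
	// into memory once up front, then `NetworkGraph::read` is timed over the in-memory
	// buffer so disk I/O stays out of the measurement.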
	pub fn read_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut v = Vec::new();
		d.read_to_end(&mut v).unwrap();
		bench.bench_function("read_network_graph", |b| {
			b.iter(|| {
				NetworkGraph::read(&mut crate::io::Cursor::new(black_box(&v)), &logger).unwrap()
			})
		});
	}

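	// Benchmark the write direction: the graph is deserialized once outside the loop and
	// only `encode` runs inside the timed closure.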
	pub fn write_network_graph(bench: &mut Criterion) {
		let logger = crate::util::test_utils::TestLogger::new();
		let (mut d, _) = crate::routing::router::bench_utils::get_graph_scorer_file().unwrap();
		let mut graph_buffer = Vec::new();
		d.read_to_end(&mut graph_buffer).unwrap();
		let net_graph = NetworkGraph::read(&mut &graph_buffer[..], &logger).unwrap();
		bench.bench_function("write_network_graph", |b| b.iter(|| black_box(&net_graph).encode()));
	}
}