lightning/util/persist.rs

1// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
2// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
3// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
4// You may not use this file except in accordance with one or both of these
5// licenses.
6
7//! This module contains a simple key-value store trait [`KVStore`] that
8//! allows one to implement the persistence for [`ChannelManager`], [`NetworkGraph`],
9//! and [`ChannelMonitor`] all in one place.
10//!
11//! [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
12
13use bitcoin::{BlockHash, Txid};
14use core::cmp;
15use core::ops::Deref;
16use core::str::FromStr;
17
18use crate::prelude::*;
19use crate::{io, log_error};
20
21use crate::chain;
22use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
23use crate::chain::chainmonitor::Persist;
24use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
25use crate::chain::transaction::OutPoint;
26use crate::ln::channelmanager::AChannelManager;
27use crate::routing::gossip::NetworkGraph;
28use crate::routing::scoring::WriteableScore;
29use crate::sign::{ecdsa::EcdsaChannelSigner, EntropySource, SignerProvider};
30use crate::util::logger::Logger;
31use crate::util::ser::{Readable, ReadableArgs, Writeable};
32
33/// The alphabet of characters allowed for namespaces and keys.
34pub const KVSTORE_NAMESPACE_KEY_ALPHABET: &str =
35	"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-";
36
37/// The maximum number of characters namespaces and keys may have.
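///
/// # Examples
///
/// A sketch of the kind of validity check a [`KVStore`] implementation might apply to namespaces
/// and keys; the helper shown here is illustrative and not part of this module:
///
/// ```
/// use lightning::util::persist::{KVSTORE_NAMESPACE_KEY_ALPHABET, KVSTORE_NAMESPACE_KEY_MAX_LEN};
///
/// fn is_valid_kvstore_str(s: &str) -> bool {
/// 	s.len() <= KVSTORE_NAMESPACE_KEY_MAX_LEN
/// 		&& s.chars().all(|c| KVSTORE_NAMESPACE_KEY_ALPHABET.contains(c))
/// }
///
/// assert!(is_valid_kvstore_str("channel_monitors"));
/// assert!(!is_valid_kvstore_str("invalid/key"));
/// ```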
38pub const KVSTORE_NAMESPACE_KEY_MAX_LEN: usize = 120;
39
40/// The primary namespace under which the [`ChannelManager`] will be persisted.
41///
42/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
43pub const CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
44/// The secondary namespace under which the [`ChannelManager`] will be persisted.
45///
46/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
47pub const CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
48/// The key under which the [`ChannelManager`] will be persisted.
49///
50/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
51pub const CHANNEL_MANAGER_PERSISTENCE_KEY: &str = "manager";
52
53/// The primary namespace under which [`ChannelMonitor`]s will be persisted.
54pub const CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitors";
55/// The secondary namespace under which [`ChannelMonitor`]s will be persisted.
56pub const CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
57/// The primary namespace under which [`ChannelMonitorUpdate`]s will be persisted.
58pub const CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE: &str = "monitor_updates";
59
60/// The primary namespace under which archived [`ChannelMonitor`]s will be persisted.
61pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE: &str = "archived_monitors";
62/// The secondary namespace under which archived [`ChannelMonitor`]s will be persisted.
63pub const ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
64
65/// The primary namespace under which the [`NetworkGraph`] will be persisted.
66pub const NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
67/// The secondary namespace under which the [`NetworkGraph`] will be persisted.
68pub const NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
69/// The key under which the [`NetworkGraph`] will be persisted.
70pub const NETWORK_GRAPH_PERSISTENCE_KEY: &str = "network_graph";
71
72/// The primary namespace under which the [`WriteableScore`] will be persisted.
73pub const SCORER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
74/// The secondary namespace under which the [`WriteableScore`] will be persisted.
75pub const SCORER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
76/// The key under which the [`WriteableScore`] will be persisted.
77pub const SCORER_PERSISTENCE_KEY: &str = "scorer";
78
79/// The primary namespace under which [`OutputSweeper`] state will be persisted.
80///
81/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
82pub const OUTPUT_SWEEPER_PERSISTENCE_PRIMARY_NAMESPACE: &str = "";
83/// The secondary namespace under which [`OutputSweeper`] state will be persisted.
84///
85/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
86pub const OUTPUT_SWEEPER_PERSISTENCE_SECONDARY_NAMESPACE: &str = "";
88/// The key under which [`OutputSweeper`] state will be persisted.
89///
90/// [`OutputSweeper`]: crate::util::sweep::OutputSweeper
91pub const OUTPUT_SWEEPER_PERSISTENCE_KEY: &str = "output_sweeper";
92
93/// A sentinel value to be prepended to monitors persisted by the [`MonitorUpdatingPersister`].
94///
95/// This serves to prevent someone from accidentally loading such monitors (which may need
96/// updates applied to be current) with another implementation.
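///
/// # Examples
///
/// A sketch of checking for (and stripping) the sentinel, e.g. as part of a downgrade to a
/// persister that expects plain [`ChannelMonitor`] encodings; the helper here is illustrative
/// only:
///
/// ```
/// use lightning::util::persist::MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL;
///
/// fn strip_sentinel(stored: &[u8]) -> &[u8] {
/// 	// If the bytes start with the sentinel, the remainder is the plain monitor encoding.
/// 	stored.strip_prefix(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL).unwrap_or(stored)
/// }
///
/// assert_eq!(strip_sentinel(&[0xFF, 0xFF, 0x01]), &[0x01]);
/// assert_eq!(strip_sentinel(&[0x01, 0x02]), &[0x01, 0x02]);
/// ```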
97pub const MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL: &[u8] = &[0xFF; 2];
98
99/// Provides an interface that allows storage and retrieval of persisted values that are associated
100/// with given keys.
101///
102/// In order to avoid collisions the key space is segmented based on the given `primary_namespace`s
103/// and `secondary_namespace`s. Implementations of this trait are free to handle them in different
104/// ways, as long as per-namespace key uniqueness is asserted.
105///
106/// Keys and namespaces are required to be valid ASCII strings in the range of
107/// [`KVSTORE_NAMESPACE_KEY_ALPHABET`] and no longer than [`KVSTORE_NAMESPACE_KEY_MAX_LEN`]. Empty
108/// primary namespaces and secondary namespaces (`""`) are assumed to be valid; however, if
109/// `primary_namespace` is empty, `secondary_namespace` is required to be empty, too. This means
110/// that concerns should always be separated by primary namespace first, before secondary
111/// namespaces are used. While the number of primary namespaces will be relatively small and is
112/// determined at compile time, there may be many secondary namespaces per primary namespace. Note
113/// that per-namespace uniqueness needs to also hold for keys *and* namespaces in any given
114/// namespace, i.e., conflicts between keys and equally named
115/// primary namespaces/secondary namespaces must be avoided.
116///
117/// **Note:** Users migrating custom persistence backends from the pre-v0.0.117 `KVStorePersister`
118/// interface can use a concatenation of `[{primary_namespace}/[{secondary_namespace}/]]{key}` to
119/// recover a `key` compatible with the data model previously assumed by `KVStorePersister::persist`.
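///
/// # Examples
///
/// A minimal sketch of how a caller might use a [`KVStore`] implementation; the namespaces, key,
/// and value shown here are illustrative only:
///
/// ```
/// use lightning::util::persist::KVStore;
///
/// fn roundtrip<K: KVStore>(store: &K) {
/// 	// Persist a value under the (empty) primary and secondary namespaces.
/// 	store.write("", "", "some_key", b"some_value").unwrap();
/// 	// Read it back. A missing key would surface as `io::ErrorKind::NotFound`.
/// 	let value = store.read("", "", "some_key").unwrap();
/// 	assert_eq!(value, b"some_value".to_vec());
/// 	// Remove it again. `lazy = false` asks for the deletion to be persisted promptly.
/// 	store.remove("", "", "some_key", false).unwrap();
/// }
/// ```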
120pub trait KVStore {
121	/// Returns the data stored for the given `primary_namespace`, `secondary_namespace`, and
122	/// `key`.
123	///
124	/// Returns an [`ErrorKind::NotFound`] if the given `key` could not be found in the given
125	/// `primary_namespace` and `secondary_namespace`.
126	///
127	/// [`ErrorKind::NotFound`]: io::ErrorKind::NotFound
128	fn read(
129		&self, primary_namespace: &str, secondary_namespace: &str, key: &str,
130	) -> Result<Vec<u8>, io::Error>;
131	/// Persists the given data under the given `key`.
132	///
133	/// Will create the given `primary_namespace` and `secondary_namespace` if not already present
134	/// in the store.
135	fn write(
136		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, buf: &[u8],
137	) -> Result<(), io::Error>;
138	/// Removes any data that had previously been persisted under the given `key`.
139	///
140	/// If the `lazy` flag is set to `true`, the backend implementation might choose to lazily
141	/// remove the given `key` at some point in time after the method returns, e.g., as part of an
142	/// eventual batch deletion of multiple keys. As a consequence, subsequent calls to
143	/// [`KVStore::list`] might include the removed key until the changes are actually persisted.
144	///
145	/// Note that while setting the `lazy` flag reduces the I/O burden of multiple subsequent
146	/// `remove` calls, it also influences the atomicity guarantees as lazy `remove`s could
147	/// potentially get lost on crash after the method returns. Therefore, this flag should only be
148	/// set for `remove` operations that can be safely replayed at a later time.
149	///
150	/// Returns successfully if no data will be stored for the given `primary_namespace`,
151	/// `secondary_namespace`, and `key`, independently of whether it was present before its
152/// invocation or not.
153	fn remove(
154		&self, primary_namespace: &str, secondary_namespace: &str, key: &str, lazy: bool,
155	) -> Result<(), io::Error>;
156	/// Returns a list of keys that are stored under the given `secondary_namespace` in
157	/// `primary_namespace`.
158	///
159	/// Returns the keys in arbitrary order, so users requiring a particular order need to sort the
160	/// returned keys. Returns an empty list if `primary_namespace` or `secondary_namespace` is unknown.
161	fn list(
162		&self, primary_namespace: &str, secondary_namespace: &str,
163	) -> Result<Vec<String>, io::Error>;
164}
165
166/// Provides additional interface methods that are required for [`KVStore`]-to-[`KVStore`]
167/// data migration.
168pub trait MigratableKVStore: KVStore {
169	/// Returns *all* known keys as a list of `primary_namespace`, `secondary_namespace`, `key` tuples.
170	///
171/// This is useful for migrating data from one [`KVStore`] implementation to
172/// another.
173	///
174	/// Must exhaustively return all entries known to the store to ensure no data is missed, but
175	/// may return the items in arbitrary order.
176	fn list_all_keys(&self) -> Result<Vec<(String, String, String)>, io::Error>;
177}
178
179/// Migrates all data from one store to another.
180///
181/// This operation assumes that `target_store` is empty, i.e., any data present under copied keys
182/// might get overridden. Users must ensure `source_store` is not modified during the operation,
183/// otherwise no consistency guarantees can be given.
184///
185/// Will abort and return an error if any IO operation fails. Note that in this case the
186/// `target_store` might get left in an intermediate state.
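///
/// # Examples
///
/// A minimal sketch of switching persistence backends; `old_store` and `new_store` are assumed to
/// be [`MigratableKVStore`] implementations provided by the caller:
///
/// ```
/// use lightning::util::persist::{migrate_kv_store_data, MigratableKVStore};
///
/// fn switch_backends<S: MigratableKVStore, T: MigratableKVStore>(
/// 	old_store: &mut S, new_store: &mut T,
/// ) {
/// 	migrate_kv_store_data(old_store, new_store).expect("failed to migrate store data");
/// }
/// ```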
187pub fn migrate_kv_store_data<S: MigratableKVStore, T: MigratableKVStore>(
188	source_store: &mut S, target_store: &mut T,
189) -> Result<(), io::Error> {
190	let keys_to_migrate = source_store.list_all_keys()?;
191
192	for (primary_namespace, secondary_namespace, key) in &keys_to_migrate {
193		let data = source_store.read(primary_namespace, secondary_namespace, key)?;
194		target_store.write(primary_namespace, secondary_namespace, key, &data)?;
195	}
196
197	Ok(())
198}
199
200/// Trait that handles persisting a [`ChannelManager`], [`NetworkGraph`], and [`WriteableScore`] to disk.
201///
202/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
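///
/// # Examples
///
/// A sketch of persisting all three objects in one place. Any [`KVStore`] satisfies the `P` bound
/// via the blanket implementation below; all values are assumed to be supplied by the caller:
///
/// ```
/// use core::ops::Deref;
///
/// use lightning::io;
/// use lightning::ln::channelmanager::AChannelManager;
/// use lightning::routing::gossip::NetworkGraph;
/// use lightning::routing::scoring::WriteableScore;
/// use lightning::util::logger::Logger;
/// use lightning::util::persist::Persister;
///
/// fn persist_all<'a, P, CM, L, S>(
/// 	persister: &P, channel_manager: &CM, network_graph: &NetworkGraph<L>, scorer: &S,
/// ) -> Result<(), io::Error>
/// where
/// 	P: Persister<'a, CM, L, S>,
/// 	CM: Deref,
/// 	L: Deref,
/// 	S: Deref,
/// 	CM::Target: AChannelManager + 'static,
/// 	L::Target: Logger + 'static,
/// 	S::Target: WriteableScore<'a>,
/// {
/// 	persister.persist_manager(channel_manager)?;
/// 	persister.persist_graph(network_graph)?;
/// 	persister.persist_scorer(scorer)
/// }
/// ```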
203pub trait Persister<'a, CM: Deref, L: Deref, S: Deref>
204where
205	CM::Target: 'static + AChannelManager,
206	L::Target: 'static + Logger,
207	S::Target: WriteableScore<'a>,
208{
209	/// Persist the given [`ChannelManager`] to disk, returning an error if persistence failed.
210	///
211	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
212	fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error>;
213
214	/// Persist the given [`NetworkGraph`] to disk, returning an error if persistence failed.
215	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error>;
216
217	/// Persist the given [`WriteableScore`] to disk, returning an error if persistence failed.
218	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error>;
219}
220
221impl<'a, A: KVStore + ?Sized, CM: Deref, L: Deref, S: Deref> Persister<'a, CM, L, S> for A
222where
223	CM::Target: 'static + AChannelManager,
224	L::Target: 'static + Logger,
225	S::Target: WriteableScore<'a>,
226{
227	fn persist_manager(&self, channel_manager: &CM) -> Result<(), io::Error> {
228		self.write(
229			CHANNEL_MANAGER_PERSISTENCE_PRIMARY_NAMESPACE,
230			CHANNEL_MANAGER_PERSISTENCE_SECONDARY_NAMESPACE,
231			CHANNEL_MANAGER_PERSISTENCE_KEY,
232			&channel_manager.get_cm().encode(),
233		)
234	}
235
236	fn persist_graph(&self, network_graph: &NetworkGraph<L>) -> Result<(), io::Error> {
237		self.write(
238			NETWORK_GRAPH_PERSISTENCE_PRIMARY_NAMESPACE,
239			NETWORK_GRAPH_PERSISTENCE_SECONDARY_NAMESPACE,
240			NETWORK_GRAPH_PERSISTENCE_KEY,
241			&network_graph.encode(),
242		)
243	}
244
245	fn persist_scorer(&self, scorer: &S) -> Result<(), io::Error> {
246		self.write(
247			SCORER_PERSISTENCE_PRIMARY_NAMESPACE,
248			SCORER_PERSISTENCE_SECONDARY_NAMESPACE,
249			SCORER_PERSISTENCE_KEY,
250			&scorer.encode(),
251		)
252	}
253}
254
255impl<ChannelSigner: EcdsaChannelSigner, K: KVStore + ?Sized> Persist<ChannelSigner> for K {
256	// TODO: We really need a way for the persister to inform the user that it's time to crash/shut
257	// down once these start returning failure.
258	// Then we should return InProgress rather than UnrecoverableError, implying we should probably
259	// just shut down the node since we're not retrying persistence!
260
261	fn persist_new_channel(
262		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
263	) -> chain::ChannelMonitorUpdateStatus {
264		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
265		match self.write(
266			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
267			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
268			&key,
269			&monitor.encode(),
270		) {
271			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
272			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError,
273		}
274	}
275
276	fn update_persisted_channel(
277		&self, funding_txo: OutPoint, _update: Option<&ChannelMonitorUpdate>,
278		monitor: &ChannelMonitor<ChannelSigner>,
279	) -> chain::ChannelMonitorUpdateStatus {
280		let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index);
281		match self.write(
282			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
283			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
284			&key,
285			&monitor.encode(),
286		) {
287			Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
288			Err(_) => chain::ChannelMonitorUpdateStatus::UnrecoverableError,
289		}
290	}
291
292	fn archive_persisted_channel(&self, funding_txo: OutPoint) {
293		let monitor_name = MonitorName::from(funding_txo);
294		let monitor = match self.read(
295			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
296			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
297			monitor_name.as_str(),
298		) {
299			Ok(monitor) => monitor,
300			Err(_) => return,
301		};
302		match self.write(
303			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
304			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
305			monitor_name.as_str(),
306			&monitor,
307		) {
308			Ok(()) => {},
309			Err(_e) => return,
310		};
311		let _ = self.remove(
312			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
313			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
314			monitor_name.as_str(),
315			true,
316		);
317	}
318}
319
320/// Read previously persisted [`ChannelMonitor`]s from the store.
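///
/// # Examples
///
/// A sketch of loading monitors at startup, assuming `kv_store`, `entropy_source`, and
/// `signer_provider` are supplied by the caller:
///
/// ```
/// use lightning::sign::{EntropySource, SignerProvider};
/// use lightning::util::persist::{read_channel_monitors, KVStore};
///
/// fn load<K: KVStore, ES: EntropySource, SP: SignerProvider>(
/// 	kv_store: &K, entropy_source: &ES, signer_provider: &SP,
/// ) {
/// 	let monitors = read_channel_monitors(kv_store, entropy_source, signer_provider)
/// 		.expect("failed to read channel monitors");
/// 	// Each entry is a `(BlockHash, ChannelMonitor)` pair.
/// 	let _ = monitors;
/// }
/// ```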
321pub fn read_channel_monitors<K: Deref, ES: Deref, SP: Deref>(
322	kv_store: K, entropy_source: ES, signer_provider: SP,
323) -> Result<Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>, io::Error>
324where
325	K::Target: KVStore,
326	ES::Target: EntropySource + Sized,
327	SP::Target: SignerProvider + Sized,
328{
329	let mut res = Vec::new();
330
331	for stored_key in kv_store.list(
332		CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
333		CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
334	)? {
335		if stored_key.len() < 66 {
336			return Err(io::Error::new(
337				io::ErrorKind::InvalidData,
338				"Stored key has invalid length",
339			));
340		}
341
342		let txid = Txid::from_str(stored_key.split_at(64).0).map_err(|_| {
343			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
344		})?;
345
346		let index: u16 = stored_key.split_at(65).1.parse().map_err(|_| {
347			io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
348		})?;
349
350		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
351			&mut io::Cursor::new(kv_store.read(
352				CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
353				CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
354				&stored_key,
355			)?),
356			(&*entropy_source, &*signer_provider),
357		) {
358			Ok((block_hash, channel_monitor)) => {
359				if channel_monitor.get_funding_txo().0.txid != txid
360					|| channel_monitor.get_funding_txo().0.index != index
361				{
362					return Err(io::Error::new(
363						io::ErrorKind::InvalidData,
364						"ChannelMonitor was stored under the wrong key",
365					));
366				}
367				res.push((block_hash, channel_monitor));
368			},
369			Err(_) => {
370				return Err(io::Error::new(
371					io::ErrorKind::InvalidData,
372					"Failed to read ChannelMonitor",
373				))
374			},
375		}
376	}
377	Ok(res)
378}
379
380/// Implements [`Persist`] in a way that writes and reads both [`ChannelMonitor`]s and
381/// [`ChannelMonitorUpdate`]s.
382///
383/// # Overview
384///
385/// The main benefit this provides over the [`KVStore`]'s [`Persist`] implementation is decreased
386/// I/O bandwidth and storage churn, at the expense of more IOPS (including listing, reading, and
387/// deleting) and complexity. This is because it writes channel monitor differential updates,
388/// whereas the other (default) implementation rewrites the entire monitor on each update. For
389/// routing nodes, updates can happen many times per second to a channel, and monitors can be tens
390/// of megabytes (or more). Updates can be as small as a few hundred bytes.
391///
392/// Note that monitors written with `MonitorUpdatingPersister` are _not_ backward-compatible with
393/// the default [`KVStore`]'s [`Persist`] implementation. They have a prepended byte sequence,
394/// [`MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL`], applied to prevent deserialization with other
395/// persisters. This is because monitors written by this struct _may_ have unapplied updates. In
396/// order to downgrade, you must ensure that all updates are applied to the monitor, and remove the
397/// sentinel bytes.
398///
399/// # Storing monitors
400///
401/// Monitors are stored by implementing the [`Persist`] trait, which has two functions:
402///
403///   - [`Persist::persist_new_channel`], which persists whole [`ChannelMonitor`]s.
404///   - [`Persist::update_persisted_channel`], which persists only a [`ChannelMonitorUpdate`]
405///
406/// Whole [`ChannelMonitor`]s are stored in the [`CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE`],
407/// using the familiar encoding of an [`OutPoint`] (for example, `[SOME-64-CHAR-HEX-STRING]_1`).
408///
409/// Each [`ChannelMonitorUpdate`] is stored in a dynamic secondary namespace, as follows:
410///
411///   - primary namespace: [`CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE`]
412///   - secondary namespace: [the monitor's encoded outpoint name]
413///
414/// Under that secondary namespace, each update is stored with a number string, like `21`, which
415/// represents its `update_id` value.
416///
417/// For example, consider this channel, named for its transaction ID and index, or [`OutPoint`]:
418///
419///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
420///   - Index: `1`
421///
422/// Full channel monitors would be stored at a single key:
423///
424/// `[CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
425///
426/// Updates would be stored as follows (with `/` delimiting primary_namespace/secondary_namespace/key):
427///
428/// ```text
429/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/1
430/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/2
431/// [CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE]/deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1/3
432/// ```
433/// ... and so on.
434///
435/// # Reading channel state from storage
436///
437/// Channel state can be reconstructed by calling
438/// [`MonitorUpdatingPersister::read_all_channel_monitors_with_updates`]. Alternatively, users can
439/// list channel monitors themselves and load channels individually using
440/// [`MonitorUpdatingPersister::read_channel_monitor_with_updates`].
441///
442/// ## EXTREMELY IMPORTANT
443///
444/// It is extremely important that your [`KVStore::read`] implementation uses the
445/// [`io::ErrorKind::NotFound`] variant correctly: that is, when a file is not found, and _only_ in
446/// that circumstance (not when there is really a permissions error, for example). This is because
447/// neither channel monitor reading function lists updates. Instead, each one reads the monitor and,
448/// using its stored `update_id`, synthesizes update storage keys and tries them in sequence until
449/// one is not found. All _other_ errors will be bubbled up in the function's [`Result`].
450///
451/// # Pruning stale channel updates
452///
453/// Stale updates are pruned when the consolidation threshold given by `maximum_pending_updates` is
454/// reached. Monitor updates in the range between the latest `update_id` and
455/// `update_id - maximum_pending_updates` are deleted.
456/// The `lazy` flag is used on the [`KVStore::remove`] method, so there are no guarantees that the deletions
457/// will complete. However, stale updates are not a problem for data integrity, since only updates
458/// with an `update_id` higher than the stored [`ChannelMonitor`]'s are ever read.
459///
460/// If you have many stale updates stored (such as after a crash with pending lazy deletes), and
461/// would like to get rid of them, consider using the
462/// [`MonitorUpdatingPersister::cleanup_stale_updates`] function.
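///
/// # Examples
///
/// A sketch of typical startup usage; the `kv_store`, `logger`, signer, broadcaster, and fee
/// estimator values, as well as the choice of `100` for `maximum_pending_updates`, are
/// assumptions supplied by the caller:
///
/// ```
/// use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
/// use lightning::sign::{EntropySource, SignerProvider};
/// use lightning::util::logger::Logger;
/// use lightning::util::persist::{KVStore, MonitorUpdatingPersister};
///
/// fn load_channel_state<K: KVStore, L: Logger, ES: EntropySource, SP: SignerProvider,
/// 	BI: BroadcasterInterface, FE: FeeEstimator>(
/// 	kv_store: &K, logger: &L, entropy_source: &ES, signer_provider: &SP, broadcaster: &BI,
/// 	fee_estimator: &FE,
/// ) {
/// 	// Allow up to 100 pending updates per monitor before consolidating into a full write.
/// 	let persister = MonitorUpdatingPersister::new(
/// 		kv_store, logger, 100, entropy_source, signer_provider, broadcaster, fee_estimator,
/// 	);
/// 	// Load every persisted monitor, with all of its pending updates applied.
/// 	let monitors = persister
/// 		.read_all_channel_monitors_with_updates()
/// 		.expect("failed to read channel monitors");
/// 	// `monitors` holds `(BlockHash, ChannelMonitor)` pairs ready for chain sync.
/// 	let _ = monitors;
/// }
/// ```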
463pub struct MonitorUpdatingPersister<K: Deref, L: Deref, ES: Deref, SP: Deref, BI: Deref, FE: Deref>
464where
465	K::Target: KVStore,
466	L::Target: Logger,
467	ES::Target: EntropySource + Sized,
468	SP::Target: SignerProvider + Sized,
469	BI::Target: BroadcasterInterface,
470	FE::Target: FeeEstimator,
471{
472	kv_store: K,
473	logger: L,
474	maximum_pending_updates: u64,
475	entropy_source: ES,
476	signer_provider: SP,
477	broadcaster: BI,
478	fee_estimator: FE,
479}
480
481#[allow(dead_code)]
482impl<K: Deref, L: Deref, ES: Deref, SP: Deref, BI: Deref, FE: Deref>
483	MonitorUpdatingPersister<K, L, ES, SP, BI, FE>
484where
485	K::Target: KVStore,
486	L::Target: Logger,
487	ES::Target: EntropySource + Sized,
488	SP::Target: SignerProvider + Sized,
489	BI::Target: BroadcasterInterface,
490	FE::Target: FeeEstimator,
491{
492	/// Constructs a new [`MonitorUpdatingPersister`].
493	///
494	/// The `maximum_pending_updates` parameter controls how many updates may be stored before a
495	/// [`MonitorUpdatingPersister`] consolidates updates by writing a full monitor. Note that
496	/// consolidation will frequently occur with fewer updates than what you set here; this number
497	/// is merely the maximum that may be stored. When setting this value, consider that for higher
498	/// values of `maximum_pending_updates`:
499	///
500	///   - [`MonitorUpdatingPersister`] will tend to write more [`ChannelMonitorUpdate`]s than
501	/// [`ChannelMonitor`]s, approaching one [`ChannelMonitor`] write for every
502	/// `maximum_pending_updates` [`ChannelMonitorUpdate`]s.
503	///   - [`MonitorUpdatingPersister`] will issue deletes differently. Lazy deletes will come in
504	/// "waves" for each [`ChannelMonitor`] write. A larger `maximum_pending_updates` means bigger,
505	/// less frequent "waves."
506	///   - [`MonitorUpdatingPersister`] will potentially have more listing to do if you need to run
507	/// [`MonitorUpdatingPersister::cleanup_stale_updates`].
508	pub fn new(
509		kv_store: K, logger: L, maximum_pending_updates: u64, entropy_source: ES,
510		signer_provider: SP, broadcaster: BI, fee_estimator: FE,
511	) -> Self {
512		MonitorUpdatingPersister {
513			kv_store,
514			logger,
515			maximum_pending_updates,
516			entropy_source,
517			signer_provider,
518			broadcaster,
519			fee_estimator,
520		}
521	}
522
523	/// Reads all stored channel monitors, along with any stored updates for them.
524	///
525	/// It is extremely important that your [`KVStore::read`] implementation uses the
526	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
527	/// documentation for [`MonitorUpdatingPersister`].
528	pub fn read_all_channel_monitors_with_updates(
529		&self,
530	) -> Result<
531		Vec<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>,
532		io::Error,
533	> {
534		let monitor_list = self.kv_store.list(
535			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
536			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
537		)?;
538		let mut res = Vec::with_capacity(monitor_list.len());
539		for monitor_key in monitor_list {
540			res.push(self.read_channel_monitor_with_updates(monitor_key)?)
541		}
542		Ok(res)
543	}
544
545	/// Read a single channel monitor, along with any stored updates for it.
546	///
547	/// It is extremely important that your [`KVStore::read`] implementation uses the
548	/// [`io::ErrorKind::NotFound`] variant correctly. For more information, please see the
549	/// documentation for [`MonitorUpdatingPersister`].
550	///
551	/// For `monitor_key`, channel storage keys are the channel's transaction ID and index, or
552	/// [`OutPoint`], with an underscore `_` between them. For example, given:
553	///
554	///   - Transaction ID: `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef`
555	///   - Index: `1`
556	///
557	/// The correct `monitor_key` would be:
558	/// `deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1`
559	///
560	/// Loading a large number of monitors will be faster if done in parallel. You can use this
561	/// function to accomplish this. Take care to limit the number of parallel readers.
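	///
	/// # Examples
	///
	/// A sketch of deriving a valid `monitor_key` from a channel's funding [`OutPoint`]:
	///
	/// ```
	/// use std::str::FromStr;
	///
	/// use bitcoin::Txid;
	///
	/// use lightning::chain::transaction::OutPoint;
	/// use lightning::util::persist::MonitorName;
	///
	/// let funding_txo = OutPoint {
	/// 	txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
	/// 	index: 1,
	/// };
	/// // The resulting key can be passed to `read_channel_monitor_with_updates`.
	/// let monitor_key = MonitorName::from(funding_txo).as_str().to_string();
	/// assert_eq!(monitor_key, "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
	/// ```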
562	pub fn read_channel_monitor_with_updates(
563		&self, monitor_key: String,
564	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
565	{
566		let monitor_name = MonitorName::new(monitor_key)?;
567		let (block_hash, monitor) = self.read_monitor(&monitor_name)?;
568		let mut current_update_id = monitor.get_latest_update_id();
569		loop {
570			current_update_id = match current_update_id.checked_add(1) {
571				Some(next_update_id) => next_update_id,
572				None => break,
573			};
574			let update_name = UpdateName::from(current_update_id);
575			let update = match self.read_monitor_update(&monitor_name, &update_name) {
576				Ok(update) => update,
577				Err(err) if err.kind() == io::ErrorKind::NotFound => {
578					// We can't find any more updates, so we are done.
579					break;
580				},
581				Err(err) => return Err(err),
582			};
583
584			monitor
585				.update_monitor(&update, &self.broadcaster, &self.fee_estimator, &self.logger)
586				.map_err(|e| {
587				log_error!(
588					self.logger,
589					"Monitor update failed. monitor: {} update: {} reason: {:?}",
590					monitor_name.as_str(),
591					update_name.as_str(),
592					e
593				);
594				io::Error::new(io::ErrorKind::Other, "Monitor update failed")
595			})?;
596		}
597		Ok((block_hash, monitor))
598	}
599
600	/// Read a channel monitor.
601	fn read_monitor(
602		&self, monitor_name: &MonitorName,
603	) -> Result<(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), io::Error>
604	{
605		let outpoint: OutPoint = monitor_name.try_into()?;
606		let mut monitor_cursor = io::Cursor::new(self.kv_store.read(
607			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
608			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
609			monitor_name.as_str(),
610		)?);
611		// Discard the sentinel bytes if found.
612		if monitor_cursor.get_ref().starts_with(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL) {
613			monitor_cursor.set_position(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() as u64);
614		}
615		match <(BlockHash, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>)>::read(
616			&mut monitor_cursor,
617			(&*self.entropy_source, &*self.signer_provider),
618		) {
619			Ok((blockhash, channel_monitor)) => {
620				if channel_monitor.get_funding_txo().0.txid != outpoint.txid
621					|| channel_monitor.get_funding_txo().0.index != outpoint.index
622				{
623					log_error!(
624						self.logger,
625						"ChannelMonitor {} was stored under the wrong key!",
626						monitor_name.as_str()
627					);
628					Err(io::Error::new(
629						io::ErrorKind::InvalidData,
630						"ChannelMonitor was stored under the wrong key",
631					))
632				} else {
633					Ok((blockhash, channel_monitor))
634				}
635			},
636			Err(e) => {
637				log_error!(
638					self.logger,
639					"Failed to read ChannelMonitor {}, reason: {}",
640					monitor_name.as_str(),
641					e,
642				);
643				Err(io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitor"))
644			},
645		}
646	}
647
648	/// Read a channel monitor update.
649	fn read_monitor_update(
650		&self, monitor_name: &MonitorName, update_name: &UpdateName,
651	) -> Result<ChannelMonitorUpdate, io::Error> {
652		let update_bytes = self.kv_store.read(
653			CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
654			monitor_name.as_str(),
655			update_name.as_str(),
656		)?;
657		ChannelMonitorUpdate::read(&mut io::Cursor::new(update_bytes)).map_err(|e| {
658			log_error!(
659				self.logger,
660				"Failed to read ChannelMonitorUpdate {}/{}/{}, reason: {}",
661				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
662				monitor_name.as_str(),
663				update_name.as_str(),
664				e,
665			);
666			io::Error::new(io::ErrorKind::InvalidData, "Failed to read ChannelMonitorUpdate")
667		})
668	}
669
670	/// Cleans up stale updates for all monitors.
671	///
672	/// This function works by first listing all monitors, and then for each of them, listing all
673	/// updates. Updates with an `update_id` less than or equal to that of the stored monitor
674	/// are deleted. The deletion can either be lazy or non-lazy based on the `lazy` flag; this will
675	/// be passed to [`KVStore::remove`].
676	pub fn cleanup_stale_updates(&self, lazy: bool) -> Result<(), io::Error> {
677		let monitor_keys = self.kv_store.list(
678			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
679			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
680		)?;
681		for monitor_key in monitor_keys {
682			let monitor_name = MonitorName::new(monitor_key)?;
683			let (_, current_monitor) = self.read_monitor(&monitor_name)?;
684			let updates = self.kv_store.list(
685				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
686				monitor_name.as_str(),
687			)?;
688			for update in updates {
689				let update_name = UpdateName::new(update)?;
690				// If the update_id is less than or equal to the stored monitor's, delete it.
691				if update_name.0 <= current_monitor.get_latest_update_id() {
692					self.kv_store.remove(
693						CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
694						monitor_name.as_str(),
695						update_name.as_str(),
696						lazy,
697					)?;
698				}
699			}
700		}
701		Ok(())
702	}
703}
704
705impl<
706		ChannelSigner: EcdsaChannelSigner,
707		K: Deref,
708		L: Deref,
709		ES: Deref,
710		SP: Deref,
711		BI: Deref,
712		FE: Deref,
713	> Persist<ChannelSigner> for MonitorUpdatingPersister<K, L, ES, SP, BI, FE>
714where
715	K::Target: KVStore,
716	L::Target: Logger,
717	ES::Target: EntropySource + Sized,
718	SP::Target: SignerProvider + Sized,
719	BI::Target: BroadcasterInterface,
720	FE::Target: FeeEstimator,
721{
722	/// Persists a new channel. This means writing the entire monitor to the
723	/// parameterized [`KVStore`].
724	fn persist_new_channel(
725		&self, funding_txo: OutPoint, monitor: &ChannelMonitor<ChannelSigner>,
726	) -> chain::ChannelMonitorUpdateStatus {
727		// Determine the proper key for this monitor
728		let monitor_name = MonitorName::from(funding_txo);
729		// Serialize and write the new monitor
730		let mut monitor_bytes = Vec::with_capacity(
731			MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL.len() + monitor.serialized_length(),
732		);
733		monitor_bytes.extend_from_slice(MONITOR_UPDATING_PERSISTER_PREPEND_SENTINEL);
734		monitor.write(&mut monitor_bytes).unwrap();
735		match self.kv_store.write(
736			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
737			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
738			monitor_name.as_str(),
739			&monitor_bytes,
740		) {
741			Ok(_) => chain::ChannelMonitorUpdateStatus::Completed,
742			Err(e) => {
743				log_error!(
744					self.logger,
745					"Failed to write ChannelMonitor {}/{}/{} reason: {}",
746					CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
747					CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
748					monitor_name.as_str(),
749					e
750				);
751				chain::ChannelMonitorUpdateStatus::UnrecoverableError
752			},
753		}
754	}
755
756	/// Persists a channel update, writing only the update to the parameterized [`KVStore`] if possible.
757	///
758	/// In some cases, this will forward to [`MonitorUpdatingPersister::persist_new_channel`]:
759	///
760	///   - No full monitor is found in [`KVStore`]
761	///   - The number of pending updates exceeds `maximum_pending_updates` as given to [`Self::new`]
762	///   - LDK commands re-persisting the entire monitor through this function, specifically when
763	///	    `update` is `None`.
764	///   - The update is at [`u64::MAX`], indicating an update generated by pre-0.1 LDK.
765	fn update_persisted_channel(
766		&self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>,
767		monitor: &ChannelMonitor<ChannelSigner>,
768	) -> chain::ChannelMonitorUpdateStatus {
769		const LEGACY_CLOSED_CHANNEL_UPDATE_ID: u64 = u64::MAX;
770		if let Some(update) = update {
771			let persist_update = update.update_id != LEGACY_CLOSED_CHANNEL_UPDATE_ID
772				&& update.update_id % self.maximum_pending_updates != 0;
773			if persist_update {
774				let monitor_name = MonitorName::from(funding_txo);
775				let update_name = UpdateName::from(update.update_id);
776				match self.kv_store.write(
777					CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
778					monitor_name.as_str(),
779					update_name.as_str(),
780					&update.encode(),
781				) {
782					Ok(()) => chain::ChannelMonitorUpdateStatus::Completed,
783					Err(e) => {
784						log_error!(
785							self.logger,
786							"Failed to write ChannelMonitorUpdate {}/{}/{} reason: {}",
787							CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
788							monitor_name.as_str(),
789							update_name.as_str(),
790							e
791						);
792						chain::ChannelMonitorUpdateStatus::UnrecoverableError
793					},
794				}
795			} else {
796				let monitor_name = MonitorName::from(funding_txo);
797				// In case of a channel-close monitor update, we need to read the old monitor before
798				// persisting the new one in order to determine the cleanup range.
799				let maybe_old_monitor = match monitor.get_latest_update_id() {
800					LEGACY_CLOSED_CHANNEL_UPDATE_ID => self.read_monitor(&monitor_name).ok(),
801					_ => None,
802				};
803
804				// We could write this update, but it meets the criteria of our design that call for a full monitor write.
805				let monitor_update_status = self.persist_new_channel(funding_txo, monitor);
806
807				if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status {
808					let channel_closed_legacy =
809						monitor.get_latest_update_id() == LEGACY_CLOSED_CHANNEL_UPDATE_ID;
810					let cleanup_range = if channel_closed_legacy {
811						// If there is an error while reading old monitor, we skip clean up.
812						maybe_old_monitor.map(|(_, ref old_monitor)| {
813							let start = old_monitor.get_latest_update_id();
814							// We never persist an update with the legacy closed update_id
815							let end = cmp::min(
816								start.saturating_add(self.maximum_pending_updates),
817								LEGACY_CLOSED_CHANNEL_UPDATE_ID - 1,
818							);
819							(start, end)
820						})
821					} else {
822						let end = monitor.get_latest_update_id();
823						let start = end.saturating_sub(self.maximum_pending_updates);
824						Some((start, end))
825					};
826
827					if let Some((start, end)) = cleanup_range {
828						self.cleanup_in_range(monitor_name, start, end);
829					}
830				}
831
832				monitor_update_status
833			}
834		} else {
835			// There is no update given, so we must persist a new monitor.
836			self.persist_new_channel(funding_txo, monitor)
837		}
838	}
839
840	fn archive_persisted_channel(&self, funding_txo: OutPoint) {
841		let monitor_name = MonitorName::from(funding_txo);
842		let monitor_key = monitor_name.as_str().to_string();
843		let monitor = match self.read_channel_monitor_with_updates(monitor_key) {
844			Ok((_block_hash, monitor)) => monitor,
845			Err(_) => return,
846		};
847		match self.kv_store.write(
848			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
849			ARCHIVED_CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
850			monitor_name.as_str(),
851			&monitor.encode(),
852		) {
853			Ok(()) => {},
854			Err(_e) => return,
855		};
856		let _ = self.kv_store.remove(
857			CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE,
858			CHANNEL_MONITOR_PERSISTENCE_SECONDARY_NAMESPACE,
859			monitor_name.as_str(),
860			true,
861		);
862	}
863}
864
865impl<K: Deref, L: Deref, ES: Deref, SP: Deref, BI: Deref, FE: Deref>
866	MonitorUpdatingPersister<K, L, ES, SP, BI, FE>
867where
868	ES::Target: EntropySource + Sized,
869	K::Target: KVStore,
870	L::Target: Logger,
871	SP::Target: SignerProvider + Sized,
872	BI::Target: BroadcasterInterface,
873	FE::Target: FeeEstimator,
874{
875	// Cleans up monitor updates for given monitor in range `start..=end`.
876	fn cleanup_in_range(&self, monitor_name: MonitorName, start: u64, end: u64) {
877		for update_id in start..=end {
878			let update_name = UpdateName::from(update_id);
879			if let Err(e) = self.kv_store.remove(
880				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
881				monitor_name.as_str(),
882				update_name.as_str(),
883				true,
884			) {
885				log_error!(
886					self.logger,
887					"Failed to clean up channel monitor updates for monitor {}, reason: {}",
888					monitor_name.as_str(),
889					e
890				);
891			};
892		}
893	}
894}
895
896/// A struct representing a name for a channel monitor.
897///
898/// `MonitorName` is primarily used within the [`MonitorUpdatingPersister`]
899/// in functions that store or retrieve channel monitor snapshots.
900/// It provides a consistent way to generate a unique key for channel
901/// monitors based on their funding outpoints.
902///
903/// While users of the Lightning Dev Kit library generally won't need
904/// to interact with [`MonitorName`] directly, it can be useful for:
905/// - Custom persistence implementations
906/// - Debugging or logging channel monitor operations
907/// - Extending the functionality of the `MonitorUpdatingPersister`
908///
909/// # Examples
910///
911/// ```
912/// use std::str::FromStr;
913///
914/// use bitcoin::Txid;
915///
916/// use lightning::util::persist::MonitorName;
917/// use lightning::chain::transaction::OutPoint;
918///
919/// let outpoint = OutPoint {
920///	 txid: Txid::from_str("deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef").unwrap(),
921///	 index: 1,
922/// };
923/// let monitor_name = MonitorName::from(outpoint);
924/// assert_eq!(monitor_name.as_str(), "deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1");
925///
926/// // Using MonitorName to generate a storage key
927/// let storage_key = format!("channel_monitors/{}", monitor_name.as_str());
928/// ```
929#[derive(Debug)]
930pub struct MonitorName(String);
931
932impl MonitorName {
933	/// Constructs a [`MonitorName`], after verifying that an [`OutPoint`] can
934	/// be formed from the given `name`.
935	/// This method is useful if you have a String and you want to verify that
936	/// it's a valid storage key for a channel monitor.
937	pub fn new(name: String) -> Result<Self, io::Error> {
938		MonitorName::do_try_into_outpoint(&name)?;
939		Ok(Self(name))
940	}
941
942	/// Convert this monitor name to a str.
943	/// This method is particularly useful when you need to use the monitor name
944	/// as a key in a key-value store or when logging.
945	pub fn as_str(&self) -> &str {
946		&self.0
947	}
948
949	/// Attempt to form a valid [`OutPoint`] from a given name string.
950	fn do_try_into_outpoint(name: &str) -> Result<OutPoint, io::Error> {
951		let mut parts = name.splitn(2, '_');
952		let txid = if let Some(part) = parts.next() {
953			Txid::from_str(part).map_err(|_| {
954				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx ID in stored key")
955			})?
956		} else {
957			return Err(io::Error::new(
958				io::ErrorKind::InvalidData,
959				"Stored monitor key is not a splittable string",
960			));
961		};
962		let index = if let Some(part) = parts.next() {
963			part.parse().map_err(|_| {
964				io::Error::new(io::ErrorKind::InvalidData, "Invalid tx index in stored key")
965			})?
966		} else {
967			return Err(io::Error::new(
968				io::ErrorKind::InvalidData,
969				"No tx index value found after underscore in stored key",
970			));
971		};
972		Ok(OutPoint { txid, index })
973	}
974}
975
976impl TryFrom<&MonitorName> for OutPoint {
977	type Error = io::Error;
978
979	/// Attempts to convert a `MonitorName` back into an `OutPoint`.
980	///
981	/// This is useful when you have a `MonitorName` (perhaps retrieved from storage)
982	/// and need to reconstruct the original `OutPoint` it represents.
983	fn try_from(value: &MonitorName) -> Result<Self, io::Error> {
984		MonitorName::do_try_into_outpoint(&value.0)
985	}
986}
987
988impl From<OutPoint> for MonitorName {
989	/// Creates a `MonitorName` from an `OutPoint`.
990	///
991	/// This is typically used when you need to generate a storage key or identifier
992	/// for a new or existing channel monitor.
993	fn from(value: OutPoint) -> Self {
994		MonitorName(format!("{}_{}", value.txid.to_string(), value.index))
995	}
996}
997
998/// A struct representing a name for a channel monitor update.
999///
1000/// [`UpdateName`] is primarily used within the [`MonitorUpdatingPersister`] in
1001/// functions that store or retrieve partial updates to channel monitors. It
1002/// provides a consistent way to generate and parse unique identifiers for
1003/// monitor updates based on their sequence number.
1004///
1005/// The name is derived from the update's sequence ID, which is a monotonically
1006/// increasing u64 value. This format allows for easy ordering of updates and
1007/// efficient storage and retrieval in key-value stores.
1008///
1009/// # Usage
1010///
1011/// While users of the Lightning Dev Kit library generally won't need to
1012/// interact with `UpdateName` directly, it still can be useful for custom
1013/// persistence implementations. The u64 value is the update_id that can be
1014/// compared with [`ChannelMonitor::get_latest_update_id`] to check if this update
1015/// has been applied to the channel monitor or not, which is useful for pruning
1016/// stale channel monitor updates from persistence.
1017///
1018/// # Examples
1019///
1020/// ```
1021/// use lightning::util::persist::UpdateName;
1022///
1023/// let update_id: u64 = 42;
1024/// let update_name = UpdateName::from(update_id);
1025/// assert_eq!(update_name.as_str(), "42");
1026///
1027/// // Using UpdateName to generate a storage key
1028/// let monitor_name = "some_monitor_name";
1029/// let storage_key = format!("channel_monitor_updates/{}/{}", monitor_name, update_name.as_str());
1030/// ```
1031#[derive(Debug)]
1032pub struct UpdateName(pub u64, String);
1033
1034impl UpdateName {
1035	/// Constructs an [`UpdateName`], after verifying that an update sequence ID
1036	/// can be derived from the given `name`.
1037	pub fn new(name: String) -> Result<Self, io::Error> {
1038		match name.parse::<u64>() {
1039			Ok(u) => Ok(u.into()),
1040			Err(_) => {
1041				Err(io::Error::new(io::ErrorKind::InvalidData, "cannot parse u64 from update name"))
1042			},
1043		}
1044	}
1045
1046	/// Convert this update name to a string slice.
1047	///
1048	/// This method is particularly useful when you need to use the update name
1049	/// as part of a key in a key-value store or when logging.
1050	///
1051	/// # Examples
1052	///
1053	/// ```
1054	/// use lightning::util::persist::UpdateName;
1055	///
1056	/// let update_name = UpdateName::from(42);
1057	/// assert_eq!(update_name.as_str(), "42");
1058	/// ```
1059	pub fn as_str(&self) -> &str {
1060		&self.1
1061	}
1062}
1063
1064impl From<u64> for UpdateName {
1065	/// Creates an `UpdateName` from a `u64`.
1066	///
1067	/// This is typically used when you need to generate a storage key or
1068	/// identifier for a new channel monitor update.
1070	///
1071	/// # Examples
1072	///
1073	/// ```
1074	/// use lightning::util::persist::UpdateName;
1075	///
1076	/// let update_id: u64 = 42;
1077	/// let update_name = UpdateName::from(update_id);
1078	/// assert_eq!(update_name.as_str(), "42");
1079	/// ```
1080	fn from(value: u64) -> Self {
1081		Self(value, value.to_string())
1082	}
1083}
1084
1085#[cfg(test)]
1086mod tests {
1087	use super::*;
1088	use crate::chain::ChannelMonitorUpdateStatus;
1089	use crate::events::{ClosureReason, MessageSendEventsProvider};
1090	use crate::ln::functional_test_utils::*;
1091	use crate::sync::Arc;
1092	use crate::util::test_channel_signer::TestChannelSigner;
1093	use crate::util::test_utils::{self, TestLogger, TestStore};
1094	use crate::{check_added_monitors, check_closed_broadcast};
1095
1096	const EXPECTED_UPDATES_PER_PAYMENT: u64 = 5;
1097
1098	#[test]
1099	fn converts_u64_to_update_name() {
1100		assert_eq!(UpdateName::from(0).as_str(), "0");
1101		assert_eq!(UpdateName::from(21).as_str(), "21");
1102		assert_eq!(UpdateName::from(u64::MAX).as_str(), "18446744073709551615");
1103	}
1104
1105	#[test]
1106	fn bad_update_name_fails() {
1107		assert!(UpdateName::new("deadbeef".to_string()).is_err());
1108		assert!(UpdateName::new("-1".to_string()).is_err());
1109	}
1110
1111	#[test]
1112	fn monitor_from_outpoint_works() {
1113		let monitor_name1 = MonitorName::from(OutPoint {
1114			txid: Txid::from_str(
1115				"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef",
1116			)
1117			.unwrap(),
1118			index: 1,
1119		});
1120		assert_eq!(
1121			monitor_name1.as_str(),
1122			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_1"
1123		);
1124
1125		let monitor_name2 = MonitorName::from(OutPoint {
1126			txid: Txid::from_str(
1127				"f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef",
1128			)
1129			.unwrap(),
1130			index: u16::MAX,
1131		});
1132		assert_eq!(
1133			monitor_name2.as_str(),
1134			"f33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeeff33dbeef_65535"
1135		);
1136	}
1137
1138	#[test]
1139	fn bad_monitor_string_fails() {
1140		assert!(MonitorName::new(
1141			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef".to_string()
1142		)
1143		.is_err());
1144		assert!(MonitorName::new(
1145			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_65536".to_string()
1146		)
1147		.is_err());
1148		assert!(MonitorName::new(
1149			"deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef_21".to_string()
1150		)
1151		.is_err());
1152	}
1153
1154	// Exercise the `MonitorUpdatingPersister` with real channels and payments.
1155	#[test]
1156	fn persister_with_real_monitors() {
1157		// This value is used later to limit how many iterations we perform.
1158		let persister_0_max_pending_updates = 7;
1159		// Intentionally set this to a smaller value to test a different alignment.
1160		let persister_1_max_pending_updates = 3;
1161		let chanmon_cfgs = create_chanmon_cfgs(4);
1162		let persister_0 = MonitorUpdatingPersister {
1163			kv_store: &TestStore::new(false),
1164			logger: &TestLogger::new(),
1165			maximum_pending_updates: persister_0_max_pending_updates,
1166			entropy_source: &chanmon_cfgs[0].keys_manager,
1167			signer_provider: &chanmon_cfgs[0].keys_manager,
1168			broadcaster: &chanmon_cfgs[0].tx_broadcaster,
1169			fee_estimator: &chanmon_cfgs[0].fee_estimator,
1170		};
1171		let persister_1 = MonitorUpdatingPersister {
1172			kv_store: &TestStore::new(false),
1173			logger: &TestLogger::new(),
1174			maximum_pending_updates: persister_1_max_pending_updates,
1175			entropy_source: &chanmon_cfgs[1].keys_manager,
1176			signer_provider: &chanmon_cfgs[1].keys_manager,
1177			broadcaster: &chanmon_cfgs[1].tx_broadcaster,
1178			fee_estimator: &chanmon_cfgs[1].fee_estimator,
1179		};
1180		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1181		let chain_mon_0 = test_utils::TestChainMonitor::new(
1182			Some(&chanmon_cfgs[0].chain_source),
1183			&chanmon_cfgs[0].tx_broadcaster,
1184			&chanmon_cfgs[0].logger,
1185			&chanmon_cfgs[0].fee_estimator,
1186			&persister_0,
1187			&chanmon_cfgs[0].keys_manager,
1188		);
1189		let chain_mon_1 = test_utils::TestChainMonitor::new(
1190			Some(&chanmon_cfgs[1].chain_source),
1191			&chanmon_cfgs[1].tx_broadcaster,
1192			&chanmon_cfgs[1].logger,
1193			&chanmon_cfgs[1].fee_estimator,
1194			&persister_1,
1195			&chanmon_cfgs[1].keys_manager,
1196		);
1197		node_cfgs[0].chain_monitor = chain_mon_0;
1198		node_cfgs[1].chain_monitor = chain_mon_1;
1199		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1200		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1201
1202		// Check that the persisted channel data is empty before any channels are
1203		// open.
1204		let mut persisted_chan_data_0 =
1205			persister_0.read_all_channel_monitors_with_updates().unwrap();
1206		assert_eq!(persisted_chan_data_0.len(), 0);
1207		let mut persisted_chan_data_1 =
1208			persister_1.read_all_channel_monitors_with_updates().unwrap();
1209		assert_eq!(persisted_chan_data_1.len(), 0);
1210
1211		// Helper to make sure the channel is on the expected update ID.
1212		macro_rules! check_persisted_data {
1213			($expected_update_id: expr) => {
1214				persisted_chan_data_0 =
1215					persister_0.read_all_channel_monitors_with_updates().unwrap();
1216				// check that we stored only one monitor
1217				assert_eq!(persisted_chan_data_0.len(), 1);
1218				for (_, mon) in persisted_chan_data_0.iter() {
1219					// check that when we read it, we got the right update id
1220					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
1221
1222					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
1223					assert_eq!(
1224						persister_0
1225							.kv_store
1226							.list(
1227								CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1228								monitor_name.as_str()
1229							)
1230							.unwrap()
1231							.len() as u64,
1232						mon.get_latest_update_id() % persister_0_max_pending_updates,
1233						"Wrong number of updates stored in persister 0",
1234					);
1235				}
1236				persisted_chan_data_1 =
1237					persister_1.read_all_channel_monitors_with_updates().unwrap();
1238				assert_eq!(persisted_chan_data_1.len(), 1);
1239				for (_, mon) in persisted_chan_data_1.iter() {
1240					assert_eq!(mon.get_latest_update_id(), $expected_update_id);
1241					let monitor_name = MonitorName::from(mon.get_funding_txo().0);
1242					assert_eq!(
1243						persister_1
1244							.kv_store
1245							.list(
1246								CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1247								monitor_name.as_str()
1248							)
1249							.unwrap()
1250							.len() as u64,
1251						mon.get_latest_update_id() % persister_1_max_pending_updates,
1252						"Wrong number of updates stored in persister 1",
1253					);
1254				}
1255			};
1256		}
1257
1258		// Create some initial channel and check that a channel was persisted.
1259		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1260		check_persisted_data!(0);
1261
1262		// Send a few payments and make sure the monitors are updated to the latest.
1263		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1264		check_persisted_data!(EXPECTED_UPDATES_PER_PAYMENT);
1265		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1266		check_persisted_data!(2 * EXPECTED_UPDATES_PER_PAYMENT);
1267
1268		// Send a few more payments to try all the alignments of max pending updates with
1269		// updates for a payment sent and received.
1270		let mut sender = 0;
1271		for i in 3..=persister_0_max_pending_updates * 2 {
1272			let receiver;
1273			if sender == 0 {
1274				sender = 1;
1275				receiver = 0;
1276			} else {
1277				sender = 0;
1278				receiver = 1;
1279			}
1280			send_payment(&nodes[sender], &vec![&nodes[receiver]][..], 21_000);
1281			check_persisted_data!(i * EXPECTED_UPDATES_PER_PAYMENT);
1282		}
1283
1284		// Force close because cooperative close doesn't result in any persisted
1285		// updates.
1286
1287		let node_id_1 = nodes[1].node.get_our_node_id();
1288		let chan_id = nodes[0].node.list_channels()[0].channel_id;
1289		let err_msg = "Channel force-closed".to_string();
1290		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id, &node_id_1, err_msg).unwrap();
1291
1292		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
1293		check_closed_event(&nodes[0], 1, reason, false, &[node_id_1], 100000);
1294		check_closed_broadcast!(nodes[0], true);
1295		check_added_monitors!(nodes[0], 1);
1296
1297		let node_txn = nodes[0].tx_broadcaster.txn_broadcast();
1298		assert_eq!(node_txn.len(), 1);
1299		let txn = vec![node_txn[0].clone(), node_txn[0].clone()];
1300		let dummy_block = create_dummy_block(nodes[0].best_block_hash(), 42, txn);
1301		connect_block(&nodes[1], &dummy_block);
1302
1303		check_closed_broadcast!(nodes[1], true);
1304		let reason = ClosureReason::CommitmentTxConfirmed;
1305		let node_id_0 = nodes[0].node.get_our_node_id();
1306		check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000);
1307		check_added_monitors!(nodes[1], 1);
1308
1309		// Make sure everything is persisted as expected after close.
1310		check_persisted_data!(
1311			persister_0_max_pending_updates * 2 * EXPECTED_UPDATES_PER_PAYMENT + 1
1312		);
1313	}
1314
1315	// Test that if the `MonitorUpdatingPersister`'s `KVStore` can't actually write, trying to persist a
1316	// monitor or update with it results in the persister returning an UnrecoverableError status.
1317	#[test]
1318	fn unrecoverable_error_on_write_failure() {
1319		// Set up a dummy channel and force close. This will produce a monitor
1320		// that we can then use to test persistence.
1321		let chanmon_cfgs = create_chanmon_cfgs(2);
1322		let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1323		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1324		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1325		let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
1326		let err_msg = "Channel force-closed".to_string();
1327		let node_id_0 = nodes[0].node.get_our_node_id();
1328		nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &node_id_0, err_msg).unwrap();
1329		let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
1330		check_closed_event(&nodes[1], 1, reason, false, &[node_id_0], 100000);
1331		{
1332			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
1333			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
1334			let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0];
1335			let txid =
1336				Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be")
1337					.unwrap();
1338			let test_txo = OutPoint { txid, index: 0 };
1339
1340			let ro_persister = MonitorUpdatingPersister {
1341				kv_store: &TestStore::new(true),
1342				logger: &TestLogger::new(),
1343				maximum_pending_updates: 11,
1344				entropy_source: node_cfgs[0].keys_manager,
1345				signer_provider: node_cfgs[0].keys_manager,
1346				broadcaster: node_cfgs[0].tx_broadcaster,
1347				fee_estimator: node_cfgs[0].fee_estimator,
1348			};
1349			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1) {
1350				ChannelMonitorUpdateStatus::UnrecoverableError => {
1351					// correct result
1352				},
1353				ChannelMonitorUpdateStatus::Completed => {
1354					panic!("Completed persisting new channel when shouldn't have")
1355				},
1356				ChannelMonitorUpdateStatus::InProgress => {
1357					panic!("Returned InProgress when shouldn't have")
1358				},
1359			}
1360			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1) {
1361				ChannelMonitorUpdateStatus::UnrecoverableError => {
1362					// correct result
1363				},
1364				ChannelMonitorUpdateStatus::Completed => {
1365					panic!("Completed persisting new channel when shouldn't have")
1366				},
1367				ChannelMonitorUpdateStatus::InProgress => {
1368					panic!("Returned InProgress when shouldn't have")
1369				},
1370			}
1371			added_monitors.clear();
1372		}
1373		nodes[1].node.get_and_clear_pending_msg_events();
1374	}
1375
1376	// Confirm that the `cleanup_stale_updates` function finds and deletes stale updates.
1377	#[test]
1378	fn clean_stale_updates_works() {
1379		let test_max_pending_updates = 7;
1380		let chanmon_cfgs = create_chanmon_cfgs(3);
1381		let persister_0 = MonitorUpdatingPersister {
1382			kv_store: &TestStore::new(false),
1383			logger: &TestLogger::new(),
1384			maximum_pending_updates: test_max_pending_updates,
1385			entropy_source: &chanmon_cfgs[0].keys_manager,
1386			signer_provider: &chanmon_cfgs[0].keys_manager,
1387			broadcaster: &chanmon_cfgs[0].tx_broadcaster,
1388			fee_estimator: &chanmon_cfgs[0].fee_estimator,
1389		};
1390		let persister_1 = MonitorUpdatingPersister {
1391			kv_store: &TestStore::new(false),
1392			logger: &TestLogger::new(),
1393			maximum_pending_updates: test_max_pending_updates,
1394			entropy_source: &chanmon_cfgs[1].keys_manager,
1395			signer_provider: &chanmon_cfgs[1].keys_manager,
1396			broadcaster: &chanmon_cfgs[1].tx_broadcaster,
1397			fee_estimator: &chanmon_cfgs[1].fee_estimator,
1398		};
1399		let mut node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
1400		let chain_mon_0 = test_utils::TestChainMonitor::new(
1401			Some(&chanmon_cfgs[0].chain_source),
1402			&chanmon_cfgs[0].tx_broadcaster,
1403			&chanmon_cfgs[0].logger,
1404			&chanmon_cfgs[0].fee_estimator,
1405			&persister_0,
1406			&chanmon_cfgs[0].keys_manager,
1407		);
1408		let chain_mon_1 = test_utils::TestChainMonitor::new(
1409			Some(&chanmon_cfgs[1].chain_source),
1410			&chanmon_cfgs[1].tx_broadcaster,
1411			&chanmon_cfgs[1].logger,
1412			&chanmon_cfgs[1].fee_estimator,
1413			&persister_1,
1414			&chanmon_cfgs[1].keys_manager,
1415		);
1416		node_cfgs[0].chain_monitor = chain_mon_0;
1417		node_cfgs[1].chain_monitor = chain_mon_1;
1418		let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
1419		let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
1420
1421		// Check that the persisted channel data is empty before any channels are
1422		// open.
1423		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
1424		assert_eq!(persisted_chan_data.len(), 0);
1425
1426		// Create some initial channel
1427		let _ = create_announced_chan_between_nodes(&nodes, 0, 1);
1428
1429		// Send a few payments to advance the updates a bit
1430		send_payment(&nodes[0], &vec![&nodes[1]][..], 8_000_000);
1431		send_payment(&nodes[1], &vec![&nodes[0]][..], 4_000_000);
1432
1433		// Get the monitor and make a fake stale update at update_id=1 (lowest height of an update possible)
1434		let persisted_chan_data = persister_0.read_all_channel_monitors_with_updates().unwrap();
1435		let (_, monitor) = &persisted_chan_data[0];
1436		let monitor_name = MonitorName::from(monitor.get_funding_txo().0);
1437		persister_0
1438			.kv_store
1439			.write(
1440				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1441				monitor_name.as_str(),
1442				UpdateName::from(1).as_str(),
1443				&[0u8; 1],
1444			)
1445			.unwrap();
1446
1447		// Do the stale update cleanup
1448		persister_0.cleanup_stale_updates(false).unwrap();
1449
1450		// Confirm the stale update is unreadable/gone
1451		assert!(persister_0
1452			.kv_store
1453			.read(
1454				CHANNEL_MONITOR_UPDATE_PERSISTENCE_PRIMARY_NAMESPACE,
1455				monitor_name.as_str(),
1456				UpdateName::from(1).as_str()
1457			)
1458			.is_err());
1459	}
1460
1461	fn persist_fn<P: Deref, ChannelSigner: EcdsaChannelSigner>(_persist: P) -> bool
1462	where
1463		P::Target: Persist<ChannelSigner>,
1464	{
1465		true
1466	}
1467
1468	#[test]
1469	fn kvstore_trait_object_usage() {
1470		let store: Arc<dyn KVStore + Send + Sync> = Arc::new(TestStore::new(false));
1471		assert!(persist_fn::<_, TestChannelSigner>(store.clone()));
1472	}
1473}