Merge pull request #1564 from TheBlueMatt/2022-06-panic-on-behind
Panic if we're running with outdated state instead of force-closing
TheBlueMatt authored Jun 27, 2022
2 parents 92ca7ff + caa2a9a commit a600eee
Showing 10 changed files with 129 additions and 99 deletions.
4 changes: 2 additions & 2 deletions fuzz/src/full_stack.rs
@@ -252,7 +252,7 @@ impl<'a> Drop for MoneyLossDetector<'a> {
}

// Force all channels onto the chain (and time out claim txn)
- self.manager.force_close_all_channels();
+ self.manager.force_close_all_channels_broadcasting_latest_txn();
}
}
}
@@ -624,7 +624,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
let channel_id = get_slice!(1)[0] as usize;
if channel_id >= channels.len() { return; }
channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
- channelmanager.force_close_channel(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
+ channelmanager.force_close_broadcasting_latest_txn(&channels[channel_id].channel_id, &channels[channel_id].counterparty.node_id).unwrap();
},
// 15 is above
_ => return,
4 changes: 2 additions & 2 deletions lightning-background-processor/src/lib.rs
@@ -744,7 +744,7 @@ mod tests {
}

// Force-close the channel.
- nodes[0].node.force_close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();

// Check that the force-close updates are persisted.
check_persisted_data!(nodes[0].node, filepath.clone());
@@ -880,7 +880,7 @@ mod tests {
let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));

// Force close the channel and check that the SpendableOutputs event was handled.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
let commitment_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().pop().unwrap();
confirm_transaction_depth(&mut nodes[0], &commitment_tx, BREAKDOWN_TIMEOUT as u32);
let event = receiver
6 changes: 3 additions & 3 deletions lightning-persister/src/lib.rs
@@ -213,7 +213,7 @@ mod tests {

// Force close because cooperative close doesn't result in any persisted
// updates.
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
@@ -247,7 +247,7 @@ mod tests {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
@@ -286,7 +286,7 @@ mod tests {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
2 changes: 1 addition & 1 deletion lightning/src/ln/chanmon_update_fail_tests.rs
@@ -226,7 +226,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
}

// ...and make sure we can force-close a frozen channel
- nodes[0].node.force_close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
check_added_monitors!(nodes[0], 1);
check_closed_broadcast!(nodes[0], true);

26 changes: 20 additions & 6 deletions lightning/src/ln/channel.rs
@@ -802,7 +802,6 @@ pub(super) enum ChannelError {
Ignore(String),
Warn(String),
Close(String),
- CloseDelayBroadcast(String),
}

impl fmt::Debug for ChannelError {
@@ -811,7 +810,6 @@ impl fmt::Debug for ChannelError {
&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
- &ChannelError::CloseDelayBroadcast(ref e) => write!(f, "CloseDelayBroadcast : {}", e)
}
}
}
@@ -3799,6 +3797,11 @@ impl<Signer: Sign> Channel<Signer> {

/// May panic if some calls other than message-handling calls (which will all Err immediately)
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
+ ///
+ /// Some links printed in log lines are included here to check them during build (when run with
+ /// `cargo doc --document-private-items`):
+ /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
+ /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
pub fn channel_reestablish<L: Deref>(&mut self, msg: &msgs::ChannelReestablish, logger: &L,
node_pk: PublicKey, genesis_block_hash: BlockHash, best_block: &BestBlock)
-> Result<ReestablishResponses, ChannelError> where L::Target: Logger {
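The doc lines added above use a trick worth noting: the compiler cannot verify paths that appear only inside log or panic strings, so the PR repeats them as intra-doc links, letting `cargo doc --document-private-items` fail the build if they ever go stale. A generic, self-contained sketch of the pattern (`recovery` and `frob_widget` are made-up names, not LDK items):

```rust
// Toy illustration of the doc-link trick; not LDK code.
pub mod recovery {
    pub fn restore_widget() {}
}

/// Frobs the widget.
///
/// The path printed in the log line below is repeated here as an intra-doc
/// link so the docs build verifies it still resolves:
/// [`crate::recovery::restore_widget`].
pub fn frob_widget() {
    // The compiler treats this string as opaque text...
    eprintln!("widget broken; run crate::recovery::restore_widget()");
    // ...but the intra-doc link in the doc comment above is checked.
}

fn main() {
    frob_widget();
}
```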
@@ -3824,9 +3827,20 @@ impl<Signer: Sign> Channel<Signer> {
return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
}
if msg.next_remote_commitment_number > INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number {
- return Err(ChannelError::CloseDelayBroadcast(
- "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting".to_owned()
- ));
+ macro_rules! log_and_panic {
+ ($err_msg: expr) => {
+ log_error!(logger, $err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ panic!($err_msg, log_bytes!(self.channel_id), log_pubkey!(self.counterparty_node_id));
+ }
+ }
+ log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
+ This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
+ More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
+ If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
+ ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
+ ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
+ Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
+ See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
}
},
OptionalField::Absent => {}
@@ -3933,7 +3947,7 @@ impl<Signer: Sign> Channel<Signer> {
// now!
match self.free_holding_cell_htlcs(logger) {
Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
- Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) | Err(ChannelError::CloseDelayBroadcast(_)) =>
+ Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
Ok(ReestablishResponses {
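The new fallen-behind check reads more easily with LDK's numbering convention in mind: holder commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER (2^48 - 1), while the peer's `next_remote_commitment_number` in `channel_reestablish` counts *up*, so the subtraction converts between the two. A standalone toy of the arithmetic (not LDK's real types):

```rust
// LDK counts commitment numbers down from INITIAL_COMMITMENT_NUMBER; the
// wire protocol's next_remote_commitment_number counts up from zero.
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

/// `cur_holder_commitment_number` is our count-down number for the next
/// holder commitment; `peer_next_remote_commitment_number` is the peer's
/// count-up claim from their `channel_reestablish`.
fn peer_proves_we_are_behind(
    cur_holder_commitment_number: u64,
    peer_next_remote_commitment_number: u64,
) -> bool {
    // How many holder commitments we have signed, counting up.
    let our_count_up = INITIAL_COMMITMENT_NUMBER - cur_holder_commitment_number;
    // If the peer expects a later commitment than we have ever built, they
    // hold state we lost: broadcasting our "latest" commitment would hand
    // them all funds via the revocation path -- hence the panic above.
    peer_next_remote_commitment_number > our_count_up
}

fn main() {
    // Fresh channel: one commitment signed, peer expects number 1 -> fine.
    assert!(!peer_proves_we_are_behind(INITIAL_COMMITMENT_NUMBER - 1, 1));
    // Restored from an old backup: peer expects number 5, we only know 1.
    assert!(peer_proves_we_are_behind(INITIAL_COMMITMENT_NUMBER - 1, 5));
}
```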
66 changes: 37 additions & 29 deletions lightning/src/ln/channelmanager.rs
@@ -369,15 +369,6 @@ impl MsgHandleErrInternal {
},
},
},
- ChannelError::CloseDelayBroadcast(msg) => LightningError {
- err: msg.clone(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage {
- channel_id,
- data: msg
- },
- },
- },
},
chan_id: None,
shutdown_finish: None,
@@ -1273,13 +1264,6 @@ macro_rules! convert_chan_err {
(true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
},
- ChannelError::CloseDelayBroadcast(msg) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($channel_id[..]), msg);
- update_maps_on_chan_removal!($self, $short_to_id, $channel);
- let shutdown_res = $channel.force_shutdown(false);
- (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
- shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
- }
}
}
}
@@ -1945,7 +1929,8 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana

/// `peer_msg` should be set when we receive a message from a peer, but not set when the
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
- fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
+ fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
+ -> Result<PublicKey, APIError> {
let mut chan = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_state_lock;
@@ -1964,7 +1949,7 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
};
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
- self.finish_force_close_channel(chan.force_shutdown(true));
+ self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
let mut channel_state = self.channel_state.lock().unwrap();
channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
@@ -1975,13 +1960,9 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
Ok(chan.get_counterparty_node_id())
}

- /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
- /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
- /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
- /// channel.
- pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
+ fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
+ match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
events::MessageSendEvent::HandleError {
@@ -1997,11 +1978,39 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
}
}

+ /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
+ /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
+ /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
+ /// channel.
+ pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, true)
+ }
+
+ /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
+ /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
+ /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
+ ///
+ /// You can always get the latest local transaction(s) to broadcast from
+ /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
+ pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
+ -> Result<(), APIError> {
+ self.force_close_sending_error(channel_id, counterparty_node_id, false)
+ }
+
/// Force close all channels, immediately broadcasting the latest local commitment transaction
/// for each to the chain and rejecting new HTLCs on each.
- pub fn force_close_all_channels(&self) {
+ pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
+ for chan in self.list_channels() {
+ let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
+ }
+ }
+
+ /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
+ /// local transaction(s).
+ pub fn force_close_all_channels_without_broadcasting_txn(&self) {
for chan in self.list_channels() {
- let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
+ let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
}
}
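Taken together, the hunk above folds the old public `force_close_channel` into a private `force_close_sending_error(.., broadcast)` with thin public wrappers, which makes the stale-backup recovery path from the panic message expressible. A toy sketch of how operator-side recovery might use the new surface (stand-in types and scaffolding; real LDK signatures differ):

```rust
// Stand-in types mirroring the new API names from the diff; everything
// except the method names is hypothetical scaffolding.
#[derive(Clone)]
struct ChanInfo { channel_id: [u8; 32], counterparty_node_id: [u8; 33] }

struct Manager { channels: Vec<ChanInfo> }

impl Manager {
    fn list_channels(&self) -> Vec<ChanInfo> { self.channels.clone() }

    // Mirrors ChannelManager::force_close_without_broadcasting_txn: reject
    // new HTLCs and notify the peer, but keep the stale commitment
    // transaction off-chain (broadcasting it would forfeit all funds via
    // the revocation path).
    fn force_close_without_broadcasting_txn(
        &self, id: &[u8; 32], _peer: &[u8; 33],
    ) -> Result<(), &'static str> {
        println!("force-closing {:02x?}... without broadcast", &id[..4]);
        Ok(())
    }

    // Mirrors force_close_all_channels_without_broadcasting_txn:
    // best-effort over every known channel, per-channel errors ignored.
    fn force_close_all_channels_without_broadcasting_txn(&self) {
        for chan in self.list_channels() {
            let _ = self.force_close_without_broadcasting_txn(
                &chan.channel_id, &chan.counterparty_node_id);
        }
    }
}

fn main() {
    let mgr = Manager {
        channels: vec![ChanInfo { channel_id: [0; 32], counterparty_node_id: [2; 33] }],
    };
    // After restarting on old state: close everything WITHOUT broadcasting,
    // then reconnect so peers broadcast their newer commitment transactions.
    mgr.force_close_all_channels_without_broadcasting_txn();
}
```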

@@ -3188,7 +3197,6 @@ impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelMana
// ChannelClosed event is generated by handle_error for us.
Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
},
- ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
};
handle_errors.push((counterparty_node_id, err));
continue;
@@ -6058,7 +6066,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
for chan in self.list_channels() {
if chan.counterparty.node_id == *counterparty_node_id {
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
} else {
@@ -6080,7 +6088,7 @@ impl<Signer: Sign, M: Deref , T: Deref , K: Deref , F: Deref , L: Deref >
}

// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
+ let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
}
}
}