Skip to content

Commit

Permalink
Example using pub use macro
Browse files Browse the repository at this point in the history
  • Loading branch information
henghonglee committed Jun 1, 2023
1 parent 32eb894 commit 8515d27
Show file tree
Hide file tree
Showing 20 changed files with 459 additions and 316 deletions.
40 changes: 20 additions & 20 deletions lightning-background-processor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -272,9 +272,9 @@ macro_rules! define_run_body {
$loop_exit_check: expr, $await: expr, $get_timer: expr, $timer_elapsed: expr,
$check_slow_await: expr)
=> { {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Calling ChannelManager's timer_tick_occurred on startup");
$channel_manager.timer_tick_occurred();
log_trace!($logger, "Rebroadcasting monitor's pending claims on startup");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Rebroadcasting monitor's pending claims on startup");
$chain_monitor.rebroadcast_pending_claims();

let mut last_freshness_call = $get_timer(FRESHNESS_TIMER);
Expand Down Expand Up @@ -303,7 +303,7 @@ macro_rules! define_run_body {

// Exit the loop if the background processor was requested to stop.
if $loop_exit_check {
log_trace!($logger, "Terminating background processor.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Terminating background processor.");
break;
}

Expand All @@ -316,17 +316,17 @@ macro_rules! define_run_body {

// Exit the loop if the background processor was requested to stop.
if $loop_exit_check {
log_trace!($logger, "Terminating background processor.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Terminating background processor.");
break;
}

if updates_available {
log_trace!($logger, "Persisting ChannelManager...");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Persisting ChannelManager...");
$persister.persist_manager(&*$channel_manager)?;
log_trace!($logger, "Done persisting ChannelManager.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Done persisting ChannelManager.");
}
if $timer_elapsed(&mut last_freshness_call, FRESHNESS_TIMER) {
log_trace!($logger, "Calling ChannelManager's timer_tick_occurred");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Calling ChannelManager's timer_tick_occurred");
$channel_manager.timer_tick_occurred();
last_freshness_call = $get_timer(FRESHNESS_TIMER);
}
Expand All @@ -343,11 +343,11 @@ macro_rules! define_run_body {
// may call Bitcoin Core RPCs during event handling, which very often takes
// more than a handful of seconds to complete, and shouldn't disconnect all our
// peers.
log_trace!($logger, "100ms sleep took more than a second, disconnecting peers.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "100ms sleep took more than a second, disconnecting peers.");
$peer_manager.as_ref().disconnect_all_peers();
last_ping_call = $get_timer(PING_TIMER);
} else if $timer_elapsed(&mut last_ping_call, PING_TIMER) {
log_trace!($logger, "Calling PeerManager's timer_tick_occurred");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Calling PeerManager's timer_tick_occurred");
$peer_manager.as_ref().timer_tick_occurred();
last_ping_call = $get_timer(PING_TIMER);
}
Expand All @@ -367,16 +367,16 @@ macro_rules! define_run_body {
// The network graph must not be pruned while rapid sync completion is pending
if let Some(network_graph) = $gossip_sync.prunable_network_graph() {
#[cfg(feature = "std")] {
log_trace!($logger, "Pruning and persisting network graph.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Pruning and persisting network graph.");
network_graph.remove_stale_channels_and_tracking();
}
#[cfg(not(feature = "std"))] {
log_warn!($logger, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
log_trace!($logger, "Persisting network graph.");
log_warn!($logger, None, None, "Not pruning network graph, consider enabling `std` or doing so manually with remove_stale_channels_and_tracking_with_time.");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Persisting network graph.");
}

if let Err(e) = $persister.persist_graph(network_graph) {
log_error!($logger, "Error: Failed to persist network graph, check your disk and permissions {}", e)
_log_error_with_peer_id_channel_id!($logger, None, None, "Error: Failed to persist network graph, check your disk and permissions {}", e)
}

have_pruned = true;
Expand All @@ -387,16 +387,16 @@ macro_rules! define_run_body {

if $timer_elapsed(&mut last_scorer_persist_call, SCORER_PERSIST_TIMER) {
if let Some(ref scorer) = $scorer {
log_trace!($logger, "Persisting scorer");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Persisting scorer");
if let Err(e) = $persister.persist_scorer(&scorer) {
log_error!($logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
_log_error_with_peer_id_channel_id!($logger, None, None, "Error: Failed to persist scorer, check your disk and permissions {}", e)
}
}
last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER);
}

if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) {
log_trace!($logger, "Rebroadcasting monitor's pending claims");
_log_trace_with_peer_id_channel_id!($logger, None, None, "Rebroadcasting monitor's pending claims");
$chain_monitor.rebroadcast_pending_claims();
last_rebroadcast_call = $get_timer(REBROADCAST_TIMER);
}
Expand Down Expand Up @@ -633,9 +633,9 @@ where
}
if let Some(ref scorer) = scorer {
if update_scorer(scorer, &event) {
log_trace!(logger, "Persisting scorer after update");
_log_trace_with_peer_id_channel_id!(logger, None, None, "Persisting scorer after update");
if let Err(e) = persister.persist_scorer(&scorer) {
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
_log_error_with_peer_id_channel_id!(logger, None, None, "Error: Failed to persist scorer, check your disk and permissions {}", e)
}
}
}
Expand Down Expand Up @@ -768,9 +768,9 @@ impl BackgroundProcessor {
}
if let Some(ref scorer) = scorer {
if update_scorer(scorer, &event) {
log_trace!(logger, "Persisting scorer after update");
_log_trace_with_peer_id_channel_id!(logger, None, None, "Persisting scorer after update");
if let Err(e) = persister.persist_scorer(&scorer) {
log_error!(logger, "Error: Failed to persist scorer, check your disk and permissions {}", e)
_log_error_with_peer_id_channel_id!(logger, None, None, "Error: Failed to persist scorer, check your disk and permissions {}", e)
}
}
}
Expand Down
28 changes: 14 additions & 14 deletions lightning-invoice/src/utils.rs
Original file line number Diff line number Diff line change
Expand Up @@ -186,8 +186,8 @@ where
)
.map_err(|_| SignOrCreationError::CreationError(CreationError::InvalidAmount))?
};

log_trace!(logger, "Creating phantom invoice from {} participating nodes with payment hash {}",
	// TODO: example of an external crate that originally called `log_trace!`; that macro can no longer be provided by re-exporting via `pub use _log_trace_with_peer_id_channel_id as log_trace`
_log_trace_with_peer_id_channel_id!(logger, None, None, "Creating phantom invoice from {} participating nodes with payment hash {}",
phantom_route_hints.len(), log_bytes!(payment_hash.0));

let mut invoice = invoice
Expand Down Expand Up @@ -236,7 +236,7 @@ where
let mut phantom_hints: Vec<Vec<RouteHint>> = Vec::new();

for PhantomRouteHints { channels, phantom_scid, real_node_pubkey } in phantom_route_hints {
log_trace!(logger, "Generating phantom route hints for node {}",
_log_trace_with_peer_id_channel_id!(logger, Some(real_node_pubkey), None, "Generating phantom route hints for node {}",
log_pubkey!(real_node_pubkey));
let mut route_hints = sort_and_filter_channels(channels, amt_msat, &logger);

Expand Down Expand Up @@ -513,7 +513,7 @@ fn _create_invoice_from_channelmanager_and_duration_since_epoch_with_payment_has
return Err(SignOrCreationError::CreationError(CreationError::MinFinalCltvExpiryDeltaTooShort));
}

log_trace!(logger, "Creating invoice with payment hash {}", log_bytes!(payment_hash.0));
_log_trace_with_peer_id_channel_id!(logger, None, None, "Creating invoice with payment hash {}", log_bytes!(payment_hash.0));

let invoice = match description {
InvoiceDescription::Direct(description) => {
Expand Down Expand Up @@ -584,10 +584,10 @@ fn sort_and_filter_channels<L: Deref>(
let mut online_min_capacity_channel_exists = false;
let mut has_pub_unconf_chan = false;

log_trace!(logger, "Considering {} channels for invoice route hints", channels.len());
_log_trace_with_peer_id_channel_id!(logger, None, None, "Considering {} channels for invoice route hints", channels.len());
for channel in channels.into_iter().filter(|chan| chan.is_channel_ready) {
if channel.get_inbound_payment_scid().is_none() || channel.counterparty.forwarding_info.is_none() {
log_trace!(logger, "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id));
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} for invoice route hints", log_bytes!(channel.channel_id));
continue;
}

Expand All @@ -600,15 +600,15 @@ fn sort_and_filter_channels<L: Deref>(
} else {
// If any public channel exists, return no hints and let the sender
// look at the public channels instead.
log_trace!(logger, "Not including channels in invoice route hints on account of public channel {}",
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Not including channels in invoice route hints on account of public channel {}",
log_bytes!(channel.channel_id));
return vec![]
}
}

if channel.inbound_capacity_msat >= min_inbound_capacity {
if !min_capacity_channel_exists {
log_trace!(logger, "Channel with enough inbound capacity exists for invoice route hints");
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Channel with enough inbound capacity exists for invoice route hints");
min_capacity_channel_exists = true;
}

Expand All @@ -618,7 +618,7 @@ fn sort_and_filter_channels<L: Deref>(
}

if channel.is_usable && !online_channel_exists {
log_trace!(logger, "Channel with connected peer exists for invoice route hints");
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Channel with connected peer exists for invoice route hints");
online_channel_exists = true;
}

Expand All @@ -638,7 +638,7 @@ fn sort_and_filter_channels<L: Deref>(
let new_channel_preferable = channel.is_public == entry.get().is_public && !prefer_current;

if new_now_public || new_channel_preferable {
log_trace!(logger,
_log_trace_with_peer_id_channel_id!(logger, None, None,
"Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
log_pubkey!(channel.counterparty.node_id),
log_bytes!(channel.channel_id), channel.short_channel_id,
Expand All @@ -647,7 +647,7 @@ fn sort_and_filter_channels<L: Deref>(
current_max_capacity);
entry.insert(channel);
} else {
log_trace!(logger,
_log_trace_with_peer_id_channel_id!(logger, None, None,
"Preferring counterparty {} channel {} (SCID {:?}, {} msats) over {} (SCID {:?}, {} msats) for invoice route hints",
log_pubkey!(channel.counterparty.node_id),
log_bytes!(entry.get().channel_id), entry.get().short_channel_id,
Expand Down Expand Up @@ -703,14 +703,14 @@ fn sort_and_filter_channels<L: Deref>(
} else { true };

if include_channel {
log_trace!(logger, "Including channel {} in invoice route hints",
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Including channel {} in invoice route hints",
log_bytes!(channel.channel_id));
} else if !has_enough_capacity {
log_trace!(logger, "Ignoring channel {} without enough capacity for invoice route hints",
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} without enough capacity for invoice route hints",
log_bytes!(channel.channel_id));
} else {
debug_assert!(!channel.is_usable || (has_pub_unconf_chan && !channel.is_public));
log_trace!(logger, "Ignoring channel {} with disconnected peer",
_log_trace_with_peer_id_channel_id!(logger, Some(channel.counterparty.node_id), Some(channel.channel_id), "Ignoring channel {} with disconnected peer",
log_bytes!(channel.channel_id));
}

Expand Down
18 changes: 9 additions & 9 deletions lightning-rapid-gossip-sync/src/processing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ use lightning::ln::msgs::{
};
use lightning::routing::gossip::NetworkGraph;
use lightning::util::logger::Logger;
use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip};
use lightning::{_log_debug_with_peer_id_channel_id, _log_warn_with_peer_id_channel_id, _log_trace_with_peer_id_channel_id, log_given_level, _log_gossip_with_peer_id_channel_id};
use lightning::util::ser::{BigSize, Readable};
use lightning::io;

Expand Down Expand Up @@ -59,7 +59,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
mut read_cursor: &mut R,
current_time_unix: Option<u64>
) -> Result<u32, GraphSyncError> {
log_trace!(self.logger, "Processing RGS data...");
_log_trace_with_peer_id_channel_id!(self.logger, None, None, "Processing RGS data...");
let mut prefix = [0u8; 4];
read_cursor.read_exact(&mut prefix)?;

Expand Down Expand Up @@ -122,7 +122,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
let node_id_1 = node_ids[node_id_1_index.0 as usize];
let node_id_2 = node_ids[node_id_2_index.0 as usize];

log_gossip!(self.logger, "Adding channel {} from RGS announcement at {}",
_log_gossip_with_peer_id_channel_id!(self.logger, None, None,"Adding channel {} from RGS announcement at {}",
short_channel_id, latest_seen_timestamp);

let announcement_result = network_graph.add_channel_from_partial_announcement(
Expand All @@ -136,7 +136,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action {
// everything is fine, just a duplicate channel announcement
} else {
log_warn!(self.logger, "Failed to process channel announcement: {:?}", lightning_error);
_log_warn_with_peer_id_channel_id!(self.logger, None, None,"Failed to process channel announcement: {:?}", lightning_error);
return Err(lightning_error.into());
}
}
Expand All @@ -145,7 +145,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
previous_scid = 0; // updates start at a new scid

let update_count: u32 = Readable::read(read_cursor)?;
log_debug!(self.logger, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.",
_log_debug_with_peer_id_channel_id!(self.logger, None, None, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.",
latest_seen_timestamp, node_id_count, announcement_count, update_count);
if update_count == 0 {
return Ok(latest_seen_timestamp);
Expand Down Expand Up @@ -198,7 +198,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
synthetic_update.fee_base_msat = directional_info.fees.base_msat;
synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths;
} else {
log_trace!(self.logger,
_log_trace_with_peer_id_channel_id!(self.logger, None, None,
"Skipping application of channel update for chan {} with flags {} as original data is missing.",
short_channel_id, channel_flags);
skip_update_for_unknown_channel = true;
Expand Down Expand Up @@ -234,13 +234,13 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
continue;
}

log_gossip!(self.logger, "Updating channel {} with flags {} from RGS announcement at {}",
_log_gossip_with_peer_id_channel_id!(self.logger, None, None, "Updating channel {} with flags {} from RGS announcement at {}",
short_channel_id, channel_flags, latest_seen_timestamp);
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
log_given_level!(self.logger, level, "Failed to apply channel update: {:?}", err);
log_given_level!(self.logger, level, None, None, "Failed to apply channel update: {:?}", err);
},
Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {},
Err(e) => return Err(e.into()),
Expand All @@ -254,7 +254,7 @@ impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L
}

self.is_initial_sync_complete.store(true, Ordering::Release);
log_trace!(self.logger, "Done processing RGS data from {}", latest_seen_timestamp);
_log_trace_with_peer_id_channel_id!(self.logger, None, None, "Done processing RGS data from {}", latest_seen_timestamp);
Ok(latest_seen_timestamp)
}
}
Expand Down
Loading

0 comments on commit 8515d27

Please sign in to comment.