Version-aware serialization of node features and addresses in delta.
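Decouples delta calculation (calculate_delta) from serialization (serialize_delta) and threads an explicit serialization-version byte through the output: the static prefix shrinks to the three bytes [76, 68, 75], and version 2 additionally serializes per-node feature and address mutations, signaled via a bitmap OR'd into the first byte of each node's pubkey.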
arik-so committed May 17, 2024
1 parent 6c57ce0 commit 6a84338
Showing 4 changed files with 246 additions and 45 deletions.
143 changes: 121 additions & 22 deletions src/lib.rs
@@ -15,6 +15,7 @@ use std::io::BufReader;
use std::ops::Deref;
use std::sync::Arc;
use bitcoin::blockdata::constants::ChainHash;
use lightning::ln::msgs::SocketAddress;
use lightning::log_info;

use lightning::routing::gossip::{NetworkGraph, NodeId};
@@ -23,10 +24,10 @@ use lightning::util::ser::{ReadableArgs, Writeable};
use tokio::sync::mpsc;
use tokio_postgres::{Client, NoTls};
use crate::config::SYMLINK_GRANULARITY_INTERVAL;
use crate::lookup::DeltaSet;
use crate::lookup::{DeltaSet, NodeDeltaSet};

use crate::persistence::GossipPersister;
use crate::serialization::UpdateSerialization;
use crate::serialization::{SerializationSet, UpdateSerialization};
use crate::snapshot::Snapshotter;
use crate::types::RGSSLogger;

@@ -49,7 +50,7 @@ mod tests;
/// sync formats arise in the future.
///
/// The fourth byte is the protocol version in case our format gets updated.
const GOSSIP_PREFIX: [u8; 4] = [76, 68, 75, 1];
const GOSSIP_PREFIX: [u8; 3] = [76, 68, 75];

pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
network_graph: Arc<NetworkGraph<L>>,
@@ -59,7 +60,13 @@ pub struct RapidSyncProcessor<L: Deref> where L::Target: Logger {
pub struct SerializedResponse {
pub data: Vec<u8>,
pub message_count: u32,
pub announcement_count: u32,
pub node_announcement_count: u32,
/// Despite the name, the count of node announcements that have associated updates, be those
/// features, addresses, or both
pub node_update_count: u32,
pub node_feature_update_count: u32,
pub node_address_update_count: u32,
pub channel_announcement_count: u32,
pub update_count: u32,
pub update_count_full: u32,
pub update_count_incremental: u32,
@@ -171,18 +178,32 @@ fn serialize_empty_blob(current_timestamp: u64) -> Vec<u8> {
blob
}

async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializedResponse where L::Target: Logger {
async fn calculate_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>, last_sync_timestamp: u32, snapshot_reference_timestamp: Option<u64>, logger: L) -> SerializationSet where L::Target: Logger {
let client = connect_to_db().await;

network_graph.remove_stale_channels_and_tracking();

let mut output: Vec<u8> = vec![];
let snapshot_interval = config::snapshot_generation_interval();

// set a flag if the chain hash is prepended
// chain hash only necessary if either channel announcements or non-incremental updates are present
// for announcement-free incremental-only updates, chain hash can be skipped

let mut delta_set = DeltaSet::new();
let mut node_delta_set = NodeDeltaSet::new();
lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
log_info!(logger, "announcement channel count: {}", delta_set.len());
lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
log_info!(logger, "update-fetched channel count: {}", delta_set.len());
lookup::fetch_node_updates(&mut node_delta_set, &client, last_sync_timestamp, logger.clone()).await;
log_info!(logger, "update-fetched node count: {}", node_delta_set.len());
lookup::filter_delta_set(&mut delta_set, logger.clone());
log_info!(logger, "update-filtered channel count: {}", delta_set.len());
serialization::serialize_delta_set(delta_set, node_delta_set, last_sync_timestamp)
}
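With the pipeline split this way, one computed delta can be serialized at several protocol versions. A minimal sketch of the call pattern, assuming an async context with network_graph, last_sync_timestamp, and logger in scope (the version-2 call is hypothetical; the snapshotter below passes 1):

let delta = calculate_delta(network_graph.clone(), last_sync_timestamp, None, logger.clone()).await;
let v1 = serialize_delta(&delta, 1, logger.clone()); // legacy format: prefix [76, 68, 75] plus version byte 1
let v2 = serialize_delta(&delta, 2, logger.clone()); // additionally emits node feature/address mutations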

fn serialize_delta<L: Deref + Clone>(serialization_details: &SerializationSet, serialization_version: u8, logger: L) -> SerializedResponse where L::Target: Logger {
let mut output: Vec<u8> = vec![];
let snapshot_interval = config::snapshot_generation_interval();

let mut node_id_set: HashSet<NodeId> = HashSet::new();
let mut node_id_indices: HashMap<NodeId, usize> = HashMap::new();
let mut node_ids: Vec<NodeId> = Vec::new();
@@ -199,21 +220,12 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
node_id_indices[&node_id]
};
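// get_node_id_index (body elided above) interns node ids: the first distinct
// id gets index 0, the next index 1, and so on, letting announcements refer to
// nodes by index instead of repeating 33-byte pubkeys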

let mut delta_set = DeltaSet::new();
lookup::fetch_channel_announcements(&mut delta_set, network_graph, &client, last_sync_timestamp, snapshot_reference_timestamp, logger.clone()).await;
log_info!(logger, "announcement channel count: {}", delta_set.len());
lookup::fetch_channel_updates(&mut delta_set, &client, last_sync_timestamp, logger.clone()).await;
log_info!(logger, "update-fetched channel count: {}", delta_set.len());
lookup::filter_delta_set(&mut delta_set, logger.clone());
log_info!(logger, "update-filtered channel count: {}", delta_set.len());
let serialization_details = serialization::serialize_delta_set(delta_set, last_sync_timestamp);

// process announcements
// write the number of channel announcements to the output
let announcement_count = serialization_details.announcements.len() as u32;
announcement_count.write(&mut output).unwrap();
let mut previous_announcement_scid = 0;
for current_announcement in serialization_details.announcements {
for current_announcement in &serialization_details.announcements {
let id_index_1 = get_node_id_index(current_announcement.node_id_1);
let id_index_2 = get_node_id_index(current_announcement.node_id_2);
let mut stripped_announcement = serialization::serialize_stripped_channel_announcement(&current_announcement, id_index_1, id_index_2, previous_announcement_scid);
@@ -227,7 +239,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
let update_count = serialization_details.updates.len() as u32;
update_count.write(&mut output).unwrap();

let default_update_values = serialization_details.full_update_defaults;
let default_update_values = &serialization_details.full_update_defaults;
if update_count > 0 {
default_update_values.cltv_expiry_delta.write(&mut output).unwrap();
default_update_values.htlc_minimum_msat.write(&mut output).unwrap();
@@ -238,7 +250,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,

let mut update_count_full = 0;
let mut update_count_incremental = 0;
for current_update in serialization_details.updates {
for current_update in &serialization_details.updates {
match &current_update {
UpdateSerialization::Full(_) => {
update_count_full += 1;
@@ -258,6 +270,7 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
let message_count = announcement_count + update_count;

let mut prefixed_output = GOSSIP_PREFIX.to_vec();
prefixed_output.push(serialization_version);

// always write the chain hash
serialization_details.chain_hash.write(&mut prefixed_output).unwrap();
@@ -267,11 +280,93 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
let serialized_seen_timestamp = latest_seen_timestamp.saturating_sub(overflow_seconds);
serialized_seen_timestamp.write(&mut prefixed_output).unwrap();
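// the blob so far: the 3-byte prefix [76, 68, 75], one version byte, the
// 32-byte chain hash, and this u32 latest-seen timestamp less any overflow seconds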

if serialization_version >= 2 { // serialize the most common node features
for mutated_node_id in serialization_details.node_mutations.keys() {
// consider mutated nodes outside channel announcements
get_node_id_index(mutated_node_id.clone());
}

let default_feature_count = serialization_details.node_announcement_feature_defaults.len() as u8;
default_feature_count.write(&mut prefixed_output).unwrap();

for current_feature in &serialization_details.node_announcement_feature_defaults {
current_feature.write(&mut prefixed_output).unwrap();
}
}
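// a version >= 2 blob thus carries a u8 default-feature count followed by that
// many serialized NodeFeatures, ahead of the node id list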

let node_id_count = node_ids.len() as u32;
node_id_count.write(&mut prefixed_output).unwrap();

let mut node_update_count = 0u32;
let mut node_feature_update_count = 0u32;
let mut node_address_update_count = 0u32;

for current_node_id in node_ids {
current_node_id.write(&mut prefixed_output).unwrap();
let mut current_node_delta_serialization: Vec<u8> = Vec::new();
current_node_id.write(&mut current_node_delta_serialization).unwrap();

if serialization_version >= 2 {
let mut current_node_delta_extension: Vec<u8> = Vec::new();

if let Some(node_delta) = serialization_details.node_mutations.get(&current_node_id) {
let mut has_update = false;

/*
Bitmap:
7: expect extra data after the pubkey (a u16 for the count, and then that number of bytes)
6-4: index of new features among the defaults (0-6). If the index is 7 (all 3 bits
set), the feature set is outside the present default range
3: features have changed
2: addresses have changed
1: used for all keys
0: used for odd keys
*/
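// Worked example (hypothetical node): addresses and features both changed, with
// the feature set at index 2 of the defaults. Bits 2 and 3 are set, 2 lands in
// bits 6-4, and bit 7 is set because the address extension makes the extra-data
// blob non-empty: the first byte becomes 0b_1010_1100 OR'd onto the pubkey's
// leading 0x02/0x03 parity byte.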

if node_delta.has_address_set_changed {
// signal the presence of node addresses
current_node_delta_serialization[0] |= 1 << 2;
let address_set = &node_delta.latest_details_after_seen.as_ref().unwrap().addresses;
let addresses: Vec<SocketAddress> = address_set.clone().into_iter().collect();
addresses.write(&mut current_node_delta_extension).unwrap();

has_update = true;
node_address_update_count += 1;
}

if node_delta.has_feature_set_changed {
// signal the presence of node features

current_node_delta_serialization[0] |= 1 << 3;
let latest_features = &node_delta.latest_details_after_seen.as_ref().unwrap().features;

// are these features among the most common ones?
if let Some(index) = serialization_details.node_announcement_feature_defaults.iter().position(|f| f == latest_features) {
// this feature set is among the (up to 7) defaults
current_node_delta_serialization[0] |= (index as u8) << 4;
} else {
current_node_delta_serialization[0] |= 0b_0111_0000; // 7 << 4
latest_features.write(&mut current_node_delta_extension).unwrap();
}

has_update = true;
node_feature_update_count += 1;
}

if has_update {
let extension_length = current_node_delta_extension.len() as u16;
if extension_length > 0 {
current_node_delta_serialization[0] |= 0b_1000_0000;
extension_length.write(&mut current_node_delta_serialization).unwrap();
current_node_delta_serialization.append(&mut current_node_delta_extension);
}

node_update_count += 1;
}
}
}

prefixed_output.append(&mut current_node_delta_serialization);
}

prefixed_output.append(&mut output);
@@ -282,7 +377,11 @@ async fn serialize_delta<L: Deref + Clone>(network_graph: Arc<NetworkGraph<L>>,
SerializedResponse {
data: prefixed_output,
message_count,
announcement_count,
node_announcement_count: node_id_count,
node_update_count,
node_feature_update_count,
node_address_update_count,
channel_announcement_count: announcement_count,
update_count,
update_count_full,
update_count_incremental,
33 changes: 30 additions & 3 deletions src/serialization.rs
@@ -4,16 +4,19 @@ use std::time::{SystemTime, UNIX_EPOCH};

use bitcoin::Network;
use bitcoin::blockdata::constants::ChainHash;
use lightning::ln::features::NodeFeatures;
use lightning::ln::msgs::{UnsignedChannelAnnouncement, UnsignedChannelUpdate};
use lightning::util::ser::{BigSize, Writeable};
use crate::config;

use crate::lookup::{DeltaSet, DirectedUpdateDelta};
use crate::lookup::{DeltaSet, DirectedUpdateDelta, NodeDeltaSet};

pub(super) struct SerializationSet {
pub(super) announcements: Vec<UnsignedChannelAnnouncement>,
pub(super) updates: Vec<UpdateSerialization>,
pub(super) full_update_defaults: DefaultUpdateValues,
pub(super) node_announcement_feature_defaults: Vec<NodeFeatures>,
pub(super) node_mutations: NodeDeltaSet,
pub(super) latest_seen: u32,
pub(super) chain_hash: ChainHash,
}
@@ -104,11 +107,13 @@ struct FullUpdateValueHistograms {
htlc_maximum_msat: HashMap<u64, usize>,
}

pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32) -> SerializationSet {
pub(super) fn serialize_delta_set(channel_delta_set: DeltaSet, node_delta_set: NodeDeltaSet, last_sync_timestamp: u32) -> SerializationSet {
let mut serialization_set = SerializationSet {
announcements: vec![],
updates: vec![],
full_update_defaults: Default::default(),
node_announcement_feature_defaults: vec![],
node_mutations: Default::default(),
chain_hash: ChainHash::using_genesis_block(Network::Bitcoin),
latest_seen: 0,
};
@@ -134,7 +139,7 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
// if the previous seen update happened more than 6 days ago, the client may have pruned it, and an incremental update wouldn't work
let non_incremental_previous_update_threshold_timestamp = SystemTime::now().checked_sub(config::CHANNEL_REMINDER_AGE).unwrap().duration_since(UNIX_EPOCH).unwrap().as_secs() as u32;

for (scid, channel_delta) in delta_set.into_iter() {
for (scid, channel_delta) in channel_delta_set.into_iter() {

// any announcement chain hash is gonna be the same value. Just set it from the first one.
let channel_announcement_delta = channel_delta.announcement.as_ref().unwrap();
@@ -214,6 +219,22 @@ pub(super) fn serialize_delta_set(delta_set: DeltaSet, last_sync_timestamp: u32)
};

serialization_set.full_update_defaults = default_update_values;

serialization_set.node_mutations = node_delta_set.into_iter().filter(|(_id, delta)| {
// either something changed, or this node is new
delta.has_feature_set_changed || delta.has_address_set_changed || delta.last_details_before_seen.is_none()
}).collect();

let mut node_feature_histogram: HashMap<&NodeFeatures, usize> = Default::default();
for (_id, delta) in serialization_set.node_mutations.iter() {
if delta.has_feature_set_changed || delta.last_details_before_seen.is_none() {
if let Some(latest_details) = delta.latest_details_after_seen.as_ref() {
*node_feature_histogram.entry(&latest_details.features).or_insert(0) += 1;
};
}
}
serialization_set.node_announcement_feature_defaults = find_leading_histogram_entries(node_feature_histogram, 7);

serialization_set
}

@@ -327,3 +348,9 @@ pub(super) fn find_most_common_histogram_entry_with_default<T: Copy>(histogram:
// though for htlc maximum msat it could be a u64::max
default
}

pub(super) fn find_leading_histogram_entries(histogram: HashMap<&NodeFeatures, usize>, count: usize) -> Vec<NodeFeatures> {
let mut entry_counts: Vec<_> = histogram.iter().filter(|&(_, &count)| count > 1).collect();
entry_counts.sort_by(|a, b| b.1.cmp(&a.1));
entry_counts.into_iter().take(count).map(|(&features, _count)| features.clone()).collect()
}
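For illustration, the selection rule on hypothetical inputs:

// a histogram {features_a: 5, features_b: 3, features_c: 1} with count = 7
// yields vec![features_a, features_b]: sets seen only once are filtered out
// before the top `count` entries are taken in descending frequency order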
5 changes: 3 additions & 2 deletions src/snapshot.rs
@@ -114,12 +114,13 @@ impl<L: Deref + Clone> Snapshotter<L> where L::Target: Logger {
{
log_info!(self.logger, "Calculating {}-second snapshot", current_scope);
// calculate the snapshot
let snapshot = super::serialize_delta(network_graph_clone, current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
let delta = super::calculate_delta(network_graph_clone.clone(), current_last_sync_timestamp.clone() as u32, Some(reference_timestamp), self.logger.clone()).await;
let snapshot = super::serialize_delta(&delta, 1, self.logger.clone());

// persist the snapshot and update the symlink
let snapshot_filename = format!("snapshot__calculated-at:{}__range:{}-scope__previous-sync:{}.lngossip", reference_timestamp, current_scope, current_last_sync_timestamp);
let snapshot_path = format!("{}/{}", pending_snapshot_directory, snapshot_filename);
log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot.message_count, snapshot.announcement_count, snapshot.update_count, snapshot.update_count_full, snapshot.update_count_incremental);
log_info!(self.logger, "Persisting {}-second snapshot: {} ({} messages, {} announcements, {} updates ({} full, {} incremental))", current_scope, snapshot_filename, snapshot.message_count, snapshot.channel_announcement_count, snapshot.update_count, snapshot.update_count_full, snapshot.update_count_incremental);
fs::write(&snapshot_path, snapshot.data).unwrap();
snapshot_filenames_by_scope.insert(current_scope.clone(), snapshot_filename);
}