feat: add adapter tx/rx metrics #358

Status: Draft · wants to merge 42 commits into base: master

Commits (42, all by maksymar)

1d8c5e5  stop logging the whole adapter response (Jan 28, 2025)
191533c  . (Jan 28, 2025)
a54cab2  . (Jan 28, 2025)
6a3855d  . (Jan 28, 2025)
eb2aa61  add successor_response_stats (Jan 28, 2025)
99eea13  update ignore files (Jan 28, 2025)
173a790  TESTNET_CHAIN_MAX_DEPTH: u128 = 200 (Jan 28, 2025)
30c4a02  log_metrics (Jan 28, 2025)
47e985a  fix labels (Jan 28, 2025)
cedb00f  serde default (Jan 28, 2025)
334c7d9  add CARGO_TERM_COLOR always to CI (Jan 28, 2025)
aa70945  TESTNET_CHAIN_MAX_DEPTH 1000 (Jan 28, 2025)
5bf0201  TESTNET_CHAIN_MAX_DEPTH: u128 = 500 (Jan 28, 2025)
308dfa6  TESTNET_CHAIN_MAX_DEPTH: u128 = 300 (Jan 28, 2025)
6aaa505  TESTNET_CHAIN_MAX_DEPTH: u128 = 700 (Jan 28, 2025)
a06b966  turn of extra logging (Jan 28, 2025)
52679b7  turn off metrics (Jan 28, 2025)
a9af118  . (Jan 28, 2025)
dc27d21  enable metrics (Jan 28, 2025)
4176aeb  TESTNET_CHAIN_MAX_DEPTH 1000 (Jan 28, 2025)
fb03fcb  TESTNET_CHAIN_MAX_DEPTH 700 (Jan 28, 2025)
6651959  TESTNET_CHAIN_MAX_DEPTH 500 (Jan 28, 2025)
4e2046d  TESTNET_CHAIN_MAX_DEPTH 300 (Jan 28, 2025)
ab520d2  TESTNET_CHAIN_MAX_DEPTH 500 (Jan 28, 2025)
eed2433  TESTNET_CHAIN_MAX_DEPTH 700 (Jan 28, 2025)
1c348b5  TESTNET_CHAIN_MAX_DEPTH 1000 (Jan 28, 2025)
780d564  TESTNET_CHAIN_MAX_DEPTH 900 (Jan 28, 2025)
4029a93  TESTNET_CHAIN_MAX_DEPTH 800 (Jan 28, 2025)
094fa98  1000 (Jan 28, 2025)
f80fc3a  add extra logs (Jan 28, 2025)
0630d74  commet log_metrics (Jan 29, 2025)
3dd5a29  rename label (Jan 29, 2025)
bbc0127  fix printing logs (Jan 29, 2025)
47e05be  fix logs (Jan 29, 2025)
cce30ad  add successor_request_stats metric (Jan 31, 2025)
a802bcb  . (Jan 31, 2025)
a988fde  . (Jan 31, 2025)
73a22b8  . (Jan 31, 2025)
989b299  . (Jan 31, 2025)
cc17a79  fix rx/tx metrics (Jan 31, 2025)
820b759  merge master (Feb 3, 2025)
b3ad758  rm log_metrics (Feb 3, 2025)
canister/src/api/metrics.rs (30 additions, 0 deletions)

@@ -169,6 +169,36 @@ fn encode_metrics(w: &mut MetricsEncoder<Vec<u8>>) -> std::io::Result<()> {
.value(&[("flag", "enabled")], enabled)?
.value(&[("flag", "disabled")], disabled)?;

if let Some(stats) = &state.syncing_state.get_successors_request_stats {
encode_labeled_gauge(
w,
"get_successors_tx_count",
"The number of get_successors requests.",
&stats.get_count_metrics(),
)?;
}

if let Some(stats) = &state.syncing_state.get_successors_response_stats {
encode_labeled_gauge(
w,
"get_successors_rx_count",
"The number of get_successors responses.",
&stats.get_count_metrics(),
)?;
encode_labeled_gauge(
w,
"get_successors_rx_block_count",
"The number of blocks in get_successors responses.",
&stats.get_block_count_metrics(),
)?;
encode_labeled_gauge(
w,
"get_successors_rx_block_size",
"The total size of the blocks in get_successors responses.",
&stats.get_block_size_metrics(),
)?;
}

Ok(())
})
}
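
Note (illustrative, not part of this diff): `encode_labeled_gauge` is called above without being added here, so it is presumably already defined elsewhere in metrics.rs. A hypothetical sketch of such a helper, assuming the labeled-builder API of `MetricsEncoder` that the surrounding `.value(&[("flag", ...)], ...)` calls suggest; the real signature may differ:

// Hypothetical reconstruction; the actual helper in metrics.rs may differ.
// It emits one gauge sample per ("type", <variant>) label pair, producing
// output such as:
//   get_successors_rx_count{type="total"} 7
//   get_successors_rx_count{type="complete"} 5
fn encode_labeled_gauge(
    w: &mut MetricsEncoder<Vec<u8>>,
    name: &str,
    help: &str,
    metrics: &[((&str, &str), u64)],
) -> std::io::Result<()> {
    let mut gauge = w.gauge_vec(name, help)?;
    for (label, value) in metrics {
        gauge = gauge.value(&[*label], *value as f64)?;
    }
    Ok(())
}
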
canister/src/heartbeat.rs (41 additions, 1 deletion)

@@ -1,7 +1,7 @@
use crate::{
api::get_current_fee_percentiles_impl,
runtime::{call_get_successors, cycles_burn, print},
state::{self, ResponseToProcess},
state::{self, ResponseToProcess, SuccessorsRequestStats, SuccessorsResponseStats},
types::{
GetSuccessorsCompleteResponse, GetSuccessorsRequest, GetSuccessorsRequestInitial,
GetSuccessorsResponse,
@@ -63,6 +63,18 @@ async fn maybe_fetch_blocks() -> bool {
}
};

with_state_mut(|s| {
let tx_stats = s
.syncing_state
.get_successors_request_stats
.get_or_insert_with(SuccessorsRequestStats::default);
tx_stats.total_count += 1;
match request {
GetSuccessorsRequest::Initial(_) => tx_stats.initial_count += 1,
GetSuccessorsRequest::FollowUp(_) => tx_stats.follow_up_count += 1,
}
});

print(&format!("Sending request: {:?}", request));

let response: Result<(GetSuccessorsResponse,), _> =
@@ -80,6 +92,10 @@ async fn maybe_fetch_blocks() -> bool {
}
};

s.syncing_state
.get_successors_response_stats
.get_or_insert_with(SuccessorsResponseStats::default);

match response {
GetSuccessorsResponse::Complete(response) => {
// Received complete response.
@@ -93,6 +109,14 @@ async fn maybe_fetch_blocks() -> bool {
"Received complete response: {} blocks, total {} bytes.",
count, bytes,
));
if let Some(rx_stats) = s.syncing_state.get_successors_response_stats.as_mut() {
rx_stats.complete_count += 1;
rx_stats.complete_block_count += count;
rx_stats.complete_block_size += bytes;
rx_stats.total_count += 1;
rx_stats.total_block_count += count;
rx_stats.total_block_size += bytes;
}
s.syncing_state.response_to_process = Some(ResponseToProcess::Complete(response));
}
GetSuccessorsResponse::Partial(partial_response) => {
@@ -107,6 +131,14 @@ async fn maybe_fetch_blocks() -> bool {
"Received partial response: {} bytes, {} follow-ups remaining.",
bytes, remaining,
));
if let Some(rx_stats) = s.syncing_state.get_successors_response_stats.as_mut() {
rx_stats.partial_count += 1;
rx_stats.partial_block_count += 1;
rx_stats.partial_block_size += bytes;
rx_stats.total_count += 1;
rx_stats.total_block_count += 1;
rx_stats.total_block_size += bytes;
}
s.syncing_state.response_to_process =
Some(ResponseToProcess::Partial(partial_response, 0));
}
@@ -116,6 +148,14 @@ async fn maybe_fetch_blocks() -> bool {
// a partial response to process.
let bytes = block_bytes.len() as u64;
print(&format!("Received follow-up response: {} bytes.", bytes));
if let Some(rx_stats) = s.syncing_state.get_successors_response_stats.as_mut() {
rx_stats.follow_up_count += 1;
rx_stats.follow_up_block_count += 1;
rx_stats.follow_up_block_size += bytes;
rx_stats.total_count += 1;
rx_stats.total_block_count += 1;
rx_stats.total_block_size += bytes;
}
let (mut partial_response, mut follow_up_index) = match s.syncing_state.response_to_process.take() {
Some(ResponseToProcess::Partial(res, pages)) => (res, pages),
other => unreachable!("Cannot receive follow-up response without a previous partial response. Previous response found: {:?}", other)
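
Note (illustrative, not part of this diff): a minimal sketch of how the new response counters are expected to evolve for a single follow-up response, mirroring the branch above; it assumes only the `SuccessorsResponseStats` type added in canister/src/state.rs further down.

#[test]
fn follow_up_response_updates_rx_stats() {
    let mut rx_stats = SuccessorsResponseStats::default();
    let bytes = 1_024_u64; // hypothetical size of the single returned block

    // Mirrors the follow-up branch of maybe_fetch_blocks(): one response, one block.
    rx_stats.follow_up_count += 1;
    rx_stats.follow_up_block_count += 1;
    rx_stats.follow_up_block_size += bytes;
    rx_stats.total_count += 1;
    rx_stats.total_block_count += 1;
    rx_stats.total_block_size += bytes;

    // The per-type label vectors expose the same counters to the metrics encoder.
    assert_eq!(rx_stats.get_count_metrics()[0], (("type", "total"), 1));
    assert_eq!(rx_stats.get_block_size_metrics()[3], (("type", "follow_up"), 1_024));
}
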
canister/src/state.rs (75 additions, 0 deletions)

@@ -325,6 +325,14 @@ pub struct SyncingState {

/// The number of errors occurred when inserting a block.
pub num_insert_block_errors: u64,

/// Stats about the request sent to GetSuccessors.
#[serde(default)] // Ensures backward compatibility during deserialization
pub get_successors_request_stats: Option<SuccessorsRequestStats>,

/// Stats about the responses received from GetSuccessors.
#[serde(default)] // Ensures backward compatibility during deserialization
pub get_successors_response_stats: Option<SuccessorsResponseStats>,
}

impl Default for SyncingState {
@@ -336,10 +344,77 @@ impl Default for SyncingState {
num_get_successors_rejects: 0,
num_block_deserialize_errors: 0,
num_insert_block_errors: 0,
get_successors_request_stats: None,
get_successors_response_stats: None,
}
}
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)]
pub struct SuccessorsRequestStats {
pub total_count: u64,
pub initial_count: u64,
pub follow_up_count: u64,
}

impl SuccessorsRequestStats {
pub fn get_count_metrics(&self) -> Vec<((&str, &str), u64)> {
vec![
(("type", "total"), self.total_count),
(("type", "initial"), self.initial_count),
(("type", "follow_up"), self.follow_up_count),
]
}
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default)]
pub struct SuccessorsResponseStats {
pub total_count: u64,
pub total_block_count: u64,
pub total_block_size: u64,

pub complete_count: u64,
pub complete_block_count: u64,
pub complete_block_size: u64,

pub partial_count: u64,
pub partial_block_count: u64,
pub partial_block_size: u64,

pub follow_up_count: u64,
pub follow_up_block_count: u64,
pub follow_up_block_size: u64,
}

impl SuccessorsResponseStats {
pub fn get_count_metrics(&self) -> Vec<((&str, &str), u64)> {
vec![
(("type", "total"), self.total_count),
(("type", "complete"), self.complete_count),
(("type", "partial"), self.partial_count),
(("type", "follow_up"), self.follow_up_count),
]
}

pub fn get_block_count_metrics(&self) -> Vec<((&str, &str), u64)> {
vec![
(("type", "total"), self.total_block_count),
(("type", "complete"), self.complete_block_count),
(("type", "partial"), self.partial_block_count),
(("type", "follow_up"), self.follow_up_block_count),
]
}

pub fn get_block_size_metrics(&self) -> Vec<((&str, &str), u64)> {
vec![
(("type", "total"), self.total_block_size),
(("type", "complete"), self.complete_block_size),
(("type", "partial"), self.partial_block_size),
(("type", "follow_up"), self.follow_up_block_size),
]
}
}

/// Cache for storing last calculated fee percentiles
///
/// Stores last tip block hash and fee percentiles associated with it.
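
Note (illustrative, not part of this diff): the `#[serde(default)]` attributes above let a state snapshot written by the previous canister version, which has neither stats field, deserialize cleanly with both fields falling back to `None`. A toy sketch of the same mechanism, using a hypothetical struct and serde_json purely for illustration (the canister may serialize its state with a different serde format):

use serde::Deserialize;

#[derive(Deserialize, Debug, PartialEq)]
struct SyncingStateV2 {
    num_insert_block_errors: u64,
    // Absent from snapshots written by the old version; defaults to None.
    #[serde(default)]
    get_successors_request_stats: Option<u64>, // stand-in for SuccessorsRequestStats
}

fn main() {
    // Payload shaped like one the old struct (without the stats field) would produce.
    let old_payload = r#"{ "num_insert_block_errors": 3 }"#;
    let state: SyncingStateV2 = serde_json::from_str(old_payload).unwrap();
    assert_eq!(state.get_successors_request_stats, None); // backward-compatible
    println!("{:?}", state);
}
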
canister/src/unstable_blocks.rs (1 addition, 1 deletion)

@@ -17,7 +17,7 @@ use self::next_block_headers::NextBlockHeaders;

// The maximum number of blocks that a chain on testnet can exceed other chains before its
// anchor block is marked as stable.
const TESTNET_CHAIN_MAX_DEPTH: u128 = 1000;
const TESTNET_CHAIN_MAX_DEPTH: u128 = 200; // TODO: DO NOT SUBMIT! this is only for testing.

/// A data structure for maintaining all unstable blocks.
///
dfx.json (9 additions, 3 deletions)

@@ -1,7 +1,13 @@
{
"dfx": "0.21.0",
"canisters": {
"bitcoin": {
"bitcoin_t": {
"type": "custom",
"candid": "./canister/candid.did",
"wasm": "target/wasm32-unknown-unknown/release/ic-btc-canister.wasm.gz",
"build": "./scripts/build-canister.sh ic-btc-canister"
},
"bitcoin_m": {
"type": "custom",
"candid": "./canister/candid.did",
"wasm": "target/wasm32-unknown-unknown/release/ic-btc-canister.wasm.gz",
@@ -79,10 +85,10 @@
},
"testnet": {
"providers": [
"http://[2a00:fb01:400:42:5000:aaff:fea4:ae46]:8080"
"http://[2600:c00:2:100:505e:86ff:fed7:a12]:8080"
],
"type": "persistent"
}
},
"version": 1
}
}
poll_logs.sh (28 additions, 0 deletions)

@@ -0,0 +1,28 @@
#!/bin/bash

CANISTER_ID="g4xu7-jiaaa-aaaan-aaaaq-cai"

# Function to fetch logs and filter out new lines
fetch_and_filter_logs() {
# Fetch logs
new_logs=$(dfx canister logs --network testnet $CANISTER_ID)

# Compare with previous logs to find new ones
while IFS= read -r line; do
if [[ ! "${previous_logs[*]}" =~ "$line" ]]; then
echo "$line"
fi
done <<< "$new_logs"

# Update previous logs
previous_logs=("$new_logs")
}

# Initial fetch and filter
fetch_and_filter_logs

# Infinite loop to continuously fetch and filter logs
while true; do
fetch_and_filter_logs
sleep 0.1
done